Dataset schema (one row per source file; ⌀ marks columns that may be null):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5-2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-251 |
| max_stars_repo_name | string | length 4-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3-251 |
| max_issues_repo_name | string | length 4-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1-116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3-251 |
| max_forks_repo_name | string | length 4-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1-1.05M |
| avg_line_length | float64 | 1-1.02M |
| max_line_length | int64 | 3-1.04M |
| alphanum_fraction | float64 | 0-1 |
fc63dea630bc5c4125015a5d1d443364d31e7cfa
| 1,853 |
py
|
Python
|
trace_analysis/trace_analysis/architecture/interface.py
|
hsgwa/trace_analysis
|
16169f84e838af5202e2be8f4883dfca5bc7f592
|
[
"Apache-2.0"
] | null | null | null |
trace_analysis/trace_analysis/architecture/interface.py
|
hsgwa/trace_analysis
|
16169f84e838af5202e2be8f4883dfca5bc7f592
|
[
"Apache-2.0"
] | null | null | null |
trace_analysis/trace_analysis/architecture/interface.py
|
hsgwa/trace_analysis
|
16169f84e838af5202e2be8f4883dfca5bc7f592
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from abc import ABCMeta, abstractmethod
from trace_analysis.callback import CallbackBase
from trace_analysis.communication import VariablePassing, Communication
from trace_analysis.node import Node
UNDEFINED_STR = "UNDEFINED"
| 26.471429 | 81 | 0.72531 |
fc652ca3b217323de609509cf9bf7a86e05a1571
| 28 |
py
|
Python
|
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | 1 |
2021-05-06T14:46:46.000Z
|
2021-05-06T14:46:46.000Z
|
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | null | null | null |
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | null | null | null |
from thkc_disrank import *
| 14 | 27 | 0.785714 |
fc66871c7a70ed30f6605efa0e99f0abf3ceaa25
| 4,854 |
py
|
Python
|
layers/gin_layer.py
|
JakeStevens/benchmarking-gnns
|
a17fdf1b1d758fc65d5eeaf3726f5efa747a4081
|
[
"MIT"
] | 275 |
2020-10-22T22:03:33.000Z
|
2022-03-25T06:08:05.000Z
|
semisupervised_MNIST_CIFAR10/pre-training/layers/gin_layer.py
|
xgbt/GraphCL
|
d857849d51bb168568267e07007c0b0c8bb6d869
|
[
"MIT"
] | 43 |
2020-10-30T08:28:01.000Z
|
2022-03-31T16:55:12.000Z
|
semisupervised_MNIST_CIFAR10/pre-training/layers/gin_layer.py
|
xgbt/GraphCL
|
d857849d51bb168568267e07007c0b0c8bb6d869
|
[
"MIT"
] | 70 |
2020-10-28T19:14:18.000Z
|
2022-03-27T06:11:51.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (Keyulu Xu, Weihua Hu, Jure Leskovec and Stefanie Jegelka, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
| 32.577181 | 124 | 0.581994 |
fc66cd08fbbe334f7cc1e76eb76063eb07e5b49e
| 673 |
py
|
Python
|
music/distance/aural/diatonic/__init__.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
music/distance/aural/diatonic/__init__.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
music/distance/aural/diatonic/__init__.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
"""
*mus . it . dia*
The simple diatonic intervals.
"""
from .second import MinorSecond
from .second import MajorSecond
from .third import MinorThird
from .third import MajorThird
from .fourth import PerfectFourth
from .fifth import Tritone
from .fifth import PerfectFifth
from .sixth import MinorSixth
from .sixth import MajorSixth
from .seventh import MinorSeventh
from .seventh import MajorSeventh
from .eighth import Octave
__all__ = [
"MinorSecond",
"MajorSecond",
"MinorThird",
"MajorThird",
"PerfectFourth",
"Tritone",
"PerfectFifth",
"MinorSixth",
"MajorSixth",
"MinorSeventh",
"MajorSeventh",
"Octave",
]
| 18.694444 | 33 | 0.708767 |
fc6780fb69ebe4416f273d6821ceb9f2cb3226e8
| 760 |
py
|
Python
|
selenium_tests/test_functions.py
|
AriTheGuitarMan/AriTheGuitarMan.github.io
|
8348ad0c47e48477560e7e40ec7eac8bca6fcdfa
|
[
"MIT"
] | null | null | null |
selenium_tests/test_functions.py
|
AriTheGuitarMan/AriTheGuitarMan.github.io
|
8348ad0c47e48477560e7e40ec7eac8bca6fcdfa
|
[
"MIT"
] | null | null | null |
selenium_tests/test_functions.py
|
AriTheGuitarMan/AriTheGuitarMan.github.io
|
8348ad0c47e48477560e7e40ec7eac8bca6fcdfa
|
[
"MIT"
] | null | null | null |
# this file holds some common testing functions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
depurl = "localhost:3000"
| 33.043478 | 93 | 0.727632 |
fc68e0edf30588586dedbfe1358fdf97ab01598d
| 4,783 |
py
|
Python
|
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
papirus_renderer.py
|
ryuchihoon/WeatherStation
|
e3fd210939a961bc1724197f3885964cb4ae5a28
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import collections
from PIL import Image, ImageOps, ImageDraw, ImageFont
code_2_icono = collections.defaultdict(lambda : '38')
kor_2_eng = collections.defaultdict(lambda : 'UNKNOWN')
code_2_icono['SKY_O00'] = ['38']
code_2_icono['SKY_O01'] = ['01', '08']
code_2_icono['SKY_O02'] = ['02', '09']
code_2_icono['SKY_O03'] = ['03', '10']
code_2_icono['SKY_O04'] = ['12', '40']
code_2_icono['SKY_O05'] = ['13', '41']
code_2_icono['SKY_O06'] = ['14', '42']
code_2_icono['SKY_O07'] = ['18']
code_2_icono['SKY_O08'] = ['21']
code_2_icono['SKY_O09'] = ['32']
code_2_icono['SKY_O10'] = ['04']
code_2_icono['SKY_O11'] = ['29']
code_2_icono['SKY_O12'] = ['26']
code_2_icono['SKY_O13'] = ['27']
code_2_icono['SKY_O14'] = ['28']
code_2_icono['SKY_W00'] = ['38']
code_2_icono['SKY_W01'] = ['01', '08']
code_2_icono['SKY_W02'] = ['02', '09']
code_2_icono['SKY_W03'] = ['03', '10']
code_2_icono['SKY_W04'] = ['18']
code_2_icono['SKY_W07'] = ['21']
code_2_icono['SKY_W09'] = ['12', '40']
code_2_icono['SKY_W10'] = ['21']
code_2_icono['SKY_W11'] = ['04']
code_2_icono['SKY_W12'] = ['13', '41']
code_2_icono['SKY_W13'] = ['32']
kor_2_eng[u''] = ['GOOD']
kor_2_eng[u''] = ['NORMAL']
kor_2_eng[u''] = ['BAD']
kor_2_eng[u' '] = ['V BAD']
BLACK = 0
WHITE = 1
| 32.986207 | 127 | 0.615931 |
fc69e4e9cacf2317d6b062809fbe0cb9a22ea2b1
| 72 |
py
|
Python
|
hydrobox/discharge/__init__.py
|
VForWaTer/hydrobox
|
ae7d10bf5aa48bf7daf3d1094e6bb66f0a7ce96b
|
[
"MIT"
] | 4 |
2020-10-08T15:31:36.000Z
|
2021-06-25T00:46:40.000Z
|
hydrobox/discharge/__init__.py
|
joergmeyer-kit/hydrobox
|
af75a5ba87147e00656435c170535c69fc3298a8
|
[
"MIT"
] | 5 |
2020-05-12T08:45:18.000Z
|
2021-05-20T07:18:47.000Z
|
hydrobox/discharge/__init__.py
|
joergmeyer-kit/hydrobox
|
af75a5ba87147e00656435c170535c69fc3298a8
|
[
"MIT"
] | 3 |
2020-07-27T07:16:14.000Z
|
2021-04-28T21:57:48.000Z
|
from .catchment import regime, flow_duration_curve
from . import indices
| 36 | 50 | 0.847222 |
fc69e76506c689aa4c8cc54b37cd338453f7483a
| 1,256 |
py
|
Python
|
scripts/convert_keras2onnx.py
|
ecmwf-lab/infero
|
4fec006175af48cd0313b2f89722c01636e961db
|
[
"Apache-2.0"
] | 8 |
2021-12-20T06:24:16.000Z
|
2022-02-17T15:21:55.000Z
|
scripts/convert_keras2onnx.py
|
ecmwf-projects/infero
|
4c229a16ce75a249c83cbf43e0c953a7a42f2f83
|
[
"Apache-2.0"
] | null | null | null |
scripts/convert_keras2onnx.py
|
ecmwf-projects/infero
|
4c229a16ce75a249c83cbf43e0c953a7a42f2f83
|
[
"Apache-2.0"
] | 1 |
2021-10-04T10:14:23.000Z
|
2021-10-04T10:14:23.000Z
|
#
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import numpy as np
import argparse
import keras
import keras2onnx
if __name__ == "__main__":
"""
    Lightweight script to convert a keras model into an ONNX model
"""
parser = argparse.ArgumentParser("Data Augmentation")
parser.add_argument('keras_model_path', help="Path of the input keras model")
parser.add_argument('onnx_model_path', help="Path of the output onnx model")
parser.add_argument("--verify_with", help="Check the model by passing an input numpy path")
args = parser.parse_args()
# load the keras model
model = keras.models.load_model(args.keras_model_path)
model.summary()
# do the conversion
onnx_model = keras2onnx.convert_keras(model, model.name)
# write to file
file = open(args.onnx_model_path, "wb")
file.write(onnx_model.SerializeToString())
file.close()
| 30.634146 | 95 | 0.72293 |
fc6ae61e76507e8c85be28c293840912ca2612a4
| 7,683 |
py
|
Python
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 37 |
2021-09-30T16:47:15.000Z
|
2022-03-07T22:29:21.000Z
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 36 |
2021-11-04T16:54:55.000Z
|
2022-03-31T02:26:29.000Z
|
src/lava/lib/dl/slayer/utils/assistant.py
|
timcheck/lava-dl
|
e680722071129fde952ea0d744984aa2a038797a
|
[
"BSD-3-Clause"
] | 20 |
2021-10-29T22:55:58.000Z
|
2022-03-22T17:27:16.000Z
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Assistant utility for automatically load network from network
description."""
import torch
| 29.436782 | 78 | 0.521281 |
fc6bbad5b323947c5f4946373831830060872620
| 5,961 |
py
|
Python
|
lstm-synthetic-wave-anomaly-detect.py
|
cse-icon-dataAnalytics/lstm-anomaly-detect
|
bcfb01db383698acbd5692f1a76a5f20ec3629a8
|
[
"MIT"
] | 178 |
2016-03-31T05:32:07.000Z
|
2022-03-26T02:36:35.000Z
|
lstm-synthetic-wave-anomaly-detect.py
|
rob-med/lstm-anomaly-detect
|
bcfb01db383698acbd5692f1a76a5f20ec3629a8
|
[
"MIT"
] | 4 |
2016-11-01T01:51:06.000Z
|
2018-04-24T13:42:33.000Z
|
lstm-synthetic-wave-anomaly-detect.py
|
rob-med/lstm-anomaly-detect
|
bcfb01db383698acbd5692f1a76a5f20ec3629a8
|
[
"MIT"
] | 106 |
2016-03-31T05:32:11.000Z
|
2021-08-28T09:49:16.000Z
|
""" Inspired by example from
https://github.com/Vict0rSch/deep_learning/tree/master/keras/recurrent
Uses the TensorFlow backend
The basic idea is to detect anomalies in a time-series.
"""
import matplotlib.pyplot as plt
import numpy as np
import time
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import arange, sin, pi, random
np.random.seed(1234)
# Global hyper-parameters
sequence_length = 100
random_data_dup = 10 # each sample randomly duplicated between 0 and 9 times, see dropin function
epochs = 1
batch_size = 50
def dropin(X, y):
""" The name suggests the inverse of dropout, i.e. adding more samples. See Data Augmentation section at
http://simaaron.github.io/Estimating-rainfall-from-weather-radar-readings-using-recurrent-neural-networks/
:param X: Each row is a training sequence
    :param y: The target we train on and will later predict
:return: new augmented X, y
"""
print("X shape:", X.shape)
print("y shape:", y.shape)
X_hat = []
y_hat = []
for i in range(0, len(X)):
for j in range(0, np.random.random_integers(0, random_data_dup)):
X_hat.append(X[i, :])
y_hat.append(y[i])
return np.asarray(X_hat), np.asarray(y_hat)
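# Added note (not part of the original file): range(0, j) with j drawn from
# [0, random_data_dup] appends j copies of a sample, so a draw of 0 drops that
# sample from the augmented set entirely rather than keeping a single copy.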
def gen_wave():
""" Generate a synthetic wave by adding up a few sine waves and some noise
:return: the final wave
"""
t = np.arange(0.0, 10.0, 0.01)
wave1 = sin(2 * 2 * pi * t)
noise = random.normal(0, 0.1, len(t))
wave1 = wave1 + noise
print("wave1", len(wave1))
wave2 = sin(2 * pi * t)
print("wave2", len(wave2))
t_rider = arange(0.0, 0.5, 0.01)
wave3 = sin(10 * pi * t_rider)
print("wave3", len(wave3))
insert = round(0.8 * len(t))
wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
return wave1 + wave2
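# Added note (not part of the original file): wave3 is a short burst of 50 samples
# spliced in at 80% of the series (index 800 of 1000 here), which is the synthetic
# anomaly the detector is expected to flag.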
run_network()
| 30.258883 | 110 | 0.631102 |
fc6bfc96896fcc886f4088ddc53f2aa20f638760
| 1,309 |
py
|
Python
|
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
| 38.5 | 290 | 0.593583 |
fc6d62eed45d350cb72c202ceedfb98394117cd4
| 315 |
py
|
Python
|
venv/Lib/site-packages/har2case/__about__.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/har2case/__about__.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/har2case/__about__.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | null | null | null |
__title__ = 'har2case'
__description__ = 'Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.'
__url__ = 'https://github.com/HttpRunner/har2case'
__version__ = '0.2.0'
__author__ = 'debugtalk'
__author_email__ = '[email protected]'
__license__ = 'Apache-2.0'
__copyright__ = 'Copyright 2017 debugtalk'
| 39.375 | 84 | 0.771429 |
fc6eb694558519bbe4bb770c6ebebf2c2317f744
| 2,160 |
py
|
Python
|
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | null | null | null |
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | 10 |
2021-05-01T19:57:06.000Z
|
2022-03-12T00:54:04.000Z
|
app_id_utils.py
|
woctezuma/match-steam-banners
|
dff1bc2ddf35a37bcdea46a220f5d0257d47e017
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
from data_utils import get_data_path, get_image_data_path, get_image_extension
| 29.589041 | 115 | 0.747222 |
fc6f65558ddc1ad343876953a1a42d3ab2c832a9
| 348 |
py
|
Python
|
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
upload.py
|
sjm446/aMAZEd
|
38789f9898097991b19e686fd76ef4abd5bfe94c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import boto3
import random
import os
BUCKET=os.environ.get('EXPORT_S3_BUCKET_URL')
if (BUCKET != None):
s3 = boto3.client('s3')
with open("maze.txt", "rb") as f:
s3.upload_fileobj(f, BUCKET, "maze"+str(random.randrange(100000))+".txt")
else:
print("EXPORT_S3_BUCKET_URL was not set so not uploading file")
| 29 | 81 | 0.692529 |
fc709a454475435a2a06bba2371531a53e2d11c0
| 2,790 |
py
|
Python
|
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/list_realms.py
|
rtzll/zulip
|
b831df8f7fc2f5b89ec998266901ac491d52a7fc
|
[
"Apache-2.0"
] | 1 |
2019-10-14T23:36:14.000Z
|
2019-10-14T23:36:14.000Z
|
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
| 37.702703 | 99 | 0.557348 |
fc7137abb720c86400bb993740cb9e14c54237f5
| 8,892 |
py
|
Python
|
tests/integration/ec2/test_connection.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | 15 |
2015-03-25T05:24:11.000Z
|
2021-12-18T04:24:06.000Z
|
tests/integration/ec2/test_connection.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | null | null | null |
tests/integration/ec2/test_connection.py
|
bopopescu/debpkg_python-boto
|
06f9b6f3693ba1933be8214da69cebcd5212cd97
|
[
"MIT"
] | 10 |
2015-04-26T17:56:37.000Z
|
2020-09-24T14:01:53.000Z
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
| 36.743802 | 77 | 0.587607 |
fc719b76ab94dd3be6cc776d78a4aa8bbc400d47
| 8,761 |
py
|
Python
|
docusign_esign/models/conditional_recipient_rule_filter.py
|
joekohlsdorf/docusign-esign-python-client
|
40407544f79c88716d36fabf36f65c3ef1a5c3ba
|
[
"MIT"
] | 58 |
2017-10-18T23:06:57.000Z
|
2021-04-15T23:14:58.000Z
|
docusign_esign/models/conditional_recipient_rule_filter.py
|
joekohlsdorf/docusign-esign-python-client
|
40407544f79c88716d36fabf36f65c3ef1a5c3ba
|
[
"MIT"
] | 49 |
2017-10-27T05:54:09.000Z
|
2021-04-29T22:06:17.000Z
|
docusign_esign/models/conditional_recipient_rule_filter.py
|
joekohlsdorf/docusign-esign-python-client
|
40407544f79c88716d36fabf36f65c3ef1a5c3ba
|
[
"MIT"
] | 49 |
2017-09-16T07:23:41.000Z
|
2021-05-07T20:21:20.000Z
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return True
return self.to_dict() != other.to_dict()
| 30.526132 | 140 | 0.604155 |
fc72be3f56a4f14c061f7d67ae504148f309762b
| 407 |
py
|
Python
|
conman/routes/apps.py
|
meshy/django-conman
|
c739d09250d02d99068358e925ed8298a2a37a75
|
[
"BSD-2-Clause"
] | null | null | null |
conman/routes/apps.py
|
meshy/django-conman
|
c739d09250d02d99068358e925ed8298a2a37a75
|
[
"BSD-2-Clause"
] | 81 |
2015-07-27T23:21:49.000Z
|
2018-05-21T22:06:09.000Z
|
conman/routes/apps.py
|
meshy/django-conman
|
c739d09250d02d99068358e925ed8298a2a37a75
|
[
"BSD-2-Clause"
] | 2 |
2015-10-06T09:18:06.000Z
|
2017-03-17T08:51:56.000Z
|
from django.apps import AppConfig
from django.core.checks import register
from . import checks
| 25.4375 | 48 | 0.712531 |
fc72c71783ded10cb6bea92ec124528a890dca34
| 627 |
py
|
Python
|
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1 |
2019-05-30T10:10:20.000Z
|
2019-05-30T10:10:20.000Z
|
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | null | null | null |
Other/transactionlog entries since timestamp.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1 |
2017-09-01T03:35:18.000Z
|
2017-09-01T03:35:18.000Z
|
"""
Get all TM1 transactions for all cubes starting from a specific date.
"""
import configparser
config = configparser.ConfigParser()
config.read('..\config.ini')
from datetime import datetime
from TM1py.Services import TM1Service
with TM1Service(**config['tm1srv01']) as tm1:
# Timestamp for Message-Log parsing
timestamp = datetime(year=2018, month=2, day=15, hour=16, minute=2, second=0)
# Get all entries since timestamp
entries = tm1.server.get_transaction_log_entries(since=timestamp)
# loop through entries
for entry in entries:
# Do stuff
print(entry['TimeStamp'], entry)
| 24.115385 | 81 | 0.716108 |
fc73faf5f8cb49244c505ccd988f122eb6b59c66
| 6,019 |
py
|
Python
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 12 |
2018-03-02T21:48:46.000Z
|
2022-01-31T02:58:59.000Z
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 2 |
2018-07-19T01:11:11.000Z
|
2019-04-08T23:54:08.000Z
|
src/patteRNA/Dataset.py
|
AviranLab/patteRNA
|
88b900844016717a71b6ec8e4f2d10d8888600ce
|
[
"BSD-2-Clause"
] | 5 |
2018-03-06T18:13:36.000Z
|
2021-01-08T20:54:28.000Z
|
import logging
import numpy as np
from scipy.stats import entropy
from patteRNA.Transcript import Transcript
from patteRNA import filelib
logger = logging.getLogger(__name__)
| 39.339869 | 113 | 0.6104 |
fc74cac1c4d460520f828712a631b873e6f7c8d7
| 3,118 |
py
|
Python
|
src/Simulation/developer_0/main.py
|
GYRY-NEU/CS7610-Experiments
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | null | null | null |
src/Simulation/developer_0/main.py
|
GYRY-NEU/CS7610-Experiments
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | 1 |
2021-12-02T20:45:02.000Z
|
2021-12-02T20:45:02.000Z
|
src/Simulation/developer_0/main.py
|
GYRY-NEU/Simulation
|
3731b45c4a9cba2a1d7e44d37f28d1046a38de47
|
[
"MIT"
] | null | null | null |
import library
import json
def updateModel(model, list_weights):
"""
    list_weights : 3D list of shape (clientNumber, modelOuter, modelInner)
It contains all the models for each client
"""
# this part will change developer to developer
# one can just take avg
# or one can discard smallest and largest than take average
# this example just takes avg without use of external library
alpha = library.get("alpha")
# getting shape of 3D array
number_clients = len(list_weights)
size_outer = len(list_weights[0])
size_inner = len(list_weights[0][0])
# constructing a new 2D array of zeros of same size
newModel = [ [0 for j in range(size_inner)] for i in range(size_outer)]
# validate new created shape
assert(len(newModel) == size_outer)
assert(len(newModel[0]) == size_inner)
# sum for all the clients
for weights in list_weights:
for outerIndex, outerList in enumerate(weights):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] += innerVal
# average it by number of clients
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] /= number_clients
# now update the model using the learning rate using below formula
# model = (1-a) * model + a * new_model
    # The previous part and this next part could be merged for efficiency, but for readability they are implemented as two loops
# Iterate over model
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
model[outerIndex][innerIndex] *= 1-alpha
model[outerIndex][innerIndex] += alpha * newModel[outerIndex][innerIndex]
# Finally update round number
return model
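# Illustrative numeric check of the update rule above (hypothetical values, not part
# of the original file): with alpha = 0.1, an existing weight of 2.0 and a client
# average of 3.0, the blended weight becomes (1 - 0.1) * 2.0 + 0.1 * 3.0 = 2.1.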
| 29.695238 | 109 | 0.645606 |
fc75b91ed00a53eb686038762f01dc7958ac5d5b
| 172 |
py
|
Python
|
molecule_ignite/test/unit/test_driver.py
|
ragingpastry/molecule-ignite
|
aaf005cabba9a8c933191458cf8553da9bac581d
|
[
"MIT"
] | 17 |
2020-02-19T08:16:49.000Z
|
2022-02-05T08:16:42.000Z
|
molecule_ignite/test/unit/test_driver.py
|
ragingpastry/molecule-ignite
|
aaf005cabba9a8c933191458cf8553da9bac581d
|
[
"MIT"
] | 15 |
2020-06-27T10:16:44.000Z
|
2022-01-04T10:37:54.000Z
|
molecule_ignite/test/unit/test_driver.py
|
ragingpastry/molecule-ignite
|
aaf005cabba9a8c933191458cf8553da9bac581d
|
[
"MIT"
] | 11 |
2020-02-18T16:24:29.000Z
|
2022-03-28T11:44:51.000Z
|
from molecule import api
| 24.571429 | 57 | 0.686047 |
fc7738cdaacc95969a1834885a266a49c73d4c6b
| 12,361 |
py
|
Python
|
coffeine/pipelines.py
|
dengemann/meegpowreg
|
e9cc8f2372f8b8ef4b372bfea113ed0b9646cb39
|
[
"MIT"
] | 6 |
2021-07-19T12:17:59.000Z
|
2021-08-09T15:50:18.000Z
|
coffeine/pipelines.py
|
dengemann/meegpowreg
|
e9cc8f2372f8b8ef4b372bfea113ed0b9646cb39
|
[
"MIT"
] | 23 |
2021-04-16T21:41:36.000Z
|
2021-07-13T10:08:47.000Z
|
coffeine/pipelines.py
|
dengemann/meegpowreg
|
e9cc8f2372f8b8ef4b372bfea113ed0b9646cb39
|
[
"MIT"
] | 5 |
2021-04-15T15:28:51.000Z
|
2021-06-28T21:17:11.000Z
|
import numpy as np
from coffeine.covariance_transformers import (
Diag,
LogDiag,
ExpandFeatures,
Riemann,
RiemannSnp,
NaiveVec)
from coffeine.spatial_filters import (
ProjIdentitySpace,
ProjCommonSpace,
ProjLWSpace,
ProjRandomSpace,
ProjSPoCSpace)
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV, LogisticRegression
def make_filter_bank_transformer(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None):
"""Generate pipeline for filterbank models.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
    covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
# put defaults here for projection and vectorization step
projection_defaults = {
'riemann': dict(scale=1, n_compo='full', reg=1.e-05),
'lw_riemann': dict(shrink=1),
'diag': dict(),
'log_diag': dict(),
'random': dict(n_compo='full'),
'naive': dict(),
'spoc': dict(n_compo='full', scale='auto', reg=1.e-05, shrink=1),
'riemann_wasserstein': dict()
}
vectorization_defaults = {
'riemann': dict(metric='riemann'),
'lw_riemann': dict(metric='riemann'),
'diag': dict(),
'log_diag': dict(),
'random': dict(),
'naive': dict(method='upper'),
'spoc': dict(),
'riemann_wasserstein': dict(rank='full')
}
assert set(projection_defaults) == set(vectorization_defaults)
if method not in projection_defaults:
raise ValueError(
f"The `method` ('{method}') you specified is unknown.")
# update defaults
projection_params_ = projection_defaults[method]
if projection_params is not None:
projection_params_.update(**projection_params)
vectorization_params_ = vectorization_defaults[method]
if vectorization_params is not None:
vectorization_params_.update(**vectorization_params)
# setup pipelines (projection + vectorization step)
steps = tuple()
if method == 'riemann':
steps = (ProjCommonSpace, Riemann)
elif method == 'lw_riemann':
steps = (ProjLWSpace, Riemann)
elif method == 'diag':
steps = (ProjIdentitySpace, Diag)
elif method == 'log_diag':
steps = (ProjIdentitySpace, LogDiag)
elif method == 'random':
steps = (ProjRandomSpace, LogDiag)
elif method == 'naive':
steps = (ProjIdentitySpace, NaiveVec)
elif method == 'spoc':
steps = (ProjSPoCSpace, LogDiag)
elif method == 'riemann_wasserstein':
steps = (ProjIdentitySpace, RiemannSnp)
filter_bank_transformer = make_column_transformer(
*_get_projector_vectorizer(*steps), remainder='passthrough')
if categorical_interaction is not None:
filter_bank_transformer = ExpandFeatures(
filter_bank_transformer, expander_column=categorical_interaction)
return filter_bank_transformer
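# Hedged usage sketch (the column names and toy covariances below are assumptions for
# illustration, not taken from the library's documentation): the transformer expects a
# DataFrame whose listed columns each hold one covariance matrix per row.
#
#   import pandas as pd
#   freqs = ['alpha', 'beta']
#   df_cov = pd.DataFrame({f: [np.eye(4) for _ in range(10)] for f in freqs})
#   fb_transformer = make_filter_bank_transformer(names=freqs, method='riemann')
#   X = fb_transformer.fit_transform(df_cov)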
def make_filter_bank_regressor(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for regression with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
    covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, RidgeCV
is performed with default values.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = RidgeCV(alphas=np.logspace(-3, 5, 100))
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
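# Hedged usage sketch (variable names are illustrative): y would be a 1D target
# aligned with the rows of the covariance DataFrame sketched above.
#
#   fb_model = make_filter_bank_regressor(names=freqs, method='riemann')
#   fb_model.fit(df_cov, y)
#   predictions = fb_model.predict(df_cov)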
def make_filter_bank_classifier(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for classification with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
    covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, LogisticRegression
is performed with default values.
References
----------
[1] D. Sabbagh, P. Ablin, G. Varoquaux, A. Gramfort, and D.A. Engemann.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = LogisticRegression(solver='liblinear')
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
| 37.685976 | 77 | 0.659574 |
fc78a988ef549d86a9021df34f1480ea70a43721
| 11,516 |
py
|
Python
|
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | null | null | null |
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | null | null | null |
submissions/Chouard/mygames.py
|
dysomni/aima-python
|
c67104e50007ec5ac2a9aa37f0cb972cb6315528
|
[
"MIT"
] | 1 |
2018-08-23T19:27:23.000Z
|
2018-08-23T19:27:23.000Z
|
from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
def q2list(mq):
list = []
while not mq.empty():
list.append(mq.get(1).rcv())
return list
def movesInRow(board, r):
mQueue = PriorityQueue()
row = board[r]
for c in range(len(row)):
if isnan(row[c]):
continue
v = row[c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
def movesInCol(board, c):
mQueue = PriorityQueue()
for r in range(len(board)):
if isnan(board[r][c]):
continue
v = board[r][c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
won = GameState(
to_move='H',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='won'
)
won.scores = {'H': 9, 'V': 0}
lost = GameState(
to_move='V',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='lost'
)
lost.scores = {'H': 0, 'V': 9}
winin1 = GameState(
to_move='H',
position=(1, 1),
board=[[nan, nan],
[9, nan]],
label='winin1'
)
losein1 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan],
[9, nan]],
label='losein1'
)
winin2 = GameState(
to_move='H',
position=(0, 0),
board=[[nan, 3, 2],
[nan, 9, nan],
[nan, nan, 1]],
label='winin2'
)
losein2 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan, nan],
[3, 9, nan],
[2, nan, 1]],
label='losein2'
)
losein2.maxDepth = 3
# http://www.kongregate.com/games/zolli/thinkahead-brain-trainer
stolen = GameState(
to_move='H',
position=(3, 1),
board=[[3, 8, 9, 5],
[9, 1, 3, 2],
[8, 6, 4, 4],
[9, nan, 1, 5]],
label='stolen'
)
choose1 = GameState(
to_move='H',
position=(1, 0),
board=[[3, 8, 9, 5],
[nan, 1, 3, 2],
[8, 6, 4, 4],
[nan, nan, 1, 5]],
label='choose1'
)
winby10 = GameState(
to_move='H',
position=(2, 0),
board=[[nan, nan, nan, nan],
[nan, nan, nan, nan],
[nan, 6, 4, 5],
[nan, nan, 1, 3]],
label='winby10'
)
thinkA = ThinkAhead(stolen)
board = '''
***
***
***
'''
'''
Board represents the squares, whether the top, bottom, left, and
right have been filled, and which player owns the square.
'''
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
won = DotLineState(board=dotLineBoard, to_move='A', label='Won', scores={'A': 3, 'B': 1})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
lost = DotLineState(board=dotLineBoard, to_move='A', label='Lost', scores={'A': 1, 'B': 3})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
tied = DotLineState(board=dotLineBoard, to_move='A', label='Tied', scores={'A': 2, 'B': 2})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': '', 'lines': ['T', 'L']}]]
winin1Dots = DotLineState(board=dotLineBoard, to_move='A', label='Win in 1', scores={'A': 2, 'B': 1})
dotLineBoard = [[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['T', 'L']}, {'winner': '', 'lines': ['R']}],
[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}],
[{'winner': '', 'lines': ['B', 'L', 'R']}, {'winner': '', 'lines': ['L', 'B']}, {'winner': '', 'lines': ['B', 'R']}],
]
winIn5_3x3 = DotLineState(board=dotLineBoard, to_move='A', label='Win in 5', scores={'A': 0, 'B': 0})
play = DotLineState(
board=[[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}],
[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}]],
to_move='A', label='Start')
#amended by whh
dotLine = DotsAndLines(play)
#dotLine = DotsAndLines(winIn5_3x3)
myGames = {
dotLine: [
won,
lost,
tied,
winin1Dots,
winIn5_3x3,
play
]
}
| 27.951456 | 133 | 0.500174 |
fc798568d1c4c7d74cc7db30deace979155e8ddb
| 4,797 |
py
|
Python
|
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1 |
2022-02-18T04:02:52.000Z
|
2022-02-18T04:02:52.000Z
|
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
discordbot/stocks/options/opt_chain.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
import os
import df2img
import disnake
import numpy as np
import pandas as pd
from menus.menu import Menu
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import gst_imgur, logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.stocks.options import yfinance_model
| 29.429448 | 94 | 0.518449 |
fc7a203231a6818121284acfef4c18f0b9192863
| 2,312 |
py
|
Python
|
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | 4 |
2021-05-03T16:03:24.000Z
|
2022-02-17T16:08:49.000Z
|
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | null | null | null |
scripts/get_lenderprofit.py
|
xujiahuayz/premfin
|
0e90d876ef7c9ef4f3da7d4842b8ec5ae9ac7e68
|
[
"MIT"
] | 1 |
2021-06-30T11:27:56.000Z
|
2021-06-30T11:27:56.000Z
|
#%% import packages
import numpy as np
import pandas as pd
import multiprocessing
from time import time
import json
from premiumFinance.constants import (
MORTALITY_TABLE_CLEANED_PATH,
PROCESSED_PROFITABILITY_PATH,
)
from premiumFinance.financing import calculate_lender_profit, yield_curve
mortality_experience = pd.read_excel(MORTALITY_TABLE_CLEANED_PATH)
#%% calculate profit rate
lender_coc_value = np.arange(start=0.01, stop=0.2, step=0.01)
#%% tbd
if __name__ == "__main__":
pool = multiprocessing.Pool()
start_time = time()
foo = []
for tempfunc in (tempfunc_t, tempfunc_f):
foo.append(
pool.map(
tempfunc,
lender_coc_value,
)
)
print(f"it took {time() - start_time}")
lender_profitability = {
"lender_coc": lender_coc_value.tolist(),
"profitability": foo,
}
with open(PROCESSED_PROFITABILITY_PATH, "w") as outfile:
json.dump(lender_profitability, outfile)
| 26.574713 | 78 | 0.675606 |
fc7acbb33a6b536b7d8fb8f0b3208c55dac034b1
| 5,700 |
py
|
Python
|
dashboard/dashboard/common/layered_cache.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/common/layered_cache.py
|
BearerPipelineTest/catapult
|
3800a67cd916200046a50748893bbd0dcf3d7f4a
|
[
"BSD-3-Clause"
] | 1 |
2022-01-12T14:28:55.000Z
|
2022-01-12T14:28:55.000Z
|
dashboard/dashboard/common/layered_cache.py
|
atuchin-m/catapult
|
108ea3e2ec108e68216b1250a3d79cc642600294
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Caches processed query results in memcache and datastore.
Memcache is not very reliable for the perf dashboard. Prometheus team explained
that memcache is LRU and shared between multiple applications, so their activity
may result in our data being evicted. To prevent this, we cache processed
query results in the data store. Using NDB, the values are also cached in
memcache if possible. This improves performance because doing a get()
for a key which has a single BlobProperty is much quicker than a complex query
over a large dataset.
(Background: http://g/prometheus-discuss/othVtufGIyM/wjAS5djyG8kJ)
When an item is cached, layered_cache does the following:
1) Namespaces the key based on whether datastore_hooks says the request is
internal_only.
2) Pickles the value (memcache does this internally), and adds a data store
entity with the key and a BlobProperty with the pickled value.
Retrieving values checks memcache via NDB first, and if datastore is used it
unpickles.
When an item is removed from the cache, it is removed from both internal and
external caches, since removals are usually caused by large changes that affect
both caches.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six.moves.cPickle as cPickle
import datetime
import logging
from google.appengine.api import datastore_errors
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from dashboard.common import datastore_hooks
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
def Get(key):
"""Gets the value from the datastore."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(key)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def GetExternal(key):
"""Gets the value from the datastore for the externally namespaced key."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(
key, datastore_hooks.EXTERNAL)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def Set(key, value, days_to_keep=None, namespace=None):
"""Sets the value in the datastore.
Args:
key: The key name, which will be namespaced.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
namespace: Optional namespace, otherwise namespace will be retrieved
using datastore_hooks.GetNamespace().
"""
# When number of days to keep is given, calculate expiration time for
# the entity and store it in datastore.
# Once the entity expires, it will be deleted from the datastore.
expire_time = None
if days_to_keep:
expire_time = datetime.datetime.now() + datetime.timedelta(
days=days_to_keep)
namespaced_key = namespaced_stored_object.NamespaceKey(key, namespace)
try:
CachedPickledString(
id=namespaced_key, value=cPickle.dumps(value),
expire_time=expire_time).put()
except datastore_errors.BadRequestError as e:
logging.warning('BadRequestError for key %s: %s', key, e)
except apiproxy_errors.RequestTooLargeError as e:
stored_object.Set(key, value)
def SetExternal(key, value, days_to_keep=None):
"""Sets the value in the datastore for the externally namespaced key.
Needed for things like /add_point that update internal/external data at the
same time.
Args:
key: The key name, which will be namespaced as externally_visible.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
"""
Set(key, value, days_to_keep, datastore_hooks.EXTERNAL)
def DeleteAllExpiredEntities():
"""Deletes all expired entities from the datastore."""
ndb.delete_multi(CachedPickledString.GetExpiredKeys())
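# Hedged usage sketch (key, value, and retention period below are illustrative only):
#   layered_cache.Set('report_names', ['speed', 'memory'], days_to_keep=30)
#   names = layered_cache.Get('report_names')  # served from memcache via NDB when possible, else datastore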
| 35.403727 | 80 | 0.759649 |
fc7af78333f9477d4091f8e1379313d34cf45c32
| 2,241 |
py
|
Python
|
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
hypergbm/tests/cuml_/run_experiment_cuml.py
|
BigAndSweet/HyperGBM
|
f3bc4e0d877b82a264d35158f9bc974f43a2a5ee
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
"""
import cudf
from hypergbm import make_experiment
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
if __name__ == '__main__':
main(target='y', reward_metric='auc', ensemble_size=10, pos_label='yes', log_level='info', max_trials=10)
# main(target='y', max_trials=10, cv=False, ensemble_size=0, verbose=0, pos_label='yes', )
# main(target='day', reward_metric='f1', ensemble_size=10, log_level='info', max_trials=5)
# main(target='day', dtype='str', reward_metric='f1', ensemble_size=0, log_level='info', max_trials=6)
# main(target='age', dtype='float', ensemble_size=10, log_level='info', max_trials=8)
| 35.571429 | 109 | 0.630076 |
fc7fb355e0004487d0ead15c251476f2cd39193b
| 2,658 |
py
|
Python
|
datasets/imagenet.py
|
xhchrn/open_lth
|
6b3d04a12a2f868ce851bd09b330ea57957c1de6
|
[
"MIT"
] | 9 |
2021-03-30T20:43:26.000Z
|
2021-12-28T06:25:17.000Z
|
datasets/imagenet.py
|
xhchrn/open_lth
|
6b3d04a12a2f868ce851bd09b330ea57957c1de6
|
[
"MIT"
] | null | null | null |
datasets/imagenet.py
|
xhchrn/open_lth
|
6b3d04a12a2f868ce851bd09b330ea57957c1de6
|
[
"MIT"
] | 2 |
2021-03-31T01:19:48.000Z
|
2021-08-02T13:41:32.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import concurrent.futures
import numpy as np
import os
from PIL import Image
import torchvision
from datasets import base
from platforms.platform import get_platform
DataLoader = base.DataLoader
| 33.225 | 115 | 0.677201 |
fc80bac77478aab3a7595a594bb0b4822d3d20bb
| 8,746 |
py
|
Python
|
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
sm4.py
|
ZelKnow/sm4
|
2bb232f46a5033b2d89ce097e004e53eb13d90d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : sm4.py
@Description : sm4
@Date : 2021/10/28 15:59:51
@Author : ZelKnow
@Github : https://github.com/ZelKnow
"""
__author__ = "ZelKnow"
from argparse import ArgumentParser, ArgumentError
from binascii import hexlify, unhexlify
from utils import S_BOX, BLOCK_BYTE, FK, CK, BLOCK_HEX
from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding
ENCRYPT = 0  # encryption mode
DECRYPT = 1  # decryption mode
if __name__ == '__main__':
parser = ArgumentParser(description="SM4")
parser.add_argument('crypt', choices=['encrypt', 'decrypt'], help='')
parser.add_argument('mode', choices=['ecb', 'cbc'], help='')
parser.add_argument('source', help='/')
parser.add_argument('key', help='')
parser.add_argument('--iv', help='cbc')
parser.add_argument('--source_type',
choices=['input', 'bin_file', 'image'],
help='',
default='input')
parser.add_argument('--output', help='')
args = parser.parse_args()
c = CryptSM4()
c.set_key(args.key)
if args.mode == 'cbc' and args.iv is None:
        raise ArgumentError(None, "CBC mode requires an initialization vector (--iv)")
if args.source_type == 'input':
input = args.source
if input[:2].lower() == '0x':
input = int(input[2:], 16)
elif args.source_type == 'bin_file':
with open(args.source, 'rb') as f:
input = f.read()
else:
from PIL import Image
import numpy as np
source = Image.open(args.source)
img = np.array(source.convert('RGBA'))
shape = img.shape
size = img.size
input = unhexlify(''.join([num2hex(i, width=2)
for i in img.flatten()]))
if args.crypt == 'encrypt':
output = c.encrypt_ECB(input) if args.mode == 'ecb' else c.encrypt_CBC(
input, args.iv)
else:
output = c.decrypt_ECB(input) if args.mode == 'ecb' else c.decrypt_CBC(
input, args.iv)
if args.source_type == 'image':
output = hexlify(output).decode()
output = output[:size * 2]
output = [[int(output[i + j:i + j + 2], 16) for j in range(0, 8, 2)]
for i in range(0, len(output), 8)]
output = np.array(output)
output = Image.fromarray(output.reshape(shape).astype('uint8'))
output.save(args.output)
elif args.output:
with open(args.output, "wb") as f:
f.write(output)
else:
try:
print(output.decode())
except:
print(hexlify(output).decode())
| 27.677215 | 81 | 0.511091 |
fc80de56a04c7ef4be7293dbeb997c760a19a788
| 1,750 |
py
|
Python
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 5 |
2017-05-15T07:21:29.000Z
|
2022-03-02T01:01:47.000Z
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 2 |
2017-05-15T07:57:36.000Z
|
2021-09-23T06:22:34.000Z
|
sendotp/sendotp.py
|
saadmk11/sendotp-python
|
b0cd5c3da969d00a753d9614c5bea0e2978859c9
|
[
"MIT"
] | 10 |
2017-05-29T06:53:42.000Z
|
2020-05-22T10:29:00.000Z
|
import json
import requests
from random import randint
| 26.515152 | 81 | 0.552 |
fc8179bb642e9880741040dd5588b31584a47da9
| 528 |
py
|
Python
|
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
leetcode/1021-remove-outermost-parentheses.py
|
tjeubaoit/algorithm
|
a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
# s = '(()())(())'
# s = '(()())(())(()(()))'
s = '()()'
ret = Solution().removeOuterParentheses(s)
print(ret)
| 22.956522 | 52 | 0.350379 |
fc8204c97c600cb469a44cb790b0f13a27763c8c
| 115 |
py
|
Python
|
venv/Scripts/ex049.py
|
SamuelNunesDev/starting_point_in_python
|
9a9e39cabb5f3526ee0037012e3943898c1d9dfa
|
[
"MIT"
] | null | null | null |
venv/Scripts/ex049.py
|
SamuelNunesDev/starting_point_in_python
|
9a9e39cabb5f3526ee0037012e3943898c1d9dfa
|
[
"MIT"
] | null | null | null |
venv/Scripts/ex049.py
|
SamuelNunesDev/starting_point_in_python
|
9a9e39cabb5f3526ee0037012e3943898c1d9dfa
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um número para ver sua tabuada: '))
for c in range(0, 11):
print(f'{n} * {c} = {n * c}')
| 28.75 | 57 | 0.556522 |
fc821cb035c84e746a53eb83ce1e63b3b5c31ae6
| 9,911 |
py
|
Python
|
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
js2py/evaljs.py
|
inprod/Js2Py
|
0af8cb100b7840e23358d220c685507163f2344e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from .translators import translate_js, DEFAULT_HEADER
from .es6 import js6_to_js5
import sys
import time
import json
import six
import os
import hashlib
import codecs
__all__ = [
'EvalJs', 'translate_js', 'import_js', 'eval_js', 'translate_file',
'eval_js6', 'translate_js6', 'run_file', 'disable_pyimport',
'get_file_contents', 'write_file_contents'
]
DEBUG = False
def import_js(path, lib_name, globals):
"""Imports from javascript source file.
globals is your globals()"""
with codecs.open(path_as_local(path), "r", "utf-8") as f:
js = f.read()
e = EvalJs()
e.execute(js)
var = e.context['var']
globals[lib_name] = var.to_python()
def translate_file(input_path, output_path):
'''
Translates input JS file to python and saves the it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
    Now example.py can be easily imported and used:
>>> from example import example
>>> example.a(30)
30
'''
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
lib_name)
tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
out = head + py_code + tail
write_file_contents(output_path, out)
def run_file(path_or_file, context=None):
    ''' Context must be an EvalJs object. Runs the given path as a JS program. Returns (eval_value, context).
'''
if context is None:
context = EvalJs()
if not isinstance(context, EvalJs):
raise TypeError('context must be the instance of EvalJs')
eval_value = context.eval(get_file_contents(path_or_file))
return eval_value, context
def eval_js(js):
"""Just like javascript eval. Translates javascript to python,
executes and returns python object.
js is javascript source code
EXAMPLE:
>>> import js2py
>>> add = js2py.eval_js('function add(a, b) {return a + b}')
>>> add(1, 2) + 3
6
>>> add('1', 2, 3)
u'12'
>>> add.constructor
function Function() { [python code] }
NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.
For Js functions and objects, returns Python wrapper - basically behaves like normal python object.
If you really want to convert object to python dict you can use to_dict method.
"""
e = EvalJs()
return e.eval(js)
def eval_js6(js):
"""Just like eval_js but with experimental support for js6 via babel."""
return eval_js(js6_to_js5(js))
def translate_js6(js):
"""Just like translate_js but with experimental support for js6 via babel."""
return translate_js(js6_to_js5(js))
#print x
if __name__ == '__main__':
#with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f:
# x = f.read()
e = EvalJs()
e.execute('square(x)')
#e.execute(x)
e.console()
| 34.058419 | 117 | 0.613258 |
fc82e467705d5e2af949a070febf617a72de9774
| 152 |
py
|
Python
|
setup.py
|
mvduin/py-uio
|
1ad5eb6e1cfeae722535fd6ed7e485a0afd84683
|
[
"MIT"
] | 38 |
2016-04-23T06:43:00.000Z
|
2022-03-17T17:06:59.000Z
|
setup.py
|
mvduin/py-uio
|
1ad5eb6e1cfeae722535fd6ed7e485a0afd84683
|
[
"MIT"
] | 6 |
2016-05-13T04:42:38.000Z
|
2020-10-16T13:16:02.000Z
|
setup.py
|
mvduin/py-uio
|
1ad5eb6e1cfeae722535fd6ed7e485a0afd84683
|
[
"MIT"
] | 8 |
2016-05-11T16:56:07.000Z
|
2019-09-11T09:54:22.000Z
|
#!/usr/bin/python3
from setuptools import setup, find_packages
setup(
package_dir = { '': 'src' },
packages = find_packages( where='src' ),
)
| 16.888889 | 44 | 0.651316 |
fc836898d52446a26166934c5d0c314e5b3ac86f
| 24,883 |
py
|
Python
|
tools/verity_utils.py
|
FabriSC/Alioth-SC
|
bbe9723401b351c2a34b09a30978373d456d20a2
|
[
"MIT"
] | 3 |
2022-03-16T12:31:10.000Z
|
2022-03-23T04:20:20.000Z
|
bin/verity_utils.py
|
affggh/NH4RomTool
|
84b06f9cc5f268c14c7af25e91c8b242188c70f7
|
[
"Apache-2.0"
] | null | null | null |
bin/verity_utils.py
|
affggh/NH4RomTool
|
84b06f9cc5f268c14c7af25e91c8b242188c70f7
|
[
"Apache-2.0"
] | 1 |
2022-03-30T04:47:35.000Z
|
2022-03-30T04:47:35.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os.path
import shlex
import struct
import common
import sparse_img
from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
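# The exception raised by the helpers below is not defined in this excerpt; a
# minimal stand-in (assumption: a plain Exception subclass) is sketched here.
class BuildVerityImageError(Exception):
    """Raised when building the verity image fails."""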
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
unsparse_image_path: the path to the (unsparse) image
Raises:
BuildVerityImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
try:
common.RunAndCheckOutput(cmd)
except:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def Append(target, file_to_append, error_message):
"""Appends file_to_append to target.
Raises:
BuildVerityImageError: On error.
"""
try:
with open(target, 'ab') as out_file, \
open(file_to_append, 'rb') as input_file:
for line in input_file:
out_file.write(line)
except IOError:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
Args:
prop_dict: A dict that contains the build properties. In particular, it will
look for verity-related property values.
Returns:
A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
None if the given build doesn't support Verified Boot.
"""
partition_size = prop_dict.get("partition_size")
# partition_size could be None at this point, if using dynamic partitions.
if partition_size:
partition_size = int(partition_size)
# Verified Boot 1.0
verity_supported = prop_dict.get("verity") == "true"
is_verity_partition = "verity_block_device" in prop_dict
if verity_supported and is_verity_partition:
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path
else:
signer_path = prop_dict["verity_signer_cmd"]
return Version1VerityImageBuilder(
partition_size,
prop_dict["verity_block_device"],
prop_dict.get("verity_fec") == "true",
signer_path,
prop_dict["verity_key"] + ".pk8",
OPTIONS.verity_signer_args,
"verity_disable" in prop_dict)
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
# Image uses hash footer.
if prop_dict.get("avb_hash_enable") == "true":
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hash_footer_args"])
# Image uses hashtree footer.
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hashtree_footer_args"])
return None
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
generator = None
if (info_dict.get("verity") == "true" and
info_dict.get("{}_verity_block_device".format(partition_name))):
partition_size = info_dict["{}_size".format(partition_name)]
fec_supported = info_dict.get("verity_fec") == "true"
generator = VerifiedBootVersion1HashtreeInfoGenerator(
partition_size, block_size, fec_supported)
return generator
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
if info_dict.get("avb_enable") == "true":
builder = VerifiedBootVersion2VerityImageBuilder(
partition_name,
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
info_dict.get("avb_avbtool"),
key_path,
algorithm,
# Salt is None because custom images have no fingerprint property to be
# used as the salt.
None,
signing_args)
return builder
| 34.704324 | 80 | 0.710405 |
fc840d0dac6246e96ea6db4d9f0daf705ae65cf7
| 4,091 |
py
|
Python
|
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | 8 |
2015-02-04T23:03:36.000Z
|
2021-05-02T10:56:24.000Z
|
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | null | null | null |
orgviz/dones.py
|
tkf/orgviz
|
81a436265daa1fb8294a0186f50df76d9599ae38
|
[
"MIT"
] | 3 |
2018-04-23T08:18:13.000Z
|
2019-10-12T17:32:54.000Z
|
#!/usr/bin/env python
"""org archive to html table converter"""
import os
import datetime
import itertools
from .utils.date import minutestr, total_minutes
def rootname_from_archive_olpath(node):
"""
Find rootname from ARCHIVE_OLPATH property.
Return None if not found.
"""
olpath = node.get_property('ARCHIVE_OLPATH')
if olpath:
olpathlist = olpath.split('/', 1)
if len(olpathlist) > 1:
(rootname, dummy) = olpathlist
else:
rootname = olpath
return rootname
return None
def find_rootname(node):
"""
Find rootname given node
"""
rootname = rootname_from_archive_olpath(node)
if not rootname:
n = node
p = node.parent
while not p.is_root():
n = p
p = p.parent
# n is root node
rootname = rootname_from_archive_olpath(n) or n.heading
return rootname
def key_row_from_node(node):
"""
Return three tuple (key, row) whose elemens are
key object for sorting table and dictionary which has following
keywords: heading, closed, scheduled, effort, clocksum, rootname.
"""
heading = node.heading
# find rootname
rootname = find_rootname(node)
if heading == rootname:
rootname = ""
# calc clocksum if CLOCK exists
clocksum = ''
clocklist = node.clock
if clocklist:
clocksum = sum([total_minutes(c.duration) for c in clocklist])
closed = node.closed
scheduled = node.scheduled
effort = node.get_property('Effort')
row = dict(
heading=heading,
closed=closed and closed.start.strftime('%a %d %b %H:%M'),
scheduled=scheduled and scheduled.start.strftime('%a %d %b %H:%M'),
effort=effort and minutestr(effort),
clocksum=clocksum and minutestr(clocksum),
rootname=rootname,
)
return (closed.start if closed else None, row)
def table_add_oddday(key_table):
"""
    Add an 'oddday' key to each row of key_table *IN PLACE*.
Note that key should be a ``datetime.date`` object.
"""
previous = None
odd = True
for (key, row) in key_table:
this = key
if not sameday(this, previous):
odd = not odd
row['oddday'] = odd
previous = this
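# Hypothetical helper assumed by table_add_oddday above (its definition is
# elided in this excerpt): compare two keys on calendar day, tolerating None.
def sameday(date1, date2):
    if date1 is None or date2 is None:
        return False
    return (date1.year, date1.month, date1.day) == (date2.year, date2.month, date2.day)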
def get_data(orgnodes_list, orgpath_list, done, num=100):
"""
    Get data for rendering the jinja2 template. Data is a dictionary like this:
    table: list of `row`
        list of rows generated by ``key_row_from_node``
orgpathname_list: list of `orgpathname`
orgpathname: dict
contains `orgpath` and `orgname`.
`orgname` is short and unique name for `orgpath`.
title: str
a title
"""
key_table = []
orgname_list = unique_name_from_paths(orgpath_list)
for (nodelist, orgname) in zip(orgnodes_list, orgname_list):
for node in nodelist:
if node.todo == done:
(key, row) = key_row_from_node(node)
if key:
row['orgname'] = orgname
key_table.append((key, row))
orgpathname_list = [
dict(orgpath=orgpath, orgname=orgname)
for (orgpath, orgname) in zip(orgpath_list, orgname_list)]
key_table.sort(reverse=True)
table_add_oddday(key_table)
table = list(itertools.islice((row for (key, row) in key_table), num))
return dict(table=table, orgpathname_list=orgpathname_list,
title='Recently archived tasks')
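# Hypothetical sketch of the helper used by get_data (elided in this excerpt):
# derive a short display name per org file path. Uniqueness of basenames is an
# assumption; the real helper may disambiguate duplicates differently.
def unique_name_from_paths(orgpath_list):
    return [os.path.splitext(os.path.basename(path))[0] for path in orgpath_list]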
| 29.014184 | 75 | 0.60792 |
fc884ea5bc9215fa52b9d78882591e5166747f7f
| 2,927 |
py
|
Python
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3 |
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 4 |
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
tests/python/unittest/test_lang_tag.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3 |
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
| 31.138298 | 80 | 0.60164 |
fc8877ba165d31590d2a4b69afb04a74049c312a
| 2,374 |
py
|
Python
|
doepy/case_studies/discrete_time/MSFB2014.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | 1 |
2020-04-23T13:43:35.000Z
|
2020-04-23T13:43:35.000Z
|
doepy/case_studies/discrete_time/MSFB2014.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | null | null | null |
doepy/case_studies/discrete_time/MSFB2014.py
|
scwolof/doepy
|
acb2cad95428de2c14b28563cff1aa30679e1f39
|
[
"MIT"
] | 1 |
2021-06-13T14:38:32.000Z
|
2021-06-13T14:38:32.000Z
|
"""
MIT License
Copyright (c) 2019 Simon Olofsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.integrate import odeint
from ..continuous_time import MSFB2014
"""
A. Mesbah, S. Streif, R. Findeisen and R. D. Braatz (2014)
"Active fault diagnosis for nonlinear systems with probabilistic uncertainties"
IFAC Proceedings (2014): 7079-7084
"""
def get ():
return DataGen(), [M1(), M2(), M3()]
| 27.287356 | 79 | 0.717776 |
fc88c81b50b3710bc62276602ff44a775a8cb6eb
| 11,840 |
py
|
Python
|
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | 1 |
2020-06-12T07:21:56.000Z
|
2020-06-12T07:21:56.000Z
|
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | null | null | null |
house_code/tutorials_altered/3D_positioning_and_orientation.py
|
mukobi/Pozyx-Gabe
|
a8b444c2013b1df5043cd25106b72562409b5130
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
The pozyx ranging demo (c) Pozyx Labs
please check out https://www.pozyx.io/Documentation/Tutorials/getting_started/Python
This demo requires one (or two) pozyx shields. It demonstrates the 3D orientation and the functionality
to remotely read register data from a pozyx device. Connect one of the Pozyx devices with USB and run this script.
This demo reads the following sensor data:
- pressure
- acceleration
- magnetic field strength
- angular velocity
- the heading, roll and pitch
- the quaternion rotation describing the 3D orientation of the device. This can be used to transform from the body coordinate system to the world coordinate system.
- the linear acceleration (the acceleration excluding gravity)
- the gravitational vector
The data can be viewed in the Processing sketch orientation_3D.pde
"""
from time import time
from time import sleep
from pypozyx import *
from pypozyx.definitions.bitmasks import POZYX_INT_MASK_IMU
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.udp_client import SimpleUDPClient
from modules.user_input_config_functions import UserInputConfigFunctions as UserInput
from modules.file_writing import SensorAndPositionFileWriting as FileWriting
from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging
import time as t
if __name__ == '__main__':
# shortcut to not have to find out the port yourself
serial_port = get_serial_ports()[0].device
remote_id = 0x6110 # remote device network ID
remote = True # whether to use a remote device
# if not remote:
# remote_id = None
index = 0
previous_cycle_time = 0
current_cycle_time = 0
attributes_to_log = ["acceleration"]
to_use_file = False
filename = None
"""User input configuration section, comment out to use above settings"""
remote = UserInput.use_remote()
remote_id = UserInput.get_remote_id(remote)
to_use_file = UserInput.use_file()
filename = UserInput.get_filename(to_use_file)
attributes_to_log = UserInput.get_multiple_attributes_to_log()
use_processing = True
ip = "127.0.0.1"
network_port = 8888
anchors = [DeviceCoordinates(0x6863, 1, Coordinates(0, 0, 2000)),
DeviceCoordinates(0x615a, 1, Coordinates(0, 18288, 1000)),
DeviceCoordinates(0x607c, 1, Coordinates(18288, 0, 1000)),
DeviceCoordinates(0x6134, 1, Coordinates(18288, 18288, 2000))]
# algorithm = POZYX_POS_ALG_UWB_ONLY # positioning algorithm to use
algorithm = POZYX_POS_ALG_TRACKING # tracking positioning algorithm
dimension = POZYX_3D # positioning dimension
height = 1000 # height of device, required in 2.5D positioning
pozyx = PozyxSerial(serial_port)
osc_udp_client = SimpleUDPClient(ip, network_port)
o = Orientation3D(pozyx, osc_udp_client, anchors, algorithm, dimension, height, remote_id)
o.setup()
logfile = None
if to_use_file:
logfile = open(filename, 'a')
FileWriting.write_sensor_and_position_header_to_file(logfile)
start = ConsoleLogging.get_time()
try:
while True:
# updates elapsed time and time difference
elapsed = ConsoleLogging.get_elapsed_time(ConsoleLogging, start)
previous_cycle_time = current_cycle_time
current_cycle_time = elapsed
time_difference = current_cycle_time - previous_cycle_time
            # store the loop() result: either a tuple or an error message
loop_results = o.loop()
if type(loop_results) == tuple:
one_cycle_sensor_data, one_cycle_position = loop_results
formatted_data_dictionary = ConsoleLogging.format_sensor_data(
one_cycle_sensor_data, attributes_to_log)
if type(formatted_data_dictionary) == dict:
formatted_data_dictionary["Position"] = [
"x:", one_cycle_position.x, "y:", one_cycle_position.y, "z:", one_cycle_position.z]
ConsoleLogging.log_sensor_data_to_console(index, elapsed, formatted_data_dictionary)
if to_use_file:
FileWriting.write_sensor_and_position_data_to_file(
index, elapsed, time_difference,
logfile, one_cycle_sensor_data, one_cycle_position)
            # if loop() didn't return a tuple, it returned an error string
else:
error_string = loop_results
ConsoleLogging.print_data_error_message(index, elapsed, error_string)
index += 1 # increment data index
    # this allows Windows users to exit the while loop by pressing ctrl+c
except KeyboardInterrupt:
pass
if to_use_file:
logfile.close()
| 44.179104 | 164 | 0.661064 |
fc8903dace15225a2f4484e8807d8da8761b6a96
| 2,761 |
py
|
Python
|
hdfs_kernel/exceptions.py
|
Jasper912/jupyter-hdfs-kernel
|
4b933cab675cb908a1d2332f040c7fce697fce61
|
[
"MIT"
] | 3 |
2019-10-28T02:52:46.000Z
|
2019-12-24T09:11:48.000Z
|
hdfs_kernel/exceptions.py
|
Jasper912/jupyter-hdfs-kernel
|
4b933cab675cb908a1d2332f040c7fce697fce61
|
[
"MIT"
] | null | null | null |
hdfs_kernel/exceptions.py
|
Jasper912/jupyter-hdfs-kernel
|
4b933cab675cb908a1d2332f040c7fce697fce61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding=utf-8 -*-
#
# Author: huangnj
# Time: 2019/09/27
import traceback
from functools import wraps
from hdfs_kernel.constants import EXPECTED_ERROR_MSG, INTERNAL_ERROR_MSG
from hdfs.util import HdfsError
# == EXCEPTIONS ==
# option parse Error
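# The exception classes referenced in EXPECTED_EXCEPTIONS below are elided in
# this excerpt; minimal stand-ins (assumption: plain Exception subclasses) are
# sketched here.
class SessionManagementException(Exception):
    pass

class CommandNotAllowedException(Exception):
    pass

class CommandExecuteException(Exception):
    pass

class OptionParsingError(Exception):
    pass

class OptionParsingExit(Exception):
    pass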
# == DECORATORS FOR EXCEPTION HANDLING ==
EXPECTED_EXCEPTIONS = [HdfsError, SessionManagementException, CommandNotAllowedException,
CommandExecuteException, OptionParsingExit, OptionParsingError]
def handle_expected_exceptions(f):
"""A decorator that handles expected exceptions. Self can be any object with
an "ipython_display" attribute.
Usage:
@handle_expected_exceptions
def fn(self, ...):
etc..."""
exceptions_to_handle = tuple(EXPECTED_EXCEPTIONS)
# Notice that we're NOT handling e.DataFrameParseException here. That's because DataFrameParseException
# is an internal error that suggests something is wrong with LivyClientLib's implementation.
return wrapped
def wrap_unexpected_exceptions(f, execute_if_error=None):
"""A decorator that catches all exceptions from the function f and alerts the user about them.
Self can be any object with a "logger" attribute and a "ipython_display" attribute.
All exceptions are logged as "unexpected" exceptions, and a request is made to the user to file an issue
at the Github repository. If there is an error, returns None if execute_if_error is None, or else
returns the output of the function execute_if_error.
Usage:
@wrap_unexpected_exceptions
def fn(self, ...):
..etc """
return wrapped
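# A minimal, hedged sketch of the wrapper bodies elided above (the attribute
# and method names on `self`, and the message formatting, are assumptions):
# catch an expected exception, surface it via self.ipython_display, and return
# None instead of propagating.
def _sketch_handle_expected_exceptions(f):
    exceptions_to_handle = tuple(EXPECTED_EXCEPTIONS)

    @wraps(f)
    def wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except exceptions_to_handle as err:
            self.ipython_display.send_error(EXPECTED_ERROR_MSG.format(err))  # assumed method
            return None

    return wrapped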
| 32.482353 | 119 | 0.694314 |
fc898bad2efa60cee2c4ef24696c39c99d84411b
| 8,634 |
py
|
Python
|
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
dashboard/tests/test_inventory.py
|
vishalvvr/transtats
|
ec71f40b338cab36eb907f6faba262dfeb858b80
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.inventory import InventoryManager
from dashboard.models import Product
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData
)
db_fixture = DjangoFixture(style=NamedDataStyle())
| 38.717489 | 101 | 0.67848 |
fc8994bb32a375675e5f3f534446419ae71a9b08
| 9,221 |
py
|
Python
|
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
web_console_v2/api/fedlearner_webconsole/rpc/server.py
|
nolanliou/fedlearner
|
54127c465b3b5d77ae41b823e42efbc1b707e826
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except, cyclic-import
import logging
import threading
from concurrent import futures
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc,
common_pb2
)
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState
)
from fedlearner_webconsole.exceptions import (
UnauthorizedException
)
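# The RpcServer class instantiated below is elided in this excerpt. A minimal,
# hedged sketch of its likely shape (the worker count, port, and servicer
# wiring are assumptions, not the project's real values):
class RpcServer(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._started = False
        self._server = None

    def start(self, port=1990):  # port number is an assumption
        with self._lock:
            if self._started:
                return
            self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
            # Registering the generated servicer from service_pb2_grpc is elided here.
            self._server.add_insecure_port('[::]:{}'.format(port))
            self._server.start()
            self._started = True

    def stop(self):
        with self._lock:
            if self._started:
                self._server.stop(None)
                self._started = False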
rpc_server = RpcServer()
| 39.74569 | 78 | 0.615335 |
fc8d8f892ae337d196878475837e1f9cc4ae1047
| 956 |
py
|
Python
|
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | 9 |
2021-07-30T07:57:55.000Z
|
2021-12-30T12:38:21.000Z
|
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | null | null | null |
chapter15/async_aiohttp.py
|
haru-256/ExpertPython3_Source
|
5ef412ef217c6078248ff9546e23ed9b69aadcff
|
[
"MIT"
] | 2 |
2021-09-05T11:39:50.000Z
|
2021-09-17T05:27:37.000Z
|
"""
Asynchronous HTTP requests with aiohttp
"""
import asyncio
import time
import aiohttp
from asyncrates import get_rates
SYMBOLS = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
BASES = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
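# A minimal sketch of the elided coroutine driven below (assumption: get_rates,
# imported from asyncrates above, is an awaitable taking a shared session and a
# base currency): fetch all base currencies concurrently over one session.
async def main():
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*(get_rates(session, base) for base in BASES))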
if __name__ == "__main__":
started = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
elapsed = time.time() - started
print()
print(f": {elapsed:.2f}s")
| 20.782609 | 75 | 0.641213 |
fc8f2034e17191e49d05923f9d20c0bfa677c7bb
| 4,433 |
py
|
Python
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 15 |
2017-02-08T04:02:50.000Z
|
2021-02-20T16:47:25.000Z
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 1 |
2020-02-01T00:29:32.000Z
|
2020-02-04T14:25:57.000Z
|
experiments/nginx/run.py
|
OleksiiOleksenko/intel_mpx_explained
|
dd6da57e0fcf22df358d1a742079b414620a7c88
|
[
"MIT"
] | 3 |
2017-02-08T04:02:51.000Z
|
2018-03-30T07:58:45.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
from time import sleep
from subprocess import Popen, PIPE
import socket
from core.common_functions import *
from core.run import Runner
| 36.336066 | 268 | 0.551771 |
fc9015f57ef9e7d95a77ff43b17dddea2b3da5aa
| 698 |
py
|
Python
|
tavi/test/unit/base/document_no_fields_test.py
|
verdammelt/tavi
|
3bb39a6e6ab936f6e9511a4058817697e3df098b
|
[
"MIT"
] | null | null | null |
tavi/test/unit/base/document_no_fields_test.py
|
verdammelt/tavi
|
3bb39a6e6ab936f6e9511a4058817697e3df098b
|
[
"MIT"
] | null | null | null |
tavi/test/unit/base/document_no_fields_test.py
|
verdammelt/tavi
|
3bb39a6e6ab936f6e9511a4058817697e3df098b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from tavi.base.documents import BaseDocument
| 27.92 | 64 | 0.7149 |
fc9074fa7981c7335753a01097902625525ccf9a
| 17,367 |
py
|
Python
|
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | 1 |
2021-06-08T17:24:41.000Z
|
2021-06-08T17:24:41.000Z
|
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | null | null | null |
seqparse/test/test_seqparse.py
|
hoafaloaf/seqparse
|
1d2446070c5627a5cb880d00ef327b892b4dedef
|
[
"MIT"
] | 1 |
2021-10-05T15:44:07.000Z
|
2021-10-05T15:44:07.000Z
|
"""Test file sequence discovery on disk."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import os
import unittest
# Third Party Libraries
import mock
from builtins import range
from future.utils import lrange
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import (__version__, get_parser, get_sequence, get_version, invert,
validate_frame_sequence)
from ..sequences import FileSequence, FrameChunk, FrameSequence
###############################################################################
# class: TestSeqparseModule
def test_add_file_sequence(self):
"""Seqparse: Test file sequence addition via seqparse.add_file."""
input_file = ".".join((self._test_file_name, "0005", self._test_ext))
input_file = os.path.join(self._test_root, input_file)
# Expected outputs ...
input_frame_seq = "0000-0004"
output_frame_seq = "0000-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
output_file_seq = ".".join(
(self._test_file_name, output_frame_seq, self._test_ext))
output_file_seq = os.path.join(self._test_root, output_file_seq)
print("\n\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
input_frame_seq = "0000-0002,,0003-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
print("\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
def test_api_calls(self):
"""Seqparse: Test API calls at root of module."""
chunk = FrameChunk(first=1, last=7, step=2, pad=4)
seq = get_sequence(lrange(1, 8, 2), pad=4)
self.assertTrue(isinstance(seq, FrameSequence))
self.assertEqual(str(seq), "0001-0007x2")
expected = FrameChunk(first=2, last=6, step=2, pad=4)
inverted = invert(chunk)
self.assertEqual(str(inverted), str(expected))
inverted = invert(seq)
self.assertEqual(str(inverted), str(expected))
with self.assertRaises(TypeError):
invert(get_parser())
self.assertEqual(get_version(), __version__)
| 35.442857 | 79 | 0.604653 |
fc918b97de90432887a40a5ee37a55b41149a19b
| 12,666 |
py
|
Python
|
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | null | null | null |
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | null | null | null |
deliveroo_scraping.py
|
ragreener1/deliveroo-scraping
|
c8e3de2503a6198734904fb937a77dd38ef05581
|
[
"MIT"
] | 1 |
2021-03-16T16:43:34.000Z
|
2021-03-16T16:43:34.000Z
|
import urllib.request
import pandas as pd
import sqlite3
import re
from bs4 import BeautifulSoup
# Parameters
postcodes_list = ["W1F7EY"]
db_name = "scraped.db"
# This is so that Deliveroo thinks the scraper is Google Chrome
# as opposed to a web scraper
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11' +
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*' +
';q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
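# A heavily hedged sketch of the elided scraping driver called below: the URL
# pattern, HTML selector, and table schema are placeholders, not Deliveroo's
# real markup or the project's real schema.
def process_all_restaurants(postcodes_df, db_name):
    with sqlite3.connect(db_name) as conn:
        for postcode in postcodes_df['post_code']:
            url = 'https://deliveroo.co.uk/restaurants?postcode=' + postcode  # placeholder URL
            request = urllib.request.Request(url, headers=hdr)
            soup = BeautifulSoup(urllib.request.urlopen(request).read(), 'html.parser')
            names = [tag.get_text(strip=True) for tag in soup.find_all('h3')]  # placeholder selector
            pd.DataFrame({'post_code': postcode, 'name': names}).to_sql(
                'restaurants', conn, if_exists='append', index=False)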
if __name__ == "__main__":
postcodes_df = pd.DataFrame({
'post_code': postcodes_list
})
process_all_restaurants(postcodes_df, db_name)
| 41.527869 | 107 | 0.614243 |
fc91ef07b59bde91306bd73bcec484e360b1298a
| 108 |
py
|
Python
|
wouso/core/security/admin.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 117 |
2015-01-02T18:07:33.000Z
|
2021-01-06T22:36:25.000Z
|
wouso/core/security/admin.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 229 |
2015-01-12T07:07:58.000Z
|
2019-10-12T08:27:01.000Z
|
wouso/core/security/admin.py
|
AlexandruGhergut/wouso
|
f26244ff58ae626808ae8c58ccc93d21f9f2666f
|
[
"Apache-2.0"
] | 96 |
2015-01-07T05:26:09.000Z
|
2020-06-25T07:28:51.000Z
|
from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
| 21.6 | 45 | 0.833333 |
fc91ff3844434233ea3b10e4ec6bd4eb042adfbd
| 2,455 |
py
|
Python
|
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
DataWrangling/TTNData2Gsheet_Auto.py
|
diliprk/SmartCityVisualization
|
618cd433c2f6bb55042c643ccaef12b5814ccb77
|
[
"MIT"
] | null | null | null |
#### Reading data from The Things Network and automatically storing it to a Google Spreadsheet
# Author: Dilip Rajkumar
# Email: [email protected]
# Date: 19/01/2018
# Revision: version#1
# License: MIT License
import pandas as pd
import requests
from df2gspread import df2gspread as d2g
import time
## Set Initial Time Duration in mins to query TTN Data:
time_duration = 5
# Insert spreadsheet file id of Google Spreadsheet
spreadsheet = '1ftXlebCTDp5tTxvlm5K3Sv1oNttDHR7s1xTi-i-ZR_o' ## Google SpreadSheet Title: TTN_Live_DataLogger
# Insert Sheet Name
wks_name = 'Sheet1'
def queryttndata(time_duration):
'''
    This function queries data from the TTN Swagger API for the given time duration (in minutes)
'''
headers = {'Accept': 'application/json','Authorization': 'key ttn-account-v2.P4kRaEqenNGbIdFSgSLDJGMav5K9YrekkMm_F1lOVrw'}
## Set query duration in minutes
querytime = str(time_duration) + 'm'
params = (('last', querytime),)
response = requests.get('https://vehiclecounter.data.thethingsnetwork.org/api/v2/query', headers=headers, params=params).json()
df_raw = pd.DataFrame.from_dict(response)
return df_raw
def cleandf(df):
'''
    Clean and optimize the raw dataframe built from the TTN JSON response.
    This function is customized and unique to every dataset.
'''
df.rename(columns={'time': 'TTNTimeStamp'}, inplace=True)
df['TTNTimeStamp'] = pd.to_datetime(df['TTNTimeStamp'])
df['TTNTimeStamp'] = df['TTNTimeStamp'] + pd.Timedelta(hours=1) ## Offset Time by 1 hour to fix TimeZone Error of Swagger API TimeStamps
df['TTNTimeStamp'] = df['TTNTimeStamp'].values.astype('datetime64[s]')
drop_cols = ['raw','device_id']
df = df.drop(drop_cols, 1)
df.reset_index()
df = df.reindex(['TTNTimeStamp','Count'], axis=1)
print("Latest Data:")
print(df.tail(1),'\n')
return df
while True:
#begin your infinite loop
df_raw = queryttndata(time_duration)
df_clean = cleandf(df_raw)
d2g.upload(df_clean, spreadsheet,wks_name,col_names=True,clean=True) # Write dataframe to Google Spreadsheet
df_clean.to_csv('TTN_VehicleCountData.csv', date_format="%d/%m/%Y %H:%M:%S",index=True) # Save DataFrame locally
time.sleep(60) # Call function every 60 seconds
    time_duration += 1 ## Increment query duration by 1 min at the end of every function call
| 41.610169 | 140 | 0.714053 |
fc92382e8eb5b5b2b839d82c3970e59959dd78f5
| 5,870 |
py
|
Python
|
tests/share/normalize/test_xml.py
|
felliott/SHARE
|
8fd60ff4749349c9b867f6188650d71f4f0a1a56
|
[
"Apache-2.0"
] | 87 |
2015-01-06T18:24:45.000Z
|
2021-08-08T07:59:40.000Z
|
tests/share/normalize/test_xml.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 442 |
2015-01-01T19:16:01.000Z
|
2022-03-30T21:10:26.000Z
|
tests/share/normalize/test_xml.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 67 |
2015-03-10T16:32:58.000Z
|
2021-11-12T16:33:41.000Z
|
import xmltodict
from share.transform.chain import * # noqa
EXAMPLE = '''
<entry>
<id>http://arxiv.org/abs/cond-mat/0102536v1</id>
<updated>2001-02-28T20:12:09Z</updated>
<published>2001-02-28T20:12:09Z</published>
<title>Impact of Electron-Electron Cusp
on Configuration Interaction Energies</title>
<summary> The effect of the electron-electron cusp on the convergence of configuration
interaction (CI) wave functions is examined. By analogy with the
pseudopotential approach for electron-ion interactions, an effective
electron-electron interaction is developed which closely reproduces the
scattering of the Coulomb interaction but is smooth and finite at zero
electron-electron separation. The exact many-electron wave function for this
smooth effective interaction has no cusp at zero electron-electron separation.
We perform CI and quantum Monte Carlo calculations for He and Be atoms, both
with the Coulomb electron-electron interaction and with the smooth effective
electron-electron interaction. We find that convergence of the CI expansion of
the wave function for the smooth electron-electron interaction is not
significantly improved compared with that for the divergent Coulomb interaction
for energy differences on the order of 1 mHartree. This shows that, contrary to
popular belief, description of the electron-electron cusp is not a limiting
factor, to within chemical accuracy, for CI calculations.
</summary>
<author>
<name>David Prendergast</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>M. Nolan</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">NMRC, University College, Cork, Ireland</arxiv:affiliation>
</author>
<author>
<name>Claudia Filippi</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>Stephen Fahy</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>J. C. Greer</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">NMRC, University College, Cork, Ireland</arxiv:affiliation>
</author>
<arxiv:doi xmlns:arxiv="http://arxiv.org/schemas/atom">10.1063/1.1383585</arxiv:doi>
<link title="doi" href="http://dx.doi.org/10.1063/1.1383585" rel="related"/>
<arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">11 pages, 6 figures, 3 tables, LaTeX209, submitted to The Journal of
Chemical Physics</arxiv:comment>
<arxiv:journal_ref xmlns:arxiv="http://arxiv.org/schemas/atom">J. Chem. Phys. 115, 1626 (2001)</arxiv:journal_ref>
<link href="http://arxiv.org/abs/cond-mat/0102536v1" rel="alternate" type="text/html"/>
<link title="pdf" href="http://arxiv.org/pdf/cond-mat/0102536v1" rel="related" type="application/pdf"/>
<arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="cond-mat.str-el" scheme="http://arxiv.org/schemas/atom"/>
<category term="cond-mat.str-el" scheme="http://arxiv.org/schemas/atom"/>
</entry>
'''
| 52.410714 | 1,152 | 0.729131 |
fc92c4f874ac07a82bec9855ab06be0ea305a134
| 4,323 |
py
|
Python
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | 14 |
2020-07-13T08:15:27.000Z
|
2021-02-17T21:22:22.000Z
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | 4 |
2020-08-18T18:50:00.000Z
|
2021-12-04T07:09:12.000Z
|
alleycat/reactive/property.py
|
mysticfall/alleycat-reactive
|
69ff2f283627a6c613b084677be707234b29164c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
| 30.443662 | 115 | 0.609762 |
fc933a1a213897fe8cf98ce98bd1c72358bf800c
| 16,945 |
py
|
Python
|
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | 3 |
2022-02-22T19:39:40.000Z
|
2022-02-28T14:34:19.000Z
|
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
| 62.759259 | 2,465 | 0.618353 |
fc9452d9c2ee3ebb7757e04a3373872166150cb1
| 3,700 |
py
|
Python
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 4 |
2019-01-12T07:32:48.000Z
|
2019-08-01T12:11:33.000Z
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 12 |
2019-08-06T02:45:31.000Z
|
2022-02-10T00:16:32.000Z
|
utils/mask/converter.py
|
csgcmai/cvat
|
074500de7bf638fdf66f3874b80df9e87d58a746
|
[
"MIT"
] | 11 |
2018-11-04T19:04:59.000Z
|
2018-12-02T13:30:22.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
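# A hedged sketch of the elided main(): the CVAT XML layout assumed here
# (<image name/width/height> elements containing <polygon points="x,y;x,y;...">
# children) and the single foreground colour are simplifications; per-label
# colours from --label-color are not handled in this sketch.
def main():
    args = parse_args()
    root = etree.parse(args.cvat_xml).getroot()
    channels = 1 if args.mask_bitness == 8 else 3
    background = tuple(int(v) for v in args.background_color.split(','))[:channels]
    for image_tag in tqdm(root.iter('image'), desc='Writing masks'):
        width = int(image_tag.get('width'))
        height = int(image_tag.get('height'))
        mask = np.full((height, width, channels), background, dtype=np.uint8)
        for polygon in image_tag.iter('polygon'):
            points = np.array([[float(v) for v in pt.split(',')]
                               for pt in polygon.get('points').split(';')], dtype=np.int32)
            cv2.fillPoly(mask, [points], (255,) * channels)
        out_name = os.path.splitext(os.path.basename(image_tag.get('name')))[0] + '.png'
        cv2.imwrite(os.path.join(args.output_dir, out_name), mask)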
if __name__ == "__main__":
main()
| 31.355932 | 94 | 0.603784 |
fc95ea517dd66fa33c7319705ecac90ffc97a9fe
| 2,099 |
py
|
Python
|
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
examples/plot_afq_callosal.py
|
gkiar/pyAFQ
|
fb6985c2a9715a378e1ca94dc89f6bc966c60ab5
|
[
"BSD-2-Clause"
] | null | null | null |
"""
==========================
Callosal bundles using AFQ API
==========================
An example using the AFQ API to find callosal bundles using the templates from:
http://hdl.handle.net/1773/34926
"""
import os.path as op
import plotly
from AFQ import api
from AFQ.mask import RoiMask
import AFQ.data as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ---------------------
# We make this tracking_params which we will pass to the AFQ object
# which specifies that we want 100,000 seeds randomly distributed
# in the ROIs of every bundle.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(seed_mask=RoiMask(),
n_seeds=10000,
random_seeds=True,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify bundle_info as the default bundles list (api.BUNDLES) plus the
# callosal bundle list. This tells the AFQ object to use bundles from both
# the standard and callosal templates.
myafq = api.AFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
dmriprep='vistasoft',
bundle_info=api.BUNDLES + api.CALLOSUM_BUNDLES,
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# This would run the script and visualize the bundles using the plotly
# interactive visualization, which should automatically open in a
# new browser window.
bundle_html = myafq.viz_bundles(export=True, n_points=50)
plotly.io.show(bundle_html[0])
| 34.409836 | 79 | 0.552644 |
fc97538b8a2ee01ca9533565fe27426b9b8b241a
| 7,170 |
py
|
Python
|
latest/probe.py
|
Soldie/Nscan-scanner-ip
|
4a507ca97a9f8b7f3fa4766c835f108671dbbcd6
|
[
"Apache-2.0"
] | 574 |
2015-01-30T13:02:42.000Z
|
2022-03-13T17:12:12.000Z
|
latest/probe.py
|
DiamondLink/Nscan
|
21a8986358107e5b86952cf9276510d14afc5ab6
|
[
"Apache-2.0"
] | 10 |
2015-01-31T15:36:21.000Z
|
2021-11-17T10:46:33.000Z
|
latest/probe.py
|
DiamondLink/Nscan
|
21a8986358107e5b86952cf9276510d14afc5ab6
|
[
"Apache-2.0"
] | 173 |
2015-01-30T13:05:36.000Z
|
2022-01-22T10:18:10.000Z
|
import time
import Queue
import random
import socket
import struct
import logging
import threading
from convert import *
from protocol import ethernet, ip, tcp, udp
ETH_P_IP = 0x0800 # IP protocol
ETH_P_ALL = 0x0003 # Every packet
NSCRIPT_PATH = 'nscript' # NSCRIPT PATH
PAYLOAD = {
53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'
'google\x03com\x00\x00\x01\x00\x01'), # 'google.com' DNS Lookup
161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'
'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'
'\x05\x2b\x06\x01\x02\x01\x05\x00'), # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1
123:('\x17\x00\x02\x05'), # NTP systats commands lacks 38 null bytes (just to save bandwidth)
1900:('M-SEARCH * HTTP/1.1\r\nHOST: 239.255.255.250:1900\r\n'
'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')
}
def Extract(packet):
src = socket.inet_ntoa(packet[12:16])
srcp = struct.unpack('!H', packet[20:22])[0]
return src, srcp
def Alive(thread_list):
''' check if thread is alive '''
alive = False
for t in thread_list:
if t.isAlive():
alive = True
break
return alive
| 28.228346 | 103 | 0.64728 |
fc991b843e076978c3f0a3ec13e52528bd5dcb1e
| 2,157 |
py
|
Python
|
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
parsy-backend/flaskApp/assignment/views.py
|
dstambler17/Parsy.io
|
14c4905809f79f191efbbbdfbd0e8d9e838478e7
|
[
"MIT"
] | null | null | null |
import sys
from flask import Blueprint, request, jsonify
from flaskApp import db
from flaskApp.assignment.utils import *
from flaskApp.error.error_handlers import *
import json
from flaskApp.helpers import getAssignmentData
assignment = Blueprint('assignment', __name__)
'''Test method, keep just in case. Will probably be moved to a separate API designed to
interact with just the MySQL database that the data pipeline will drop data into.'''
| 39.944444 | 88 | 0.72369 |
fc992600a7f421e186b8dbe2ed6b420847313d4c
| 1,473 |
py
|
Python
|
python/patterns/slidingwindow/longest_substring_no_repeating_char.py
|
dharmik-thakkar/dsapatterns
|
fc5890a86c5d49097b73b6afd14e1a4e81cff7a0
|
[
"Apache-2.0"
] | null | null | null |
python/patterns/slidingwindow/longest_substring_no_repeating_char.py
|
dharmik-thakkar/dsapatterns
|
fc5890a86c5d49097b73b6afd14e1a4e81cff7a0
|
[
"Apache-2.0"
] | null | null | null |
python/patterns/slidingwindow/longest_substring_no_repeating_char.py
|
dharmik-thakkar/dsapatterns
|
fc5890a86c5d49097b73b6afd14e1a4e81cff7a0
|
[
"Apache-2.0"
] | null | null | null |
#######################################################################################################################
# Given a string, find the length of the longest substring which has no repeating characters.
#
# Input: String="aabccbb"
# Output: 3
# Explanation: The longest substring without any repeating characters is "abc".
#
# Input: String="abbbb"
# Output: 2
# Explanation: The longest substring without any repeating characters is "ab".
#
# Input: String="abccde"
# Output: 3
# Explanation: Longest substrings without any repeating characters are "abc" & "cde".
#######################################################################################################################
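# A hedged sketch of the function exercised below (its definition is elided in
# this excerpt): the classic sliding-window approach, tracking the last index
# at which each character was seen.
def longest_substring_no_repeating_char(s):
    window_start = 0
    longest = 0
    last_seen = {}
    for window_end, char in enumerate(s):
        if char in last_seen and last_seen[char] >= window_start:
            window_start = last_seen[char] + 1  # jump past the previous occurrence
        last_seen[char] = window_end
        longest = max(longest, window_end - window_start + 1)
    return longest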
print(longest_substring_no_repeating_char('aabccbb'))
print(longest_substring_no_repeating_char('abbbb'))
print(longest_substring_no_repeating_char('abccde'))
print(longest_substring_no_repeating_char('abcabcbb'))
print(longest_substring_no_repeating_char('bbbbb'))
print(longest_substring_no_repeating_char('pwwkew'))
| 40.916667 | 119 | 0.620502 |
fc998d9236fe5233cce504679e6058e6b30ca879
| 1,411 |
py
|
Python
|
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
|
jrderek/Big_Data_Engineering_Portfolio
|
bf7a5efb24f2c6e860e5ead544dadc08f791814e
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '.')
from pyspark import SparkContext, SparkConf
from commons.Utils import Utils
if __name__ == "__main__":
'''
Create a Spark program to read the airport data from in/airports.text;
generate a pair RDD with airport name being the key and country name being the value.
Then remove all the airports which are located in United States and output the pair RDD to out/airports_not_in_usa_pair_rdd.text
Each row of the input file contains the following columns:
Airport ID, Name of airport, Main city served by airport, Country where airport is located,
IATA/FAA code, ICAO Code, Latitude, Longitude, Altitude, Timezone, DST, Timezone in Olson format
Sample output:
("Kamloops", "Canada")
("Wewak Intl", "Papua New Guinea")
...
'''
conf = SparkConf().setAppName("airports").setMaster("local[*]")
sc = SparkContext(conf=conf)
airportsRDD = sc.textFile("inputs/airports.text")
airportPairRDD = airportsRDD.map(lambda line:
(Utils.COMMA_DELIMITER.split(line)[1],
Utils.COMMA_DELIMITER.split(line)[3]))
airportsNotInUSA = airportPairRDD.filter(
lambda keyValue: keyValue[1] != "\"United States\"")
airportsNotInUSA.saveAsTextFile(
"outputs/airports_not_in_usa_pair_rdd.text")
| 35.275 | 133 | 0.659816 |
fc99f86b822d06474cc1888107ef3e865f27a1cd
| 16,668 |
py
|
Python
|
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
linux/keyman-config/keyman_config/keyboard_details.py
|
srl295/keyman
|
4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Keyboard details window
import logging
import json
from os import path
import qrcode
import tempfile
import gi
from gi.repository import Gtk
from keyman_config import KeymanComUrl, _, secure_lookup
from keyman_config.accelerators import init_accel
from keyman_config.kmpmetadata import parsemetadata
gi.require_version('Gtk', '3.0')
# basics: keyboard name, package version, description
# other things: filename (of kmx), ,
# OSK availability, documentation availability, package copyright
# also: supported languages, fonts
# from kmx?: keyboard version, encoding, layout type
# there is data in kmp.inf/kmp.json
# there is possibly data in kbid.json (downloaded from api)
| 45.917355 | 146 | 0.567075 |
fc9cde2fe4aa2f639814b61454d79983712258b4
| 918 |
py
|
Python
|
build_osx/copy_runtime.py
|
ozsolarwind/SAM
|
0967b0a4be8f8924ec1ad915a14575ac22c4ec3c
|
[
"MIT"
] | null | null | null |
build_osx/copy_runtime.py
|
ozsolarwind/SAM
|
0967b0a4be8f8924ec1ad915a14575ac22c4ec3c
|
[
"MIT"
] | null | null | null |
build_osx/copy_runtime.py
|
ozsolarwind/SAM
|
0967b0a4be8f8924ec1ad915a14575ac22c4ec3c
|
[
"MIT"
] | 1 |
2019-05-21T23:16:17.000Z
|
2019-05-21T23:16:17.000Z
|
import os
import shutil
SOURCE_DIR = '../deploy/runtime'
TARGET_DIR = 'SAM.app/Contents/runtime'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/solar_resource'
TARGET_DIR = 'SAM.app/Contents/solar_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/wind_resource'
TARGET_DIR = 'SAM.app/Contents/wind_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/libraries'
TARGET_DIR = 'SAM.app/Contents/libraries'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
| 26.228571 | 78 | 0.769063 |
fc9e17c4eb3df368c94743c5840b9e4da127a474
| 11,837 |
py
|
Python
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 236 |
2015-12-29T22:50:03.000Z
|
2022-03-28T21:12:34.000Z
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 2,628 |
2015-12-27T09:45:13.000Z
|
2022-03-30T16:18:25.000Z
|
codalab/lib/path_util.py
|
kl-chou/codalab-worksheets
|
101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7
|
[
"Apache-2.0"
] | 87 |
2015-12-30T01:36:46.000Z
|
2022-03-08T15:21:30.000Z
|
"""
path_util contains helpers for working with local filesystem paths.
There are a few classes of methods provided here:
Functions to normalize paths and check that they are in normal form:
normalize, check_isvalid, check_isdir, check_isfile, path_is_url
Functions to list directories and to deal with subpaths of paths:
safe_join, get_relative_path, ls, recursive_ls
Functions to read files to compute hashes, write results to stdout, etc:
getmtime, get_size, hash_directory, hash_file_contents
Functions that modify that filesystem in controlled ways:
copy, make_directory, set_write_permissions, rename, remove
"""
import errno
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
from typing import Optional
from codalab.common import precondition, UsageError, parse_linked_bundle_url
from codalab.lib import file_util
from codalab.worker.file_util import get_path_size
# Block sizes and canonical strings used when hashing files.
BLOCK_SIZE = 0x40000
FILE_PREFIX = 'file'
LINK_PREFIX = 'link'
def path_error(message, path):
"""
Raised when a user-supplied path causes an exception.
"""
return UsageError(message + ': ' + path)
################################################################################
# Functions to normalize paths and check that they are in normal form.
################################################################################
def normalize(path):
"""
Return the absolute path of the location specified by the given path.
This path is returned in a "canonical form", without ~'s, .'s, ..'s.
"""
if path == '-':
return '/dev/stdin'
elif path_is_url(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def check_isvalid(path, fn_name):
"""
Raise a PreconditionViolation if the path is not absolute or normalized.
Raise a UsageError if the file at that path does not exist.
"""
precondition(os.path.isabs(path), '%s got relative path: %s' % (fn_name, path))
# Broken symbolic links are valid paths, so we use lexists instead of exists.
if not os.path.lexists(path):
raise path_error('%s got non-existent path:' % (fn_name,), path)
def check_isdir(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is not a directory.
"""
check_isvalid(path, fn_name)
if not os.path.isdir(path):
raise path_error('%s got non-directory:' % (fn_name,), path)
def check_isfile(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is a directory.
"""
check_isvalid(path, fn_name)
if os.path.isdir(path):
raise path_error('%s got directory:' % (fn_name,), path)
################################################################################
# Functions to list directories and to deal with subpaths of paths.
################################################################################
def safe_join(*paths):
"""
Join a sequence of paths but filter out any that are empty. Used for targets.
Note that os.path.join has this functionality EXCEPT at the end of the list,
which causes problems when a target subpath is empty.
"""
return os.path.join(*[_f for _f in paths if _f])
def get_relative_path(root, path):
"""
Return the relative path from root to path, which should be nested under root.
"""
precondition(path.startswith(root), '%s is not under %s' % (path, root))
return path[len(root) :]
def ls(path):
"""
Return a (list of directories, list of files) in the given directory.
"""
check_isdir(path, 'ls')
(directories, files) = ([], [])
for file_name in os.listdir(path):
if os.path.isfile(os.path.join(path, file_name)):
files.append(file_name)
else:
directories.append(file_name)
return (directories, files)
def recursive_ls(path):
"""
Return a (list of directories, list of files) in the given directory and
all of its nested subdirectories. All paths returned are absolute.
Symlinks are returned in the list of files, even if they point to directories.
This makes it possible to distinguish between real and symlinked directories
when computing the hash of a directory. This function will NOT descend into
symlinked directories.
"""
check_isdir(path, 'recursive_ls')
(directories, files) = ([], [])
for (root, _, file_names) in os.walk(path):
assert os.path.isabs(root), 'Got relative root in os.walk: %s' % (root,)
directories.append(root)
for file_name in file_names:
files.append(os.path.join(root, file_name))
# os.walk ignores symlinks to directories, but we should count them as files.
        # However, we can't use the followlinks parameter, because a) we don't want
# to descend into directories and b) we could end up in an infinite loop if
# we were to pass that flag. Instead, we handle symlinks here:
for subpath in os.listdir(root):
full_subpath = os.path.join(root, subpath)
if os.path.islink(full_subpath) and os.path.isdir(full_subpath):
files.append(full_subpath)
return (directories, files)
################################################################################
# Functions to read files to compute hashes, write results to stdout, etc.
################################################################################
def getmtime(path):
"""
Like os.path.getmtime, but does not follow symlinks.
"""
return os.lstat(path).st_mtime
def get_size(path, dirs_and_files=None):
"""
Get the size (in bytes) of the file or directory at or under the given path.
Does not include symlinked files and directories.
"""
if parse_linked_bundle_url(path).uses_beam:
return get_path_size(path)
if os.path.islink(path) or not os.path.isdir(path):
return os.lstat(path).st_size
dirs_and_files = dirs_and_files or recursive_ls(path)
return sum(os.lstat(path).st_size for path in itertools.chain(*dirs_and_files))
def hash_directory(path, dirs_and_files=None):
"""
Return the hash of the contents of the folder at the given path.
This hash is independent of the path itself - if you were to move the
    directory and call hash_directory again, you would get the same result.
"""
if parse_linked_bundle_url(path).uses_beam:
# On Azure Blob Storage, we just use the directory size for the hashed contents.
return get_size(path)
(directories, files) = dirs_and_files or recursive_ls(path)
# Sort and then hash all directories and then compute a hash of the hashes.
# This two-level hash is necessary so that the overall hash is unambiguous -
# if we updated directory_hash with the directory names themselves, then
# we'd be hashing the concatenation of these names, which could be generated
# in multiple ways.
directory_hash = hashlib.sha1()
for directory in sorted(directories):
relative_path = get_relative_path(path, directory)
directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
# Use a similar two-level hashing scheme for all files, but incorporate a
# hash of both the file name and contents.
file_hash = hashlib.sha1()
for file_name in sorted(files):
relative_path = get_relative_path(path, file_name)
file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
file_hash.update(hash_file_contents(file_name).encode())
# Return a hash of the two hashes.
overall_hash = hashlib.sha1(directory_hash.hexdigest().encode())
overall_hash.update(file_hash.hexdigest().encode())
return overall_hash.hexdigest()
def hash_file_contents(path):
"""
Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
"""
message = 'hash_file called with relative path: %s' % (path,)
precondition(os.path.isabs(path), message)
if os.path.islink(path):
contents_hash = hashlib.sha1(LINK_PREFIX.encode())
contents_hash.update(os.readlink(path).encode())
else:
contents_hash = hashlib.sha1(FILE_PREFIX.encode())
with open(path, 'rb') as file_handle:
while True:
data = file_handle.read(BLOCK_SIZE)
if not data:
break
contents_hash.update(data)
return contents_hash.hexdigest()
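# Illustrative note on the hashing helpers above (hypothetical paths, not part
# of the original module): both hashes are keyed on paths relative to the
# directory root, so moving the whole directory does not change its hash:
#
#     h1 = hash_directory('/tmp/bundle_a')
#     # ...move /tmp/bundle_a to /tmp/bundle_b...
#     h2 = hash_directory('/tmp/bundle_b')
#     assert h1 == h2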
################################################################################
# Functions that modify the filesystem in controlled ways.
################################################################################
def copy(source_path: str, dest_path: str, follow_symlinks: Optional[bool] = False):
"""
Copy |source_path| to |dest_path|.
Assume dest_path doesn't exist.
|follow_symlinks|: whether to follow symlinks
Note: this only works in Linux.
"""
if os.path.exists(dest_path):
raise path_error('already exists', dest_path)
if source_path == '/dev/stdin':
with open(dest_path, 'wb') as dest:
file_util.copy(
sys.stdin,
dest,
autoflush=False,
print_status='Copying %s to %s' % (source_path, dest_path),
)
else:
if not follow_symlinks and os.path.islink(source_path):
raise path_error('not following symlinks', source_path)
if not os.path.exists(source_path):
raise path_error('does not exist', source_path)
command = [
'rsync',
'-pr%s' % ('L' if follow_symlinks else 'l'),
source_path
+ ('/' if not os.path.islink(source_path) and os.path.isdir(source_path) else ''),
dest_path,
]
if subprocess.call(command) != 0:
raise path_error('Unable to copy %s to' % source_path, dest_path)
def make_directory(path):
"""
Create the directory at the given path.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
check_isdir(path, 'make_directory')
def remove(path):
"""
Remove the given path, whether it is a directory, file, or link.
"""
if parse_linked_bundle_url(path).uses_beam:
from apache_beam.io.filesystems import FileSystems
if not FileSystems.exists(path):
FileSystems.delete([path])
return
check_isvalid(path, 'remove')
set_write_permissions(path) # Allow permissions
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except shutil.Error:
pass
else:
os.remove(path)
if os.path.exists(path):
print('Failed to remove %s' % path)
def soft_link(source, path):
"""
Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path"
"""
check_isvalid(source, 'soft_link')
os.symlink(source, path)
| 35.334328 | 103 | 0.630396 |
fc9e3eb40f996353595bada2ec265eba3f86bf6f
| 25,948 |
py
|
Python
|
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | 1 |
2022-01-24T15:17:37.000Z
|
2022-01-24T15:17:37.000Z
|
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/regression/tests/test_glsar_gretl.py
|
aliavni/statsmodels
|
ef5d57a8d45de76a895e9401705280d558d688ad
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| 40.1051 | 100 | 0.581509 |
fc9e5fe1655adc75064f69de338759361c073b11
| 2,563 |
py
|
Python
|
core/views.py
|
tweeprint/api.tweeprint.com
|
248525f2cffffb20765e7eca1e7a63f359adfc1b
|
[
"MIT"
] | 1 |
2021-03-15T07:24:10.000Z
|
2021-03-15T07:24:10.000Z
|
core/views.py
|
tweeprint/api.tweeprint.com
|
248525f2cffffb20765e7eca1e7a63f359adfc1b
|
[
"MIT"
] | 1 |
2021-04-11T01:22:24.000Z
|
2021-04-11T01:22:24.000Z
|
core/views.py
|
tweeprint/api.tweeprint.com
|
248525f2cffffb20765e7eca1e7a63f359adfc1b
|
[
"MIT"
] | null | null | null |
import requests
import django.contrib.auth as auth
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse, Http404
from django.contrib.auth.decorators import login_required
from django.core.serializers import serialize
from core.serializers import *
from core.models import *
from core.secrets import API_TOKEN, STRIPE_API_KEY
import json
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
| 46.6 | 188 | 0.717909 |
fc9e61eaa32db4519807b4a581c10259cbb744fa
| 342 |
py
|
Python
|
src/framed/bioreactor/__init__.py
|
cdanielmachado/framed
|
36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345
|
[
"Apache-2.0"
] | 25 |
2015-01-07T16:17:03.000Z
|
2022-01-24T09:11:50.000Z
|
src/framed/bioreactor/__init__.py
|
cdanielmachado/framed
|
36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345
|
[
"Apache-2.0"
] | 12 |
2016-02-18T12:50:09.000Z
|
2020-12-18T08:56:44.000Z
|
src/framed/bioreactor/__init__.py
|
cdanielmachado/framed
|
36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345
|
[
"Apache-2.0"
] | 14 |
2015-02-17T14:55:27.000Z
|
2021-08-09T17:57:57.000Z
|
from __future__ import absolute_import
__author__ = 'kaizhuang'
"""
Package implementing features for simulating bioreactor operation.
"""
from .base import Organism, Bioreactor
from .bioreactors import ANAEROBIC, AEROBIC, MICROAEROBIC
from .bioreactors import Bioreactor_ox, IdealBatch, IdealFedbatch
from framed.bioreactor.dfba import *
| 26.307692 | 66 | 0.821637 |
fc9f46c8893676c05776e06d3412234d7de6b4e4
| 311 |
py
|
Python
|
shared/templates/coreos_kernel_option/template.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 1,138 |
2018-09-05T06:31:44.000Z
|
2022-03-31T03:38:24.000Z
|
shared/templates/coreos_kernel_option/template.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 4,743 |
2018-09-04T15:14:04.000Z
|
2022-03-31T23:17:57.000Z
|
shared/templates/coreos_kernel_option/template.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 400 |
2018-09-08T20:08:49.000Z
|
2022-03-30T20:54:32.000Z
|
from ssg.utils import parse_template_boolean_value
| 38.875 | 108 | 0.794212 |
fc9fa91745bf91e0ba0d83de869daec634544f40
| 377 |
py
|
Python
|
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | 1 |
2021-12-20T18:18:52.000Z
|
2021-12-20T18:18:52.000Z
|
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | null | null | null |
pondus/backends/__init__.py
|
enicklas/pondus
|
c94edce0351697c96f2ad046e8f602448d2e0df0
|
[
"MIT"
] | 2 |
2021-12-20T18:18:57.000Z
|
2022-01-11T10:28:22.000Z
|
# -*- coding: UTF-8 -*-
"""
This file is part of Pondus, a personal weight manager.
Copyright (C) 2011 Eike Nicklas <[email protected]>
This program is free software licensed under the MIT license. For details
see LICENSE or http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ['csv_backend', 'sportstracker_backend', 'xml_backend',
'xml_backend_old']
| 29 | 73 | 0.713528 |
fca036e2b91d7c4599177ff08add5e2065d64d53
| 1,053 |
py
|
Python
|
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
setup.py
|
specialprocedures/chpy
|
3bbe66da96abe95653722682754b4d48f9c8eba1
|
[
"MIT"
] | null | null | null |
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="chpy",
version="0.1.1",
description="Build networks from the Companies House API",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/specialprocedures/chpy",
author="Ian Goodrich",
# author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=["collections", "time", "math", "re", "os"]),
include_package_data=True,
# install_requires=["networkx", "pandas", "progressbar", "fuzzywuzzy",
# "os", "requests", "math", "time", "collections", "re"]
)
| 31.909091 | 81 | 0.633428 |
fca03cd82b40377a907a5c97cfd27492d8e5ee1d
| 2,275 |
py
|
Python
|
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/eventtypes/error.py
|
boblail/sentry
|
71127331e58791d4651e480b65dd66f06cadc1c8
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import six
from sentry.utils.safe import get_path, trim
from sentry.utils.strings import truncatechars
from .base import BaseEvent
| 31.597222 | 90 | 0.586813 |
fca090b48edd697b40f1657878d06ed9d6efca81
| 909 |
py
|
Python
|
keras_en_parser_and_analyzer/library/tests/test_detect_date.py
|
Sultan91/keras-english-resume-parser-and-analyzer
|
221407cb0231e4c21f8edc61a2b19b74f9585d6a
|
[
"MIT"
] | null | null | null |
keras_en_parser_and_analyzer/library/tests/test_detect_date.py
|
Sultan91/keras-english-resume-parser-and-analyzer
|
221407cb0231e4c21f8edc61a2b19b74f9585d6a
|
[
"MIT"
] | null | null | null |
keras_en_parser_and_analyzer/library/tests/test_detect_date.py
|
Sultan91/keras-english-resume-parser-and-analyzer
|
221407cb0231e4c21f8edc61a2b19b74f9585d6a
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from datetime import date
from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date
| 36.36 | 86 | 0.669967 |
fca0ce34b50df500e879e8841c0ceca83a278655
| 8,333 |
py
|
Python
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 604 |
2015-08-08T22:44:25.000Z
|
2022-03-30T11:51:23.000Z
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 213 |
2015-08-04T20:11:22.000Z
|
2022-03-30T18:08:15.000Z
|
capirca/lib/ipset.py
|
google-admin/capirca
|
8c9e66456fedb3c0fc1c641dbefc41793e5c68d5
|
[
"Apache-2.0"
] | 207 |
2015-08-07T10:55:00.000Z
|
2022-03-02T17:07:34.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
the performance of an iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
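# Conceptually, an ipset-backed policy pairs a kernel address set with a single
# iptables rule that references it; an illustrative shell sketch (not literal
# output of this generator):
#
#     ipset create allowed-nets hash:net family inet
#     ipset add allowed-nets 10.0.0.0/8
#     iptables -A INPUT -m set --match-set allowed-nets src -j ACCEPT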
| 36.709251 | 80 | 0.657626 |
fca0fd3742b37f4cd3594891d70e849a0cb23d56
| 5,580 |
py
|
Python
|
straxen/analyses/records_matrix.py
|
zhut19/straxen
|
20dea986790ef168ba7052d652a7aa19ab836943
|
[
"BSD-3-Clause"
] | 14 |
2019-06-06T21:38:05.000Z
|
2022-02-16T16:35:16.000Z
|
straxen/analyses/records_matrix.py
|
zhut19/straxen
|
20dea986790ef168ba7052d652a7aa19ab836943
|
[
"BSD-3-Clause"
] | 613 |
2018-10-04T09:15:55.000Z
|
2022-03-31T10:48:04.000Z
|
straxen/analyses/records_matrix.py
|
ahiguera-mx/straxen
|
25b92dd4f18b51700e6df83b230e58ec3bbb7163
|
[
"BSD-3-Clause"
] | 48 |
2019-02-01T12:40:25.000Z
|
2022-02-28T16:59:18.000Z
|
import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
| 37.702703 | 86 | 0.577061 |
fca1f7c596dc848fb292eeb62a44200456a36f90
| 596 |
py
|
Python
|
bdbc/lib/python3.5/site-packages/bigchaindb_driver/crypto.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
bdbc/lib/python3.5/site-packages/bigchaindb_driver/crypto.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
bdbc/lib/python3.5/site-packages/bigchaindb_driver/crypto.py
|
entropyx/fiduchain-blockchain-interface
|
07336a5eebfaa9cddb148edb94461a8fd57562b1
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from cryptoconditions import crypto
CryptoKeypair = namedtuple('CryptoKeypair', ('signing_key', 'verifying_key'))
def generate_keypair():
"""Generates a cryptographic key pair.
Returns:
:class:`~bigchaindb_driver.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.signing_key` and
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.verifying_key`.
"""
return CryptoKeypair(
*(k.decode() for k in crypto.ed25519_generate_key_pair()))
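# Minimal usage sketch (illustrative, not part of the original module):
#
#     keypair = generate_keypair()
#     keypair.signing_key    # private half, used for signing
#     keypair.verifying_key  # public half, shared with other parties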
| 28.380952 | 77 | 0.721477 |
fca3b5ff0625e2c4785f87d64f205243acd800d3
| 4,311 |
py
|
Python
|
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
reviewboard/webapi/resources/change.py
|
mnoorenberghe/reviewboard
|
b8ba9d662c250cb5ec704a50f619adbf3be8cbf0
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.utils import six
from djblets.util.decorators import augment_method_from
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
change_resource = ChangeResource()
| 33.944882 | 78 | 0.64579 |
fca4787d63d5c744297f12e8eaf44573826eecbb
| 1,990 |
py
|
Python
|
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
controllers/notes/NewNote.py
|
heminsatya/free_notes
|
88272a34c48e60d1a82e28b0b2d56883fa724bb3
|
[
"MIT"
] | null | null | null |
# Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
| 29.264706 | 105 | 0.529648 |
fca52f1665be31c3ef3732af8d77d4f70b20bc49
| 2,762 |
py
|
Python
|
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
EDA-&-Data-Preprocessing/code.py
|
udayraj-gupta/ga-learner-dsmp-repo
|
90b16345fb3fd4f6f4f201012995eea7ff1e73e9
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
#data['Installs'] = data['Installs'].str.replace(',','').str.replace('+','')
data['Installs'] = data['Installs'].apply(lambda x : x.replace(',','')).apply(lambda x : x.replace('+',''))
data['Installs'] =data['Installs'].astype(int)
print(data['Installs'])
data['Installs'] = le.fit_transform(data['Installs'])
a = sns.regplot(x="Installs", y="Rating" , data=data)
a.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].apply(lambda x : x.replace('$',''))
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].astype(float)
#le=LabelEncoder()
#data['Installs'] = le.fit_transform(data['Installs'])
y=sns.regplot(data=data,x='Price',y='Rating')
y.set_title('Rating vs Price [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
df=data[['Genres','Rating']]
gr_mean=df.groupby(['Genres'],as_index=False).mean()
gr_mean=gr_mean.sort_values(by=['Rating'])
gr_mean=pd.DataFrame(gr_mean)
print(gr_mean)#,gr_mean[-1,:])
#Code ends here
# --------------
#Code starts here
import seaborn as sns
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
print(data['Last Updated'].max())
max_date=data['Last Updated'].max()
data['Last Updated Days']=max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
sns.regplot(data=data,x='Last Updated Days',y='Rating').set_title('Rating vs Last Updated [RegPlot]')
#Code ends here
| 24.442478 | 107 | 0.685373 |
fca54a750bbd17151c5163b01b4d00722314de04
| 4,008 |
py
|
Python
|
openpnm/algorithms/ChargeConservation.py
|
rguan-uoft/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | 1 |
2020-02-06T19:21:20.000Z
|
2020-02-06T19:21:20.000Z
|
openpnm/algorithms/ChargeConservation.py
|
ChahatAggarwal/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | null | null | null |
openpnm/algorithms/ChargeConservation.py
|
ChahatAggarwal/OpenPNM
|
b3873d35270b0acaad019264368d0055c677d159
|
[
"MIT"
] | null | null | null |
import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
| 39.683168 | 79 | 0.560629 |
fca7c37bc274f186e6ec2be9680fe84ac2ca179f
| 2,318 |
py
|
Python
|
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | 1 |
2017-03-07T20:15:44.000Z
|
2017-03-07T20:15:44.000Z
|
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | null | null | null |
jno/commands/upload.py
|
Kosinkadink/jno
|
773806dd737c1ef0b0a89a7e4086da9c2c1260c1
|
[
"MIT"
] | null | null | null |
from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import verify_arduino_dir
from jno.util import verify_and_get_port
from jno.util import JnoException
from jno.commands.command import Command
import getopt
from colorama import Fore
| 34.597015 | 118 | 0.718723 |
fca94cd64ba4f1d65d89141aae52e93be1d8a3f6
| 11,440 |
py
|
Python
|
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | 5 |
2021-11-12T12:03:47.000Z
|
2022-02-09T11:07:23.000Z
|
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | null | null | null |
modelling/inference_multi_attribute.py
|
rizwan09/hydra-sum
|
42088dde4e2b109fdb222ad4c329ca7bbfe9db2f
|
[
"BSD-3-Clause"
] | 1 |
2021-10-22T04:20:34.000Z
|
2021-10-22T04:20:34.000Z
|
import argparse
import json
import logging
import os
import torch
from transformers.file_utils import ModelOutput
from typing import Dict, Optional, Tuple
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_outputs import Seq2SeqLMOutput
import train_seq2seq_utils
import single_head_utils
import multi_head_utils
from torch import nn
from generation_utils_multi_attribute import GenerationMixinCustomCombined
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
BartConfig,
BartTokenizer
)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bart_mult_heads_2": (BartConfig,
multi_head_utils.ConditionalGenerationCustomBartMultHeads,
BartTokenizer),
}
def load_model(path):
args = json.load(open(path))
config_class, model_class = BartConfig, multi_head_utils.ConditionalGenerationCustomBartMultHeads
config = config_class.from_pretrained(args['path'])
model = model_class.from_pretrained(
args['path'],
from_tf=bool(".ckpt" in args['path']),
config=config)
return model, args, config
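# load_model() reads a small JSON file; a hypothetical example (only 'path' is
# used here, while keys such as 'use_head' are consumed later in evaluate()):
#
#     {"path": "saved_models/bart_mult_heads_2/checkpoint-best", "use_head": 0}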
def evaluate(args, eval_dataset, model: PreTrainedModel, args1, args2, tokenizer: PreTrainedTokenizer,
suffix="") -> Dict:
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
if args.generate:
f_out = open(os.path.join(eval_output_dir, 'test_out%s.txt' % suffix), 'w')
print(eval_output_dir)
k = 0
with torch.no_grad():
model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, input_attention_mask, decoder_ids = batch[0], batch[1], batch[2]
for j in range(input_ids.shape[0]):
gold = tokenizer.decode(decoder_ids[j], skip_special_tokens=True)
input = tokenizer.decode(input_ids[j], skip_special_tokens=True)
input_args = {'input_ids': input_ids[j].unsqueeze(0),
'attention_mask': input_attention_mask[j].unsqueeze(0), 'num_beams': 6,
'length_penalty': 2, 'no_repeat_ngram_size': 3, 'max_length': 200, 'min_length': 12,
'top_k': 30, 'top_p': 0.5, 'do_sample': True,
'decoder_start_token_id': tokenizer.bos_token_id, 'num_return_sequences': 1,
'gate_prob': args.gate_probability, 'use_head_1': args1['use_head'],
'use_head_2': args2['use_head']}
gen = model.generate(**input_args)
gen = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in
gen]
# gen = gen[0]
print(gen[0].strip())
f_out.write(input + '\n')
f_out.write(gold + '\n')
for g in gen:
f_out.write(g.strip() + '\n')
f_out.write('\n')
k += 1
if k > 1000:
break
f_out.close()
if __name__ == "__main__":
main()
| 34.878049 | 118 | 0.600699 |
fca96624113002ffa1bf51ca5fff111307a9a56b
| 2,199 |
py
|
Python
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148 |
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561 |
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
stp_core/common/logging/handlers.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378 |
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
import logging
| 28.558442 | 71 | 0.530241 |
5d75f5d30780e5997d5df3ca87b964d9add7b705
| 47 |
py
|
Python
|
blog/migrations/__init__.py
|
Amohammadi2/django-SPA-blog
|
5dc10894ba360569b4849cfda0c3340ea5a15fb8
|
[
"MIT"
] | 2 |
2020-12-14T08:46:35.000Z
|
2021-06-03T17:26:45.000Z
|
blog/migrations/__init__.py
|
Amohammadi2/django-SPA-blog
|
5dc10894ba360569b4849cfda0c3340ea5a15fb8
|
[
"MIT"
] | null | null | null |
blog/migrations/__init__.py
|
Amohammadi2/django-SPA-blog
|
5dc10894ba360569b4849cfda0c3340ea5a15fb8
|
[
"MIT"
] | null | null | null |
# you just need to add some information here
| 23.5 | 46 | 0.765957 |
5d77f5c8748dabbe0cc911d4482f70143a174f14
| 43 |
py
|
Python
|
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .IFileConverter import IFileConverter
| 21.5 | 42 | 0.883721 |
5d78b08ed15d1550fa9397049ff76029d3869bce
| 555 |
py
|
Python
|
tests/blueprint/test_decorators.py
|
cuenca-mx/agave
|
d4719bdbab8e200c98d206475df6adb275e9fdcc
|
[
"MIT"
] | 3 |
2020-12-11T16:48:44.000Z
|
2021-03-29T00:05:57.000Z
|
tests/blueprint/test_decorators.py
|
cuenca-mx/agave
|
d4719bdbab8e200c98d206475df6adb275e9fdcc
|
[
"MIT"
] | 115 |
2020-08-26T13:26:07.000Z
|
2022-03-31T23:58:22.000Z
|
tests/blueprint/test_decorators.py
|
cuenca-mx/agave
|
d4719bdbab8e200c98d206475df6adb275e9fdcc
|
[
"MIT"
] | null | null | null |
from functools import wraps
from agave.blueprints.decorators import copy_attributes
| 19.821429 | 55 | 0.677477 |
5d78de49bd48a1e6e3f364af456fa6175d8f4166
| 10,534 |
py
|
Python
|
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | 1 |
2020-09-07T02:40:28.000Z
|
2020-09-07T02:40:28.000Z
|
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | null | null | null |
tools/python/utils/config_parser.py
|
hanhan9449/mace
|
63feaf5055bab6a081d36edfab8f963a624899aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
CPP_KEYWORDS = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
'line', 'error', 'pragma',
]
def parse_data_format(str):
str = str.upper()
mace_check(str in [e.name for e in DataFormat],
"unknown data format %s" % str)
return DataFormat[str]
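# Example (assuming the DataFormat enum, which is not shown in this excerpt,
# defines an NHWC member):
#
#     parse_data_format("nhwc")    # -> DataFormat.NHWC
#     parse_data_format("bogus")   # mace_check fails: "unknown data format BOGUS"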
DEVICE_MAP = {
"cpu": DeviceType.CPU,
"gpu": DeviceType.GPU,
"hexagon": DeviceType.HEXAGON,
"dsp": DeviceType.HEXAGON,
"hta": DeviceType.HTA,
"apu": DeviceType.APU,
"cpu+gpu": DeviceType.CPU_GPU
}
DATA_TYPE_MAP = {
'float32': mace_pb2.DT_FLOAT,
'int32': mace_pb2.DT_INT32,
}
| 34.424837 | 78 | 0.638599 |
5d7980863ca4f07807819ec6305da79cbf107a53
| 14,852 |
py
|
Python
|
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | 1 |
2020-05-21T20:52:48.000Z
|
2020-05-21T20:52:48.000Z
|
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | null | null | null |
main_cross_testing_iseg.py
|
sami-ets/DeepNormalize
|
5ed53280d98a201d45bb9973e79736136273eaea
|
[
"MIT"
] | 1 |
2020-05-21T20:52:54.000Z
|
2020-05-21T20:52:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Pierre-Luc Delisle. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
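    # The --config_file parsed above is YAML; this script reads, at minimum, the
    # sections "dataset", "data_augmentation" and "visdom", plus the trainer and
    # model sections handled by YamlConfigurationParser.parse(). A heavily
    # abridged, hypothetical sketch:
    #
    #     dataset:
    #       iSEG:
    #         path: /data/iSEG
    #         modalities: ["T1", "T2"]
    #         validation_split: 0.2
    #     data_augmentation:
    #       test: { ... }
    #     visdom: { ... }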
| 52.295775 | 133 | 0.637961 |
5d799babef8aac803fb3da8b5588e54e7c3ffd6d
| 10,137 |
py
|
Python
|
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | 10 |
2017-12-27T20:51:26.000Z
|
2020-05-27T05:29:13.000Z
|
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | null | null | null |
docs/10.level3_demo_streaming/pc_server/server.py
|
FaBoPlatform/RobotCarAI
|
c89d3330a2beda0f253733d3252b2b035b153b6b
|
[
"Apache-2.0"
] | 3 |
2017-12-27T20:51:30.000Z
|
2019-03-15T02:49:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ClientOpenCV
# Server: Jetson TX2
# Client: Jetson TX2/Raspberry Pi3 Docker
# 1. FFMPEG UDP streaming (AWS: 10 FPS, Jetson TX2: 1 FPS)
# 2. Server
# 3. Client
#
# lib/camera.py: vid = cv2.VideoCapture()
# lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/
'''
Python 3.6
message.encode('ascii').encode('utf-8')
ClientOpenCV BGR'ascii''ascii'
'''
print("wait. launching...")
import socket, select
import time
import cv2
import numpy as np
import time
import os
import sys
import logging
import threading
import numpy as np
from lib.functions import *
from lib.object_detection import ObjectDetection
from lib.opencv_lane_detection import LaneDetection
from lib.webcam import WebcamVideoStream
#
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
#
is_analyze_running = False
sock = None
out = None
# IPM x,y distances in meters
X_METER=1.5
Y_METER=1
# lane detection instance
ld = None
# object detection instance
od = None
if __name__ == '__main__':
main()
print("end server")
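# The FFMPEG UDP streaming mentioned in the header is set up outside this file;
# a typical sender invocation might look like this (illustrative only, the
# exact command is not part of this excerpt):
#
#     ffmpeg -f v4l2 -i /dev/video0 -r 10 -f mpegts udp://<server-ip>:<port>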
| 32.594855 | 169 | 0.487126 |
5d79ef8df5146da3723b7ca2139e31f87b3fe948
| 5,498 |
py
|
Python
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 94 |
2017-05-13T05:39:06.000Z
|
2022-01-11T18:14:54.000Z
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 5 |
2020-06-11T19:09:43.000Z
|
2021-05-01T05:01:55.000Z
|
client.py
|
zackorndorff/revsync
|
17255aebd281edffb3f3330c21cda00039bc51a3
|
[
"MIT"
] | 25 |
2017-05-13T18:15:23.000Z
|
2022-02-03T22:32:41.000Z
|
from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
| 34.797468 | 101 | 0.483812 |
5d7c0c4ed976d906f1c11a70f28b08240f91a61e
| 1,109 |
py
|
Python
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33 |
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189 |
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/condition/urls.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30 |
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# -*- coding: utf-8 -*-
"""URLs to manipulate columns."""
from django.urls import path
from ontask.condition import views
app_name = 'condition'
urlpatterns = [
#
# FILTERS
#
path(
'<int:pk>/create_filter/',
views.FilterCreateView.as_view(),
name='create_filter'),
path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),
path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),
#
# CONDITIONS
#
path(
'<int:pk>/create_condition/',
views.ConditionCreateView.as_view(),
name='create_condition'),
path(
'<int:pk>/edit_condition/',
views.edit_condition,
name='edit_condition'),
path(
'<int:pk>/delete_condition/',
views.delete_condition,
name='delete_condition'),
# Clone the condition
path(
'<int:pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
path(
'<int:pk>/<int:action_pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
]
| 23.595745 | 79 | 0.598738 |
5d7c6de5b4a074a83b3f637a428933b749ec22a8
| 13,437 |
py
|
Python
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 1 |
2020-05-22T14:51:28.000Z
|
2020-05-22T14:51:28.000Z
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 4 |
2020-05-31T21:57:44.000Z
|
2020-07-23T23:32:52.000Z
|
VideoClassification/SegmentLevelClassifier/model.py
|
googleinterns/via-content-understanding
|
ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09
|
[
"Apache-2.0"
] | 1 |
2020-05-19T17:28:10.000Z
|
2020-05-19T17:28:10.000Z
|
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Defines the architecture of the Video Classifier.
"""
import math
import tensorflow as tf
| 39.520588 | 172 | 0.723599 |
5d7cd2c4d715d5bc952ad3e374f1d2268aa25788
| 310 |
py
|
Python
|
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/jax/old/math.py
|
faruq2021/ivy
|
1b24beadbd673d6a9dd504e037c68547e5640627
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of Jax math functions, wrapped to fit Ivy syntax and signature.
"""
# global
import jax as _jax
import jax.numpy as _jnp
tan = _jnp.tan
acos = _jnp.arccos
atan = _jnp.arctan
atan2 = _jnp.arctan2
cosh = _jnp.cosh
atanh = _jnp.arctanh
log = _jnp.log
exp = _jnp.exp
erf = _jax.scipy.special.erf
| 16.315789 | 74 | 0.729032 |
5d7ce8af330d95b04a5584c878164fe2af01973b
| 8,279 |
py
|
Python
|
neaten_db.py
|
Adoni/ZhihuCrawler
|
c275192ced3a344d7b93b7cfd3ebf87ed179400d
|
[
"MIT"
] | null | null | null |
neaten_db.py
|
Adoni/ZhihuCrawler
|
c275192ced3a344d7b93b7cfd3ebf87ed179400d
|
[
"MIT"
] | null | null | null |
neaten_db.py
|
Adoni/ZhihuCrawler
|
c275192ced3a344d7b93b7cfd3ebf87ed179400d
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
from pyltp import Segmentor
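# NOTE: the insert_* helpers called in the __main__ block below are truncated in
# this record. A hedged sketch of the general pymongo pattern they presumably
# share; the database name, collection names, and field names here are
# assumptions, not taken from the original source:
def insert_user_follow_user_list():
    client = MongoClient('localhost', 27017)
    db = client['zhihu']
    for user in db['users'].find():
        # Copy each crawled user's followee list into a dedicated collection.
        db['user_follow_user'].update_one(
            {'user_id': user['_id']},
            {'$set': {'followees': user.get('followee_list', [])}},
            upsert=True,
        )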
if __name__ == '__main__':
# insert_questions_from_answered_question()
# insert_questions_from_followed_question()
# insert_questions_from_asked_question()
# insert_questions_from_collected_question()
#delete_noise_question()
#remove_enger_inline()
# insert_user_list()
insert_user_follow_user_list()
# insert_user_follow_question_list()
# insert_user_ask_question_list()
# insert_user_collect_question_list()
# insert_user_answer_question_list()
| 39.42381 | 77 | 0.595603 |
5d7d4f2cec08d8e71851176546c2923392d1f51a
| 7,675 |
py
|
Python
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 574 |
2015-01-16T02:21:19.000Z
|
2022-03-27T14:05:55.000Z
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 681 |
2015-01-02T20:26:17.000Z
|
2022-03-24T00:59:15.000Z
|
test/tst_vlen.py
|
timgates42/netcdf4-python
|
d8b1cb11454f9beec674a29904c91f48db608c2c
|
[
"MIT"
] | 257 |
2015-01-20T16:42:17.000Z
|
2022-03-29T03:49:37.000Z
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
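# NOTE: the TestCase classes of this file are truncated in this record. A hedged
# sketch of the round-trip such a test presumably performs with the fixtures
# above; the original class and method names are not shown here, so these are
# assumptions:
class VlenRoundTripSketch(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        with Dataset(self.file, 'w') as f:
            vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
            f.createDimension(DIM1_NAME, nlons)
            f.createDimension(DIM2_NAME, nlats)
            ragged = f.createVariable(VAR1_NAME, vlen_t, (DIM2_NAME, DIM1_NAME))
            strings = f.createVariable(VAR2_NAME, str, (DIM2_NAME, DIM1_NAME))
            ragged[:] = data
            strings[:] = datas

    def tearDown(self):
        os.remove(self.file)

    def test_vlen_roundtrip(self):
        # Read the ragged arrays and vlen strings back and compare element-wise.
        with Dataset(self.file) as f:
            ragged = f.variables[VAR1_NAME][:]
            strings = f.variables[VAR2_NAME][:]
        for i in range(nlats):
            for j in range(nlons):
                assert_array_equal(ragged[i, j], data[i, j])
                self.assertEqual(strings[i, j], datas[i, j])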
if __name__ == '__main__':
unittest.main()
| 33.515284 | 86 | 0.580456 |
5d7e286fce65b02bbb505a551034d0638886042d
| 2,764 |
py
|
Python
|
sonnet/src/once.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 10,287 |
2017-04-07T12:33:37.000Z
|
2022-03-30T03:32:16.000Z
|
sonnet/src/once.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 209 |
2017-04-07T15:57:11.000Z
|
2022-03-27T10:43:03.000Z
|
sonnet/src/once.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 1,563 |
2017-04-07T13:15:06.000Z
|
2022-03-29T15:26:04.000Z
|
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility to run functions and methods once."""
import uuid
from sonnet.src import utils
_ONCE_PROPERTY = "_snt_once"
def once(f):
"""Decorator which ensures a wrapped method is only ever run once.
>>> @snt.once
... def f():
... print('Hello, world!')
>>> f()
Hello, world!
>>> f()
>>> f()
If `f` is a method then it will be evaluated once per instance:
>>> class MyObject:
... @snt.once
... def f(self):
... print('Hello, world!')
>>> o = MyObject()
>>> o.f()
Hello, world!
>>> o.f()
>>> o2 = MyObject()
>>> o2.f()
Hello, world!
>>> o.f()
>>> o2.f()
If an error is raised during execution of `f` it will be raised to the user.
Next time the method is run, it will be treated as not having run before.
Args:
f: A function to wrap which should only be called once.
Returns:
Wrapped version of `f` which will only evaluate `f` the first time it is
called.
"""
# TODO(tomhennigan) Perhaps some more human friendly identifier?
once_id = uuid.uuid4()
wrapper.seen_none = False
decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none
decorated.__snt_once_wrapped__ = f
return decorated
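# NOTE: the body of the inner `wrapper` is truncated in this record; the lines
# above only show its bookkeeping attributes being set. As a hedged, self-contained
# illustration of the same idea (not the original Sonnet implementation), a
# plain-function-only variant looks like this; the real wrapper additionally keys
# the "already ran" state on the instance via `_ONCE_PROPERTY` and `once_id` so
# that methods run once per object:
def _once_plain_function_sketch(f):
  state = {"ran": False}

  def wrapper(*args, **kwargs):
    if not state["ran"]:
      f(*args, **kwargs)  # Errors propagate; the flag is only set on success.
      state["ran"] = True

  return wrapper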
| 28.494845 | 87 | 0.634949 |
5d7e5adfacc7c05430120bb4ddda519a5d8edcca
| 9,774 |
py
|
Python
|
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
env.py
|
DGarciaMedina/PiArmDiego
|
cb4664796aa99b0717145f9e4889bfba5190059f
|
[
"MIT"
] | null | null | null |
import piarm
import time
import numpy as np
import cv2
import random
| 32.151316 | 137 | 0.563024 |
5d7e67724340bd64fb013a94027277d42f8215af
| 10,105 |
py
|
Python
|
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1 |
2022-01-14T04:25:01.000Z
|
2022-01-14T04:25:01.000Z
|
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/src/grpc/_adapter/_links_test.py
|
jonywtf/grpc
|
124f3c5a4b65bb88f13be7c68482eb83d945ad02
|
[
"BSD-3-Clause"
] | 1 |
2022-01-14T04:25:02.000Z
|
2022-01-14T04:25:02.000Z
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of the GRPC-backed ForeLink and RearLink."""
import threading
import unittest
from grpc._adapter import _proto_scenarios
from grpc._adapter import _test_links
from grpc._adapter import fore
from grpc._adapter import rear
from grpc.framework.base import interfaces
from grpc.framework.base.packets import packets as tickets
from grpc.framework.foundation import logging_pool
_IDENTITY = lambda x: x
_TIMEOUT = 2
if __name__ == '__main__':
unittest.main()
| 40.42 | 80 | 0.740129 |
5d7f67f5f4955fd254f6885d027ea817e96c1e91
| 620 |
py
|
Python
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 382 |
2019-07-16T13:30:15.000Z
|
2022-03-30T22:29:07.000Z
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 46 |
2019-08-27T18:07:47.000Z
|
2022-03-16T16:28:10.000Z
|
tests/_test_progress_board.py
|
stjordanis/Hyperactive
|
5acf247d8023ff6761593b9d0954bdd912d20aed
|
[
"MIT"
] | 35 |
2019-08-03T00:51:09.000Z
|
2021-12-03T19:06:07.000Z
|
import os, glob
import subprocess
from subprocess import DEVNULL, STDOUT
abspath = os.path.abspath(__file__)
dir_ = os.path.dirname(abspath)
files = glob.glob(dir_ + "/_progress_board_tests/_test_progress_board_*.py")
for file_path in files:
file_name = str(file_path.rsplit("/", maxsplit=1)[1])
try:
print("\033[0;33;40m Testing", file_name, end="...\r")
subprocess.check_call(["pytest", file_path], stdout=DEVNULL, stderr=STDOUT)
except subprocess.CalledProcessError:
print("\033[0;31;40m Error in", file_name)
else:
print("\033[0;32;40m", file_name, "is correct")
| 29.52381 | 83 | 0.68871 |
5d7f92bfcfd4bf66faaae6ec55c26244db6de591
| 8,695 |
py
|
Python
|
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | 1 |
2022-03-09T03:20:37.000Z
|
2022-03-09T03:20:37.000Z
|
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | null | null | null |
pages/forest_pages.py
|
jhalljhall/beiwe-backend
|
06d28926a2830c7ad53c32ec41ff49320932aeed
|
[
"BSD-3-Clause"
] | null | null | null |
import csv
import datetime
from collections import defaultdict
from django.contrib import messages
from django.http.response import FileResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from authentication.admin_authentication import (authenticate_admin,
authenticate_researcher_study_access, forest_enabled)
from constants.data_access_api_constants import CHUNK_FIELDS
from constants.forest_constants import ForestTaskStatus, ForestTree
from database.data_access_models import ChunkRegistry
from database.study_models import Study
from database.tableau_api_models import ForestTask
from database.user_models import Participant
from forms.django_forms import CreateTasksForm
from libs.http_utils import easy_url
from libs.internal_types import ParticipantQuerySet, ResearcherRequest
from libs.streaming_zip import zip_generator
from libs.utils.date_utils import daterange
from middleware.abort_middleware import abort
from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer
def stream_forest_task_log_csv(forest_tasks):
buffer = CSVBuffer()
writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)
writer.writeheader()
yield buffer.read()
for forest_task in forest_tasks:
writer.writerow(ForestTaskCsvSerializer(forest_task).data)
yield buffer.read()
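# NOTE: CSVBuffer is defined elsewhere in the original module and is not included
# in this truncated record. A minimal compatible sketch (an assumption, not the
# original class): csv.DictWriter calls write(), and read() drains everything
# written so far so the rows can be streamed to the client incrementally.
class CSVBuffer:
    def __init__(self):
        self.lines = []

    def read(self):
        ret = "".join(self.lines)
        self.lines = []
        return ret

    def write(self, line):
        self.lines.append(line)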
| 36.533613 | 105 | 0.702588 |
5d7fb293f532babd1479825528080b1664689540
| 6,138 |
py
|
Python
|
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
data_scout/transformations/math_custom.py
|
janthiemen/data_scout
|
6366eedfb20ed429bc96100de4dd2c7409e5dd88
|
[
"MIT"
] | null | null | null |
from __future__ import division
from .transformation import Transformation
from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,
ZeroOrMore, Forward, nums, alphas, oneOf)
import math
import re
import operator
__author__ = 'Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
'''
__note__ = '''
All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
more easily in other places.
'''
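# NOTE: the parser class itself (Paul McGuire's fourFn grammar wrapped for this
# Transformation) is truncated in this record. For illustration only, the stack
# evaluation step that fourFn-style parsers rely on looks roughly like this; the
# helper names below are assumptions, not the original code:
_opn = {"+": operator.add, "-": operator.sub, "*": operator.mul,
        "/": operator.truediv, "^": operator.pow}
_fn = {"sin": math.sin, "cos": math.cos, "tan": math.tan,
       "abs": abs, "trunc": int, "round": round,
       "sgn": lambda a: (a > 0) - (a < 0)}

def _evaluate_stack(stack):
    # The grammar pushes operands/operators onto `stack`; evaluation pops from the
    # end and recurses, mirroring fourFn.py.
    op = stack.pop()
    if op == "unary -":
        return -_evaluate_stack(stack)
    if op in _opn:
        op2 = _evaluate_stack(stack)
        op1 = _evaluate_stack(stack)
        return _opn[op](op1, op2)
    if op == "PI":
        return math.pi
    if op == "E":
        return math.e
    if op in _fn:
        return _fn[op](_evaluate_stack(stack))
    return float(op)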
| 37.2 | 144 | 0.515477 |
5d8266dee4d58a4d6dfa39e78c02bb8ed2be2717
| 6,261 |
py
|
Python
|
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | null | null | null |
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | 2 |
2019-12-02T03:11:42.000Z
|
2021-02-08T20:37:15.000Z
|
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py
|
cybertraining-dsc/fa19-516-171
|
1dba8cde09f7b05c80557ea7ae462161c590568b
|
[
"Apache-2.0"
] | 2 |
2019-09-10T00:56:11.000Z
|
2020-05-05T02:54:31.000Z
|
#
# this manager stores directly into the db with DatabaseUpdate
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.common.console import Console
from cloudmesh.storage.Provider import Provider
import os
from datetime import datetime
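# NOTE: the vdir manager class is truncated in this record. A hedged sketch of the
# cloudmesh pattern the imports suggest (@DatabaseUpdate stores the returned dict
# in MongoDB); the class, method, and field names below are assumptions:
class Vdir(object):

    def __init__(self):
        self.cm = CmDatabase()

    @DatabaseUpdate()
    def add(self, endpoint, dir_and_name):
        # Record a virtual-directory entry pointing at where the object really lives.
        entry = {
            "cm": {"name": dir_and_name, "kind": "vdir", "cloud": "local"},
            "endpoint": endpoint,
            "created": str(datetime.utcnow()),
        }
        return entry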
| 40.921569 | 119 | 0.50008 |
5d830daa46eeb8bf91aea2f52e4b5f3c2d74b15e
| 2,605 |
py
|
Python
|
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | 1 |
2018-09-13T13:50:17.000Z
|
2018-09-13T13:50:17.000Z
|
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | null | null | null |
redash/query_runner/influx_db.py
|
cjpit/redash
|
27aafdb07e3a427da8f88d55a0c0d7cc64379da2
|
[
"BSD-2-Clause"
] | 1 |
2018-10-25T12:09:32.000Z
|
2018-10-25T12:09:32.000Z
|
import json
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
register(InfluxDB)
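# NOTE: the InfluxDB query-runner class referenced by register() is truncated in
# this record. A hedged sketch of the usual redash runner shape; the result-
# reshaping helper below is hypothetical, not the original code:
def _transform_result(results):
    # Flatten an influxdb ResultSet into redash's {"columns": [...], "rows": [...]}.
    column_names, rows = [], []
    for point in results.get_points():
        for key in point:
            if key not in column_names:
                column_names.append(key)
        rows.append(point)
    columns = [{"name": name, "friendly_name": name, "type": TYPE_STRING}
               for name in column_names]
    return {"columns": columns, "rows": rows}


class InfluxDB(BaseQueryRunner):
    noop_query = "show measurements limit 1"

    @classmethod
    def enabled(cls):
        return enabled

    def run_query(self, query, user):
        client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
        try:
            results = client.query(query)
            json_data = json.dumps(_transform_result(results), cls=JSONEncoder)
            error = None
        except Exception as ex:
            json_data = None
            error = str(ex)
        return json_data, error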
| 26.313131 | 74 | 0.542035 |