column                                     dtype          length / value range
hexsha                                     stringlengths  40-40
size                                       int64          5-2.06M
ext                                        stringclasses  11 values
lang                                       stringclasses  1 value
max_stars_repo_path                        stringlengths  3-251
max_stars_repo_name                        stringlengths  4-130
max_stars_repo_head_hexsha                 stringlengths  40-78
max_stars_repo_licenses                    listlengths    1-10
max_stars_count                            int64          1-191k
max_stars_repo_stars_event_min_datetime    stringlengths  24-24
max_stars_repo_stars_event_max_datetime    stringlengths  24-24
max_issues_repo_path                       stringlengths  3-251
max_issues_repo_name                       stringlengths  4-130
max_issues_repo_head_hexsha                stringlengths  40-78
max_issues_repo_licenses                   listlengths    1-10
max_issues_count                           int64          1-116k
max_issues_repo_issues_event_min_datetime  stringlengths  24-24
max_issues_repo_issues_event_max_datetime  stringlengths  24-24
max_forks_repo_path                        stringlengths  3-251
max_forks_repo_name                        stringlengths  4-130
max_forks_repo_head_hexsha                 stringlengths  40-78
max_forks_repo_licenses                    listlengths    1-10
max_forks_count                            int64          1-105k
max_forks_repo_forks_event_min_datetime    stringlengths  24-24
max_forks_repo_forks_event_max_datetime    stringlengths  24-24
content                                    stringlengths  1-1.05M
avg_line_length                            float64        1-1.02M
max_line_length                            int64          3-1.04M
alphanum_fraction                          float64        0-1
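The schema above describes one record per source file: a content hash (hexsha), file size, extension and language, repository path/name/head commit/licenses plus event counts and timestamps grouped by the stars, issues and forks events, the raw file content, and simple per-file text statistics. As a minimal sketch of how such records could be consumed (assuming they are available as JSON Lines with exactly these field names; the file name rows.jsonl and the size/license filters are hypothetical choices, not part of the dataset), one might iterate over them like this:

import json

def iter_small_python_files(path="rows.jsonl", max_size=10_000):
    """Yield (repo path, content) for permissively licensed Python files below max_size bytes."""
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            row = json.loads(line)           # one record with the fields listed above
            if row["lang"] != "Python":      # 'lang' has a single class here, kept for clarity
                continue
            if row["size"] > max_size:       # 'size' is the file size in bytes
                continue
            # 'max_stars_repo_licenses' is a list of SPDX-style license identifiers
            if not {"MIT", "BSD-3-Clause", "Apache-2.0"} & set(row["max_stars_repo_licenses"]):
                continue
            yield row["max_stars_repo_path"], row["content"]

if __name__ == "__main__":
    for repo_path, content in iter_small_python_files():
        print(repo_path, len(content.splitlines()))

The sample rows below follow the same column order as the schema; their content cells are shown verbatim.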
5dcda9f4b87b5d8b72500b6efa77e38a5d14806f
1,438
py
Python
tests/transformations/local_storage_test.py
am-ivanov/dace
c35f0b3cecc04a2c9fb668bd42a72045891e7a42
[ "BSD-3-Clause" ]
1
2021-09-13T06:36:18.000Z
2021-09-13T06:36:18.000Z
tests/transformations/local_storage_test.py
1C4nfaN/dace
4d65e0951c112160fe783766404a806b6043b521
[ "BSD-3-Clause" ]
null
null
null
tests/transformations/local_storage_test.py
1C4nfaN/dace
4d65e0951c112160fe783766404a806b6043b521
[ "BSD-3-Clause" ]
null
null
null
import unittest
import dace
import numpy as np
from dace.transformation.dataflow import MapTiling, OutLocalStorage

N = dace.symbol('N')

if __name__ == '__main__':
    unittest.main()
30.595745
74
0.535466
5dce8eb43814f4b1a92f8e04cfdb8ab66b1647ad
7,705
py
Python
astropy/io/fits/hdu/streaming.py
jayvdb/astropy
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
[ "BSD-3-Clause" ]
445
2019-01-26T13:50:26.000Z
2022-03-18T05:17:38.000Z
astropy/io/fits/hdu/streaming.py
jayvdb/astropy
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
[ "BSD-3-Clause" ]
242
2019-01-29T15:48:27.000Z
2022-03-31T22:09:21.000Z
astropy/io/fits/hdu/streaming.py
jayvdb/astropy
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
[ "BSD-3-Clause" ]
31
2019-03-10T09:51:27.000Z
2022-02-14T23:11:12.000Z
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import gzip
import os

from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
33.5
79
0.573134
5dce95b004d795178936b1032e10425b07f77812
3,815
py
Python
geoprisma/tests/test_templatetags.py
groupe-conseil-nutshimit-nippour/django-geoprisma
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
[ "BSD-3-Clause" ]
null
null
null
geoprisma/tests/test_templatetags.py
groupe-conseil-nutshimit-nippour/django-geoprisma
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
[ "BSD-3-Clause" ]
5
2020-02-12T00:23:17.000Z
2021-12-13T19:46:33.000Z
geoprisma/tests/test_templatetags.py
groupe-conseil-nutshimit-nippour/django-geoprisma
4732fdb8a0684eb4d7fd50aa43e11b454ee71d08
[ "BSD-3-Clause" ]
null
null
null
import django
from django.test import TestCase
from django.template import Template, Context


def render(template_string, context_dict=None):
    """
    A shortcut for testing template output.
    """
    if context_dict is None:
        context_dict = {}
    c = Context(context_dict)
    t = Template(template_string)
    return t.render(c).strip()
28.901515
127
0.550459
5dceeb675241617c8282ee5a28736fe976ad2fa2
4,447
py
Python
src/ggrc_workflows/models/task_group.py
acidburn0zzz/ggrc-core
386781d08172102eb51030b65db8212974651628
[ "ECL-2.0", "Apache-2.0" ]
1
2016-11-06T05:21:24.000Z
2016-11-06T05:21:24.000Z
src/ggrc_workflows/models/task_group.py
acidburn0zzz/ggrc-core
386781d08172102eb51030b65db8212974651628
[ "ECL-2.0", "Apache-2.0" ]
2
2021-02-02T23:09:40.000Z
2021-02-08T21:00:48.000Z
src/ggrc_workflows/models/task_group.py
Acidburn0zzz/ggrc-core
386781d08172102eb51030b65db8212974651628
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""A module containing the workflow TaskGroup model."""


from sqlalchemy import or_

from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.associationproxy import association_proxy
from ggrc.models.mixins import (
    Titled, Slugged, Described, Timeboxed, WithContact
)
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models import all_models
from ggrc_workflows.models.task_group_object import TaskGroupObject
29.256579
78
0.659996
5dcf0b13e0d53d6745a01c7cc15df8b5de13bc88
1,248
py
Python
src/tests/app_functions/menu/test_change_auto_login.py
DanielNoord/DuolingoPomodoro
307b386daf3216fb9ba86f983f0e39f6647ffd64
[ "MIT" ]
null
null
null
src/tests/app_functions/menu/test_change_auto_login.py
DanielNoord/DuolingoPomodoro
307b386daf3216fb9ba86f983f0e39f6647ffd64
[ "MIT" ]
4
2021-04-25T15:39:32.000Z
2022-02-18T20:58:00.000Z
src/tests/app_functions/menu/test_change_auto_login.py
DanielNoord/DuolingoPomodoro
307b386daf3216fb9ba86f983f0e39f6647ffd64
[ "MIT" ]
null
null
null
import pytest
import rumps

from src.app_functions.menu.change_auto_login import change_auto_login


def test_setting_is_true(mocker, basic_app):
    """Check if setting is changed correctly if True"""
    basic_app.settings["auto_login"] = True
    mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
    mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
    change_auto_login(basic_app)
    assert basic_app.settings["auto_login"] is False
    mock_function.assert_called_once_with(basic_app)


def test_setting_is_false(mocker, basic_app):
    """Check if setting is changed correctly if false"""
    basic_app.settings["auto_login"] = False
    mock_function = mocker.patch("src.app_functions.menu.change_auto_login.update_menu")
    mocker.patch("src.app_functions.menu.change_auto_login.save_settings")
    change_auto_login(basic_app)
    assert basic_app.settings["auto_login"] is True
    mock_function.assert_called_once_with(basic_app)
34.666667
88
0.758814
5dcf455584ab00f2818650ba6fb4636dff7442e6
3,105
py
Python
deepobs/tensorflow/testproblems/cifar100_vgg19.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
null
null
null
deepobs/tensorflow/testproblems/cifar100_vgg19.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
null
null
null
deepobs/tensorflow/testproblems/cifar100_vgg19.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""VGG 19 architecture for CIFAR-100."""

import tensorflow as tf

from ._vgg import _vgg
from ..datasets.cifar100 import cifar100
from .testproblem import TestProblem
37.865854
85
0.679549
5dcfe247dd1cc19b83a077ac143e29f6729063b0
192
py
Python
write-a-function.py
TheHumanGoogle/Hackerrank-python-solution
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
[ "MIT" ]
1
2022-01-12T16:05:01.000Z
2022-01-12T16:05:01.000Z
write-a-function.py
TheHumanGoogle/Hackerrank-python-solution
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
[ "MIT" ]
null
null
null
write-a-function.py
TheHumanGoogle/Hackerrank-python-solution
ab2fa515444d7493340d7c7fbb88c3a090a3a8f5
[ "MIT" ]
null
null
null
year = int(input())
16
35
0.546875
5dcfe5f1b4cd41078d4a64e401536ccb2333c29f
1,827
py
Python
shortio/utils.py
byshyk/shortio
054014b3936495c86d2e2cd6a61c3cee9ab9b0f2
[ "MIT" ]
null
null
null
shortio/utils.py
byshyk/shortio
054014b3936495c86d2e2cd6a61c3cee9ab9b0f2
[ "MIT" ]
null
null
null
shortio/utils.py
byshyk/shortio
054014b3936495c86d2e2cd6a61c3cee9ab9b0f2
[ "MIT" ]
null
null
null
"""Contains utility functions.""" BIN_MODE_ARGS = {'mode', 'buffering', } TEXT_MODE_ARGS = {'mode', 'buffering', 'encoding', 'errors', 'newline'} def split_args(args): """Splits args into two groups: open args and other args. Open args are used by ``open`` function. Other args are used by ``load``/``dump`` functions. Args: args: Keyword args to split. Returns: open_args: Arguments for ``open``. other_args: Arguments for ``load``/``dump``. """ mode_args = BIN_MODE_ARGS if 'b' in args['mode'] else TEXT_MODE_ARGS open_args = {} other_args = {} for arg, value in args.items(): if arg in mode_args: open_args[arg] = value else: other_args[arg] = value return open_args, other_args def read_wrapper(load, **base_kwargs): """Wraps ``load`` function to avoid context manager boilerplate. Args: load: Function that takes the return of ``open``. **base_kwargs: Base arguments that ``open``/``load`` take. Returns: Wrapper for ``load``. """ return wrapped def write_wrapper(dump, **base_kwargs): """Wraps ``dump`` function to avoid context manager boilerplate. Args: dump: Function that takes the return of ``open`` and data to dump. **base_kwargs: Base arguments that ``open``/``dump`` take. Returns: Wrapper for ``dump``. """ return wrapped
26.478261
74
0.603175
5dd0559b06c4b507ddd6a8e8abd9d084e5c41c75
3,483
py
Python
paasta_tools/async_utils.py
sobolevn/paasta
8b87e0b13816c09b3d063b6d3271e6c7627fd264
[ "Apache-2.0" ]
1,711
2015-11-10T18:04:56.000Z
2022-03-23T08:53:16.000Z
paasta_tools/async_utils.py
sobolevn/paasta
8b87e0b13816c09b3d063b6d3271e6c7627fd264
[ "Apache-2.0" ]
1,689
2015-11-10T17:59:04.000Z
2022-03-31T20:46:46.000Z
paasta_tools/async_utils.py
sobolevn/paasta
8b87e0b13816c09b3d063b6d3271e6c7627fd264
[ "Apache-2.0" ]
267
2015-11-10T19:17:16.000Z
2022-02-08T20:59:52.000Z
import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar

T = TypeVar("T")


# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
32.858491
106
0.611829
5dd208f2225a11d0691db8c3c2975ede5f79f7f1
3,470
py
Python
util/dataset.py
MTI830PyTraders/pytrade
33ea3e756019c999e9c3d78fca89cd72addf6ab2
[ "BSD-3-Clause" ]
3
2017-03-08T15:42:26.000Z
2021-03-10T23:47:15.000Z
util/dataset.py
fraka6/pytrade
8a94b6e1b3922dcba95067c03abbf45975878b33
[ "BSD-3-Clause" ]
15
2015-05-20T03:11:58.000Z
2018-03-30T23:42:18.000Z
util/dataset.py
MTI830PyTraders/pytrade
33ea3e756019c999e9c3d78fca89cd72addf6ab2
[ "BSD-3-Clause" ]
7
2016-04-12T09:49:22.000Z
2021-03-10T23:47:19.000Z
#!/usr/bin/python
''' generate dataset '''
import csv
import argparse
import numpy as np
import sklearn.metrics
import theanets
from sklearn.metrics import accuracy_score
import logging
from trendStrategy import OptTrendStrategy, TrendStrategy
from util import visu


def load_dataset(stock, ratio=0.8, name=OptTrendStrategy.__name__):
    ''' return train, valid (x,y) '''
    orders = np.loadtxt("{0}_{1}_orders.csv".format(stock, name), usecols=[1], delimiter=',')
    orders[orders==-1]=0
    features = np.loadtxt("{0}_input.csv".format(stock), delimiter=',')
    if len(orders)!=len(features):
        logging.error("len(orders)!=len(features) -> %s!=%s" %(len(orders),len(features)))
    features = features.astype('f')
    orders = orders.astype('i')
    pos = round(len(features)*ratio)
    train = (features[:pos], orders[:pos])
    valid = (features[pos:], orders[pos:])
    return train, valid


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--stock', '-s', default="TSLA", help='stock')
    parser.add_argument('--ratio', '-r', default=0.8, type=int, help='train/valid ratio')
    parser.add_argument('--min', '-m', default=0.001, type=int, help='min improvement (stop learning)')
    parser.add_argument('--field', default='orders', help='compare field')
    args = parser.parse_args()

    if args.field:
        compare(args.stock, args.field)

    train, valid = load_dataset(args.stock)
    exp = train_strategy(args.stock, args.ratio, args.min)
    exp = load_strategy(args.stock, True)
35.408163
103
0.653602
5dd235954e00e3353720380ad5e4fd1579960a8d
3,788
py
Python
examples/scripts/sc/bpdn.py
manvhah/sporco
9237d7fc37e75089a2a65ebfe02b7491410da7d4
[ "BSD-3-Clause" ]
null
null
null
examples/scripts/sc/bpdn.py
manvhah/sporco
9237d7fc37e75089a2a65ebfe02b7491410da7d4
[ "BSD-3-Clause" ]
null
null
null
examples/scripts/sc/bpdn.py
manvhah/sporco
9237d7fc37e75089a2a65ebfe02b7491410da7d4
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.

"""
Basis Pursuit DeNoising
=======================

This example demonstrates the use of class :class:`.admm.bpdn.BPDN` to solve the Basis Pursuit DeNoising (BPDN) problem :cite:`chen-1998-atomic`

  $$\mathrm{argmin}_\mathbf{x} \; (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x} \|_1 \;,$$

where $D$ is the dictionary, $\mathbf{x}$ is the sparse representation, and $\mathbf{s}$ is the signal to be represented. In this example the BPDN problem is used to estimate the reference sparse representation that generated a signal from a noisy version of the signal.
"""

from __future__ import print_function
from builtins import input

import numpy as np

from sporco.admm import bpdn
from sporco import util
from sporco import plot


"""
Configure problem size, sparsity, and noise level.
"""

N = 512      # Signal size
M = 4*N      # Dictionary size
L = 32       # Number of non-zero coefficients in generator
sigma = 0.5  # Noise level


"""
Construct random dictionary, reference random sparse representation, and test signal consisting of the synthesis of the reference sparse representation with additive Gaussian noise.
"""

# Construct random dictionary and random sparse coefficients
np.random.seed(12345)
D = np.random.randn(N, M)
x0 = np.zeros((M, 1))
si = np.random.permutation(list(range(0, M-1)))
x0[si[0:L]] = np.random.randn(L, 1)

# Construct reference and noisy signal
s0 = D.dot(x0)
s = s0 + sigma*np.random.randn(N,1)


"""
Set BPDN solver class options.
"""

opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                         'RelStopTol': 1e-3, 'AutoRho': {'RsdlTarget': 1.0}})


"""
Select regularization parameter $\lambda$ by evaluating the error in recovering the sparse representation over a logarithmicaly spaced grid. (The reference representation is assumed to be known, which is not realistic in a real application.) A function is defined that evalues the BPDN recovery error for a specified $\lambda$, and this function is evaluated in parallel by :func:`sporco.util.grid_search`.
"""

# Function computing reconstruction error at lmbda

# Parallel evalution of error function on lmbda grid
lrng = np.logspace(1, 2, 20)
sprm, sfvl, fvmx, sidx = util.grid_search(evalerr, (lrng,))
lmbda = sprm[0]

print('Minimum 1 error: %5.2f at = %.2e' % (sfvl, lmbda))


"""
Once the best $\lambda$ has been determined, run BPDN with verbose display of ADMM iteration statistics.
"""

# Initialise and run BPDN object for best lmbda
opt['Verbose'] = True
b = bpdn.BPDN(D, s, lmbda, opt)
x = b.solve()

print("BPDN solve time: %.2fs" % b.timer.elapsed('solve'))


"""
Plot comparison of reference and recovered representations.
"""

plot.plot(np.hstack((x0, x)), title='Sparse representation',
          lgnd=['Reference', 'Reconstructed'])


"""
Plot lmbda error curve, functional value, residuals, and rho
"""

its = b.getitstat()
fig = plot.figure(figsize=(15, 10))
plot.subplot(2, 2, 1)
plot.plot(fvmx, x=lrng, ptyp='semilogx', xlbl='$\lambda$', ylbl='Error', fig=fig)
plot.subplot(2, 2, 2)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(2, 2, 3)
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy',
          xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig)
plot.subplot(2, 2, 4)
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig)
fig.show()


# Wait for enter on keyboard
input()
30.063492
406
0.694298
5dd288bce128d196a30c7168a6af79b6e365abd9
11,995
py
Python
saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py
tadartefactorist/mask
7967dd4ad39e3d26ac516719faefb40e00a8cbff
[ "BSD-3-Clause" ]
null
null
null
saleor-env/lib/python3.7/site-packages/snowballstemmer/nepali_stemmer.py
tadartefactorist/mask
7967dd4ad39e3d26ac516719faefb40e00a8cbff
[ "BSD-3-Clause" ]
1
2021-06-01T23:55:30.000Z
2021-06-01T23:55:30.000Z
venv/lib/python2.7/site-packages/snowballstemmer/nepali_stemmer.py
tvek/DatasciencePythonInitBase
e578b4a3026b55bc2935b200453e511f1731c75e
[ "MIT" ]
null
null
null
# This file was generated automatically by the Snowball to Python compiler
# http://snowballstem.org/

from .basestemmer import BaseStemmer
from .among import Among
34.970845
75
0.469362
5dd337ba7906e3c3c7b8bae81a44d4305edc633f
1,361
py
Python
tests/auto_test_class_creation_spec.py
MountainField/uspec
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
[ "MIT" ]
2
2020-03-02T01:58:05.000Z
2022-01-25T08:44:40.000Z
tests/auto_test_class_creation_spec.py
MountainField/uspec
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
[ "MIT" ]
null
null
null
tests/auto_test_class_creation_spec.py
MountainField/uspec
a4f8908b1a3af519d9d2ce7b85a4b4cca7b85883
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# =================================================================
# uspec
#
# Copyright (c) 2020 Takahide Nogayama
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# =================================================================

from __future__ import unicode_literals, print_function, division

import unittest

import uspec
from uspec import describe, context, it

###################################

with describe("Game", test_class=TestGame):
    assert test_class is TestGame

assert TestGame is not None

##################################

TEST_CLASS_NAME_GAME2 = None

with describe("Game2"):
    TEST_CLASS_NAME_GAME2 = test_class.__name__

assert TEST_CLASS_NAME_GAME2 in globals()

##################################

wrap()

assert TEST_CLASS_NAME_GAME3 in globals()

if __name__ == '__main__':
    import unittest
    unittest.main(verbosity=2)
20.621212
67
0.556209
5dd4998614beb1247cc3bb983c52f0476fab9cb0
495
py
Python
main.py
Matthewk01/Snake-AI
d5f211334436676966f17bb6dbfea8aba61ee6b4
[ "MIT" ]
null
null
null
main.py
Matthewk01/Snake-AI
d5f211334436676966f17bb6dbfea8aba61ee6b4
[ "MIT" ]
null
null
null
main.py
Matthewk01/Snake-AI
d5f211334436676966f17bb6dbfea8aba61ee6b4
[ "MIT" ]
null
null
null
import pygame
from game.game_logic.game import Game
import matplotlib.pyplot as plt


if __name__ == "__main__":
    main()
20.625
56
0.628283
5dd4d65be6fbb2b5be1a2991fade5b69cc8efed5
792
py
Python
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
19
2020-10-26T17:37:22.000Z
2022-01-20T09:32:38.000Z
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
24
2021-07-19T01:09:35.000Z
2022-03-17T11:44:02.000Z
closed/Intel/code/resnet50/openvino-cpu/src/tools/create_image_list.py
ctuning/inference_results_v1.1
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
[ "Apache-2.0" ]
19
2020-10-21T19:15:17.000Z
2022-01-04T08:32:08.000Z
import os
import sys
from glob import glob


if __name__=="__main__":
    main()
22.628571
64
0.582071
5dd5c073bdc1758efc5e43f31738feb8fc1ef917
4,434
py
Python
AI/others/churn/churn_2.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
null
null
null
AI/others/churn/churn_2.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
3
2020-03-24T16:26:35.000Z
2020-04-15T19:40:41.000Z
AI/others/churn/churn_2.py
honchardev/Fun
ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# coding: utf-8

# In[1]:

# src: http://datareview.info/article/prognozirovanie-ottoka-klientov-so-scikit-learn/

# In[ ]:

# -,
#
# .
# ,
# ,
# ( 5 20 ).
# :
# 1. ,
# ,
# 2. ,
# ,
# .
# 3. A ,
# , .
# ,
# , ,
# , , ,
# .

# In[ ]:

# datset src: https://raw.githubusercontent.com/michaelulin/churn/master/work/churn_model/data/churn.csv

# In[88]:

# Load libraries
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

import pandas as pd
import numpy as np

from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import KFold, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier

# In[3]:

# Load dataset
raw_churn_df = pd.read_csv('churn.csv')

# In[17]:

display(raw_churn_df.shape)
display(raw_churn_df.head(), raw_churn_df.tail())
display(raw_churn_df.columns.values)
display(raw_churn_df.dtypes)
display(raw_churn_df.isnull().sum())

# In[78]:

# Isolate target data
y = raw_churn_df['Churn?']
X = raw_churn_df.drop('Churn?', axis=1)

# In[79]:

# Drop irrelevant features
features_to_drop = ['State', 'Area Code', 'Phone']
X = X.drop(features_to_drop, axis=1)

# In[80]:

# Encode yes/no with 1/0 values
X["Int'l Plan"] = X["Int'l Plan"].map({'no': 0, 'yes': 1})
X["VMail Plan"] = X["VMail Plan"].map({'no': 0, 'yes': 1})

# In[81]:

# Scale everything
std_scaler = StandardScaler(with_mean=True)
X = std_scaler.fit_transform(X)
display(X.shape)

# In[90]:

# Perform CV for SVM, random forest and kNN
try_clf(X, y, SVC(gamma='scale'))
try_clf(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
try_clf(X, y, KNeighborsClassifier())

# std scaler with_mean=False accuracies:
# 0.9256594724220624
# 0.9484412470023981
# 0.8896882494004796

# std scaler with_mean=True accuracies:
# 0.9256594724220624
# 0.9496402877697842
# 0.8896882494004796

# In[86]:

# Recall
#
# ?
# Precision
#
# ?

# In[101]:

# # Predict probabilities
# def try_probab(X, y, clf_nofit):
#     X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=42)
#     clf = clf_nofit.fit(X_tr, y_tr)
#     y_prob = clf.predict_proba(X_val)
# #     for i in range(len(X)):
# #         display("y_true={0}, Predicted={1}".format(y[i], y_prob[i]))
#     display(pd.value_counts(y_prob[:, 1]))

# try_probab(X, y, SVC(gamma='scale', probability=True))
# # try_probab(X, y, RandomForestClassifier(n_estimators=100, n_jobs=-1))
# # try_probab(X, y, KNeighborsClassifier())

# # for i in range(len(Xnew)):
# #     print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))

# In[ ]:

# todo: calibration and discrimination
# https://github.com/ghuiber/churn/blob/master/churn_measurements.py
# from churn_measurements import calibration, discrimination
21.735294
104
0.728913
5dd62019e7ff928c4383fc35d24cbff743f0c13d
2,157
py
Python
airbyte-integrations/connectors/source-google-sheets/google_sheets_source/models/spreadsheet.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
null
null
null
airbyte-integrations/connectors/source-google-sheets/google_sheets_source/models/spreadsheet.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
4
2021-04-30T08:10:26.000Z
2021-04-30T13:53:34.000Z
airbyte-integrations/connectors/source-google-sheets/google_sheets_source/models/spreadsheet.py
rajatariya21/airbyte
11e70a7a96e2682b479afbe6f709b9a5fe9c4a8d
[ "MIT" ]
null
null
null
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import annotations

from typing import List, Optional

from pydantic import BaseModel, Extra, Field
26.62963
80
0.730644
5dd63a69cf7b02ed5bd4b36b349a9d84dec480ac
4,518
py
Python
pytrivia/trivia.py
Dnewman9/Python-Trivia-API
0af7f999cc4ab278fb0ac6fd64733ab168984e60
[ "MIT" ]
6
2018-01-15T15:17:56.000Z
2021-06-16T19:48:14.000Z
pytrivia/trivia.py
MaT1g3R/Python-Trivia-API
0af7f999cc4ab278fb0ac6fd64733ab168984e60
[ "MIT" ]
null
null
null
pytrivia/trivia.py
MaT1g3R/Python-Trivia-API
0af7f999cc4ab278fb0ac6fd64733ab168984e60
[ "MIT" ]
7
2017-05-15T23:41:43.000Z
2021-07-10T01:09:09.000Z
""" A simple python api wrapper for https://opentdb.com/ """ from aiohttp import ClientSession from requests import get from pytrivia.__helpers import decode_dict, get_token, make_request from pytrivia.enums import *
35.857143
79
0.607791
5dd6aca7ea5896f561da5d7ef0e8b1303417fa33
1,249
py
Python
utils.py
py-ranoid/practical-nlp
514fd4da3b72f26597d91cdb89704a849bf6b36d
[ "MIT" ]
null
null
null
utils.py
py-ranoid/practical-nlp
514fd4da3b72f26597d91cdb89704a849bf6b36d
[ "MIT" ]
null
null
null
utils.py
py-ranoid/practical-nlp
514fd4da3b72f26597d91cdb89704a849bf6b36d
[ "MIT" ]
null
null
null
import requests
import tarfile
import os
34.694444
64
0.602082
5dd6c916a8fdc58e1d4d7d9b990faa3a6330daf0
3,957
py
Python
spritecss/config.py
yostudios/Spritemapper
277cb76a14be639b6d7fa3191bc427409e72ad69
[ "MIT" ]
49
2015-01-22T14:27:32.000Z
2021-12-24T23:07:40.000Z
spritecss/config.py
tzuryby/Spritemapper
7cd3b68348a86982420b6231861fda4a0e676f35
[ "MIT" ]
2
2015-02-12T12:31:34.000Z
2015-04-12T10:43:17.000Z
spritecss/config.py
tzuryby/Spritemapper
7cd3b68348a86982420b6231861fda4a0e676f35
[ "MIT" ]
6
2015-04-03T07:29:54.000Z
2021-12-15T02:21:35.000Z
import shlex

from os import path
from itertools import imap, ifilter
from urlparse import urljoin

from .css import CSSParser, iter_events


def get_spritemap_url(self, fname):
    "Get output image URL for spritemap *fname*."
    return self.absurl(path.relpath(fname, self.root))

def get_css_out(self, fname):
    "Get output image filename for spritemap directory *fname*."
    (dirn, base) = path.split(fname)
    if "output_css" in self._data:
        (base, ext) = path.splitext(base)
        names = dict(filename=fname, dirname=dirn, basename=base, extension=ext)
        return self.normpath(self._data["output_css"].format(**names))
    else:
        return path.join(dirn, "sm_" + base)

def print_config(fname):
    from pprint import pprint
    from .css import CSSParser
    with open(fname, "rb") as fp:
        print "%s\n%s\n" % (fname, "=" * len(fname))
        pprint(dict(iter_css_config(CSSParser.read_file(fp))))
        print

def main():
    import sys
    for fn in sys.argv[1:]:
        print_config(fn)

if __name__ == "__main__":
    main()
31.656
77
0.608036
5dd72494fca93c6bb84fb81618dd74141e12e413
5,733
py
Python
plotting/make_bar_graph.py
DanielTakeshi/debridement-code
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
[ "MIT" ]
3
2017-09-29T01:41:20.000Z
2021-03-29T01:51:18.000Z
plotting/make_bar_graph.py
DanielTakeshi/debridement-code
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
[ "MIT" ]
null
null
null
plotting/make_bar_graph.py
DanielTakeshi/debridement-code
d1a946d1fa3c60b60284c977ecb2d6584e524ae2
[ "MIT" ]
3
2017-09-29T01:42:35.000Z
2019-10-20T07:10:44.000Z
""" A bar graph. (c) September 2017 by Daniel Seita """ import argparse from collections import defaultdict from keras.models import Sequential from keras.layers import Dense, Activation import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import sys np.set_printoptions(suppress=True, linewidth=200) # Some matplotlib settings. plt.style.use('seaborn-darkgrid') titlesize = 21 labelsize = 17 legendsize = 15 ticksize = 15 bar_width = 0.80 opacity = 1.0 error_config = {'ecolor': '0.0', 'linewidth':3.0} def deprecated(): """ This is a deprecated method, only to show how to possibly combine these into one plot. However, I find this unwieldly. """ fig, ax = plt.subplots() bar_width = 0.80 opacity = 0.5 error_config = {'ecolor': '0.3'} rects1 = plt.bar(np.array([0,1]), means_lin, bar_width, alpha=opacity, color='b', yerr=std_lin, error_kw=error_config, label='Lin') rects2 = plt.bar(np.array([3,4,5,6,7]), means_rfs, bar_width, alpha=opacity, color='r', yerr=std_rfs, error_kw=error_config, label='RF') rects3 = plt.bar(np.array([9,10]), means_dnn, bar_width, alpha=opacity, color='y', yerr=std_dnn, error_kw=error_config, label='DNN') plt.xticks(np.arange(11) + bar_width / 2, ('A','B','','D','E','F','G','','','J','K')) plt.xlabel('Group') plt.ylabel('Scores') plt.title('Scores by group and gender') plt.tight_layout() plt.legend() plt.savefig('figures/validation_set_results.png') if __name__ == "__main__": pp = argparse.ArgumentParser() pp.add_argument('--version', type=int) pp.add_argument('--kfolds', type=int, default=10) args = pp.parse_args() assert args.version is not None VERSION = str(args.version).zfill(2) file_name = 'results/results_kfolds10_v'+VERSION+'.npy' results = np.load(file_name)[()] print("results has keys: {}".format(results.keys())) plot(results, VERSION)
33.138728
80
0.580499
5dd728898f384c5addbd3fc04712cc8f4bb79103
998
py
Python
setup.py
tzengerink/groceries-api
a22cc3503006b87b731b956f6341d730b143bf10
[ "MIT" ]
null
null
null
setup.py
tzengerink/groceries-api
a22cc3503006b87b731b956f6341d730b143bf10
[ "MIT" ]
null
null
null
setup.py
tzengerink/groceries-api
a22cc3503006b87b731b956f6341d730b143bf10
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from setuptools import find_packages, setup
import os
import re

ROOT = os.path.dirname(__file__)
VERSION_RE = re.compile(r'''__version__ = \'([0-9.]+)\'''')


setup(
    name='groceries-api',
    version=get_version(),
    license='MIT',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'alembic==0.7.5.post2',
        'APScheduler==3.1.0',
        'Flask==0.10.1',
        'Flask-Cors==2.0.0',
        'Flask-SQLAlchemy==2.0',
        'gunicorn==19.3.0',
        'psycopg2==2.6.1',
        'PyJWT==1.1.0',
        'requests==2.8.1',
        'six==1.9.0',
    ],
    extras_require={
        'dev': {
            'coverage==3.7.1',
            'coveralls==0.5',
            'flake8==2.4.0',
            'mock==1.0.1',
            'pytest==2.7.0',
            'tox==2.1.1',
        },
    },
)
22.177778
72
0.516032
5dd847419564638f2f188cabc13087183aa80082
83,813
py
Python
toontown/suit/DistributedLawbotBoss.py
SuperM0use24/TT-CL-Edition
fdad8394f0656ae122b687d603f72afafd220c65
[ "MIT" ]
null
null
null
toontown/suit/DistributedLawbotBoss.py
SuperM0use24/TT-CL-Edition
fdad8394f0656ae122b687d603f72afafd220c65
[ "MIT" ]
1
2021-06-08T17:16:48.000Z
2021-06-08T17:16:48.000Z
toontown/suit/DistributedLawbotBoss.py
SuperM0use24/TT-CL-Edition
fdad8394f0656ae122b687d603f72afafd220c65
[ "MIT" ]
3
2021-06-03T05:36:36.000Z
2021-06-22T15:07:31.000Z
from direct.showbase.ShowBase import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import Functor
from direct.showbase.PythonUtil import StackTrace
from direct.gui.DirectGui import *
from panda3d.core import *
from libotp import *
from direct.fsm import FSM
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
import DistributedBossCog
from toontown.toonbase import TTLocalizer
import SuitDNA
from toontown.toon import Toon
from toontown.battle import BattleBase
from direct.directutil import Mopath
from direct.showutil import Rope
from toontown.distributed import DelayDelete
from toontown.battle import MovieToonVictory
from toontown.building import ElevatorUtils
from toontown.battle import RewardPanel
from toontown.toon import NPCToons
from direct.task import Task
import random
import math
from toontown.coghq import CogDisguiseGlobals
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownTimer

OneBossCog = None
45.157866
363
0.655889
5dd8d749d5dd08650d2aee4a619e3e875e2659a0
19,959
py
Python
tests/test_custom_rnncell.py
lightmatter-ai/tensorflow-onnx
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
[ "Apache-2.0" ]
null
null
null
tests/test_custom_rnncell.py
lightmatter-ai/tensorflow-onnx
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
[ "Apache-2.0" ]
null
null
null
tests/test_custom_rnncell.py
lightmatter-ai/tensorflow-onnx
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
[ "Apache-2.0" ]
1
2021-05-11T21:51:52.000Z
2021-05-11T21:51:52.000Z
# SPDX-License-Identifier: Apache-2.0

"""Unit Tests for custom rnns."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import *  # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2

# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ

if is_tf2():
    BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
    LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
    GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
    RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
    MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
    dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
    bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
    LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
    LSTMCell = tf.nn.rnn_cell.LSTMCell
    GRUCell = tf.nn.rnn_cell.LSTMCell
    RNNCell = tf.nn.rnn_cell.RNNCell
    MultiRNNCell = tf.contrib.rnn.MultiRNNCell
    dynamic_rnn = tf.nn.dynamic_rnn
    bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn


if __name__ == '__main__':
    unittest_main()
46.962353
119
0.584348
5dda086e2a6749797c92ff4afeb274d3586e3b33
536
py
Python
cookie-cutter/src/templates/template.py
noname34/CHARM_Project_Hazard_Perception_I
2d03d9e8911afad21818c6f837558503508a59bd
[ "Unlicense", "MIT" ]
null
null
null
cookie-cutter/src/templates/template.py
noname34/CHARM_Project_Hazard_Perception_I
2d03d9e8911afad21818c6f837558503508a59bd
[ "Unlicense", "MIT" ]
null
null
null
cookie-cutter/src/templates/template.py
noname34/CHARM_Project_Hazard_Perception_I
2d03d9e8911afad21818c6f837558503508a59bd
[ "Unlicense", "MIT" ]
null
null
null
#!/user/bin/env python3
# -*- coding: utf-8 -*-

#!/user/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Kevin Brgisser
# @Email:[email protected]
# @Date: 04.2020
# Context: CHARMPROJECT- Harzard perception

"""
Module documentation.
"""

# Imports
import sys
#import os

# Global variables

# Class declarations

# Function declarations

# Main body
if __name__ == '__main__':
    main()
14.888889
51
0.630597
5ddabeb7b320c12ce5eecb63db650328a9b8e392
903
py
Python
utils/gridpeak.py
siwill22/magSA
9f3a12e6ed971d67444804cad57734dc0b4772ff
[ "MIT" ]
null
null
null
utils/gridpeak.py
siwill22/magSA
9f3a12e6ed971d67444804cad57734dc0b4772ff
[ "MIT" ]
null
null
null
utils/gridpeak.py
siwill22/magSA
9f3a12e6ed971d67444804cad57734dc0b4772ff
[ "MIT" ]
null
null
null
import numpy
29.129032
78
0.447398
5ddc336e8c10627292e9d9762e105aa2a19572a4
262
py
Python
Chapter 10/trackbackLog.py
Miillky/automate_the_boring_stuff_with_python
284b074b0738c66f38b54fe0fc5f69b3446e7e43
[ "MIT" ]
null
null
null
Chapter 10/trackbackLog.py
Miillky/automate_the_boring_stuff_with_python
284b074b0738c66f38b54fe0fc5f69b3446e7e43
[ "MIT" ]
null
null
null
Chapter 10/trackbackLog.py
Miillky/automate_the_boring_stuff_with_python
284b074b0738c66f38b54fe0fc5f69b3446e7e43
[ "MIT" ]
null
null
null
import traceback

try:
    raise Exception('This is the error message.')
except:
    errorFile = open('./Chapter 10/errorInfo.txt', 'w')
    errorFile.write(traceback.format_exc())
    errorFile.close()
    print('The traceback info was written to errorInfo.txt')
32.75
60
0.709924
5dde2db2c5518f1b83b708f088e5f614029ac9a9
2,794
py
Python
Module_III/PySparkNetworkSimilarityClass.py
wuchiehhan/KDD2019-HandsOn-Tutorial
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
[ "MIT" ]
null
null
null
Module_III/PySparkNetworkSimilarityClass.py
wuchiehhan/KDD2019-HandsOn-Tutorial
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
[ "MIT" ]
null
null
null
Module_III/PySparkNetworkSimilarityClass.py
wuchiehhan/KDD2019-HandsOn-Tutorial
0377ae4b2a74e9cc08b15c983e4e0f59ab02debe
[ "MIT" ]
null
null
null
# Databricks notebook source
from pyspark.sql.types import *
from pyspark.sql import functions as F
import base64
import array

# COMMAND ----------

# s is a base64 encoded float[] with first element being the magnitude

# Register udf functions so that it could be used in dataframe
#
# Perform same computation as cosineSimilarity()
#

# COMMAND ----------

# MAGIC %md **NetworkSimilarity** class to compute Network Similarity

# COMMAND ----------

# Parameters:
#   resource: resource stream path
#   container: container name in Azure Storage (AS) account
#   account: Azure Storage (AS) account
#   sas: complete 'Blob service SAS URL' of the shared access signature (sas) for the container
#   key: access key for the container, if sas is specified, key is ignored
#
# Note:
#   resource does not have header
#   you need to provide value for either sas or key
#
class NetworkSimilarity(AzureStorageAccess):

    # constructor
    def __init__(self, resource, container, account, sas='', key=''):
        AzureStorageAccess.__init__(self, container, account, sas, key)
        schema = StructType()
        schema.add(StructField('EntityId', LongType(), False))
        schema.add(StructField('EntityType', StringType(), False))
        schema.add(StructField('Data', StringType(), False))
        self.df = spark.read.format('csv').options(header='false', delimiter='\t').schema(schema).load(self.getFullpath(resource))

    def getDataframe(self):
        return self.df

    def raiseErrorIfNotFound(self, row, e):
        if row is None:
            raise KeyError('entity ' + str(e) + ' not found')
33.261905
126
0.678597
5dde83861306805019c9d0827dc8148db30e9997
373
py
Python
fizzbuzz.py
vagnes/fizzbuzzgame
de72ffc5a21fbb3b1cfd930ef632b75697fa830f
[ "WTFPL" ]
null
null
null
fizzbuzz.py
vagnes/fizzbuzzgame
de72ffc5a21fbb3b1cfd930ef632b75697fa830f
[ "WTFPL" ]
null
null
null
fizzbuzz.py
vagnes/fizzbuzzgame
de72ffc5a21fbb3b1cfd930ef632b75697fa830f
[ "WTFPL" ]
null
null
null
print("Press q to quit") quit = False while quit is False: in_val = input("Please enter a positive integer.\n > ") if in_val is 'q': quit = True elif int(in_val) % 3 == 0 and int(in_val) % 5 == 0: print("FizzBuzz") elif int(in_val) % 5 == 0: print("Buzz") elif int(in_val) % 3 == 0: print("Fizz") else: pass
23.3125
59
0.530831
5ddf93a5acfa110cbd927feae9cad660c39b795d
926
py
Python
lesson10019_projects/pen/data/transition.py
muzudho/py-state-machine-practice
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
[ "MIT" ]
null
null
null
lesson10019_projects/pen/data/transition.py
muzudho/py-state-machine-practice
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
[ "MIT" ]
null
null
null
lesson10019_projects/pen/data/transition.py
muzudho/py-state-machine-practice
e31c066f4cf142b6b6c5ff273b56a0f89428c59e
[ "MIT" ]
null
null
null
from lesson14_projects.pen.data.const import (
    A,
    E_A,
    E_AN,
    E_IS,
    E_OVER,
    E_PEN,
    E_PIN,
    E_THAT,
    E_THIS,
    E_WAS,
    INIT,
    IS,
    PEN,
    THIS,
)

pen_transition_doc_v19 = {
    "title": "This is a pen",
    "entry_state": INIT,
    "data": {
        INIT: {
            E_OVER: [INIT],
            E_THAT: [INIT],
            E_THIS: [INIT, THIS],
            THIS: {
                E_OVER: [INIT],
                E_WAS: [INIT],
                E_IS: [INIT, THIS, IS],
                IS: {
                    E_OVER: [INIT],
                    E_AN: [INIT],
                    E_A: [INIT, THIS, IS, A],
                    A: {
                        E_OVER: [INIT],
                        E_PIN: [INIT],
                        E_PEN: [PEN],
                    },
                },
            },
        },
        PEN: {
            E_OVER: None,
        },
    },
}
19.702128
46
0.327214
5ddff0c682bfeb9cf9d9bdcf324ee0733eb92a14
2,899
py
Python
Animation/Main.py
olesmith/SmtC
dfae5097f02192b60aae05b9d02404fcfe893be3
[ "CC0-1.0" ]
null
null
null
Animation/Main.py
olesmith/SmtC
dfae5097f02192b60aae05b9d02404fcfe893be3
[ "CC0-1.0" ]
null
null
null
Animation/Main.py
olesmith/SmtC
dfae5097f02192b60aae05b9d02404fcfe893be3
[ "CC0-1.0" ]
null
null
null
import gd,os,time

from Html import Animation_Html
from Iteration import Animation_Iteration
from Write import Animation_Write

from Base import *
from Canvas2 import *
from Canvas2 import Canvas2
from Image import Image
from HTML import HTML

__Canvas__=None
23.762295
73
0.519489
5de1c133ca3046f5ca60bc9f85bbcefa4f2854dd
1,839
py
Python
pytorch_metric_learning/miners/distance_weighted_miner.py
junjungoal/pytorch_metric_learning
e56bb440d1ec63e13622025209135a788c6f51c1
[ "MIT" ]
1
2019-11-28T19:31:29.000Z
2019-11-28T19:31:29.000Z
pytorch_metric_learning/miners/distance_weighted_miner.py
junjungoal/pytorch_metric_learning
e56bb440d1ec63e13622025209135a788c6f51c1
[ "MIT" ]
null
null
null
pytorch_metric_learning/miners/distance_weighted_miner.py
junjungoal/pytorch_metric_learning
e56bb440d1ec63e13622025209135a788c6f51c1
[ "MIT" ]
null
null
null
#! /usr/bin/env python3

from .base_miner import BasePostGradientMiner
import torch
from ..utils import loss_and_miner_utils as lmu


# adapted from
# https://github.com/chaoyuaw/incubator-mxnet/blob/master/example/gluon/
# /embedding_learning/model.py
39.978261
85
0.657423
5de3cc8b6cc08416f6501e8a2abc20d6706d9dfa
1,037
py
Python
Keywords/__init__.py
cassie01/PumpLibrary
c2a4884a36f4c6c6552fa942143ae5d21c120b41
[ "Apache-2.0" ]
null
null
null
Keywords/__init__.py
cassie01/PumpLibrary
c2a4884a36f4c6c6552fa942143ae5d21c120b41
[ "Apache-2.0" ]
null
null
null
Keywords/__init__.py
cassie01/PumpLibrary
c2a4884a36f4c6c6552fa942143ae5d21c120b41
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
from .Alarm.alarm import Alarm
from .DeliveryView.bolus import Bolus
from .DeliveryView.info import Info
from .DeliveryView.infusion import Infusion
from .DeliveryView.infusion_parameter import InfusionParameter
from .DeliveryView.priming import Priming
from .HardwareControl.motor import Motor
from .MenuSettings.device_report import DeviceReport
from .MenuSettings.history_log import HistoryLog
from .MenuSettings.infusion_setting import InfusionSetting
from .MenuSettings.maintenance import Maintenance
from .MenuSettings.safety_setting import SafetySetting
from .MenuSettings.system_setting import SystemSetting
from .SensorControl.sensor import Sensor

__all__ = ["Alarm",
           "Bolus",
           "Info",
           "Infusion",
           "InfusionParameter",
           "Priming",
           "Motor",
           "DeviceReport",
           "HistoryLog",
           "InfusionSetting",
           "Maintenance",
           "SafetySetting",
           "SystemSetting",
           "Sensor",
           ]
31.424242
62
0.695275
5de3f2eb79030c2d37fe6eb8becce065096245d7
1,656
py
Python
src/responsibleai/rai_analyse/constants.py
Azure/automl-devplat2-preview
05f327fe4c2504e9d49001ce26d8b49627214138
[ "MIT" ]
7
2021-05-12T01:52:09.000Z
2021-12-22T17:22:14.000Z
src/responsibleai/rai_analyse/constants.py
Azure/automl-devplat2-preview
05f327fe4c2504e9d49001ce26d8b49627214138
[ "MIT" ]
5
2021-04-16T21:27:44.000Z
2021-04-26T03:17:44.000Z
src/responsibleai/rai_analyse/constants.py
Azure/automl-devplat2-preview
05f327fe4c2504e9d49001ce26d8b49627214138
[ "MIT" ]
null
null
null
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
35.234043
80
0.710145
5de40eed6f013ca3b73d1af645e0c517f3a9ec93
4,728
py
Python
pulsar/apps/data/redis/store.py
goodboy/pulsar
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
[ "BSD-3-Clause" ]
1
2020-11-30T07:36:57.000Z
2020-11-30T07:36:57.000Z
pulsar/apps/data/redis/store.py
goodboy/pulsar
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
[ "BSD-3-Clause" ]
null
null
null
pulsar/apps/data/redis/store.py
goodboy/pulsar
e4b42d94b7e262a165782747d65f8b39fb8d3ba9
[ "BSD-3-Clause" ]
null
null
null
from functools import partial

from pulsar import Connection, Pool, get_actor
from pulsar.utils.pep import to_string
from pulsar.apps.data import RemoteStore
from pulsar.apps.ds import redis_parser

from .client import RedisClient, Pipeline, Consumer, ResponseError
from .pubsub import RedisPubSub, RedisChannels


def client(self):
    '''Get a :class:`.RedisClient` for the Store'''
    return RedisClient(self)

def pipeline(self):
    '''Get a :class:`.Pipeline` for the Store'''
    return Pipeline(self)

def pubsub(self, protocol=None):
    return RedisPubSub(self, protocol=protocol)

def channels(self, protocol=None, **kw):
    return RedisChannels(self.pubsub(protocol=protocol), **kw)

def ping(self):
    return self.client().ping()

def flush(self):
    return self.execute('flushdb')

def close(self):
    '''Close all open connections.'''
    return self._pool.close()

def has_query(self, query_type):
    return query_type in self.supported_queries

def basekey(self, meta, *args):
    key = '%s%s' % (self.namespace, meta.table_name)
    postfix = ':'.join((to_string(p) for p in args if p is not None))
    return '%s:%s' % (key, postfix) if postfix else key

def meta(self, meta):
    '''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
    # indices = dict(((idx.attname, idx.unique) for idx in meta.indices))
    data = meta.as_dict()
    data['namespace'] = self.basekey(meta)
    return data


class CompiledQuery:

    def __init__(self, pipe, query):
        self.pipe = pipe
33.295775
78
0.635787
5de5717649c5fb1c3b234920122bfea85236921f
1,068
py
Python
tasks/migrations/0005_auto_20200616_0123.py
tschelbs18/fruitful
66635cd521ffc0990275e32298419bfc2167b90b
[ "MIT" ]
null
null
null
tasks/migrations/0005_auto_20200616_0123.py
tschelbs18/fruitful
66635cd521ffc0990275e32298419bfc2167b90b
[ "MIT" ]
4
2020-06-04T14:20:33.000Z
2021-09-22T19:09:22.000Z
tasks/migrations/0005_auto_20200616_0123.py
tschelbs18/fruitful
66635cd521ffc0990275e32298419bfc2167b90b
[ "MIT" ]
null
null
null
# Generated by Django 3.0.7 on 2020-06-16 05:23

from django.db import migrations, models
import django.utils.timezone
28.864865
93
0.601124
5de5910c5b5ea17215e0b0e1f87d78465a65ecbe
2,683
py
Python
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
boschresearch/pcg_gazebo_pkgs
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
[ "Apache-2.0", "BSD-3-Clause" ]
42
2019-06-26T09:46:03.000Z
2022-03-18T17:56:26.000Z
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
boschresearch/pcg_gazebo_pkgs
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
[ "Apache-2.0", "BSD-3-Clause" ]
9
2019-07-18T10:36:05.000Z
2020-10-02T15:26:32.000Z
pcg_libraries/src/pcg_gazebo/parsers/types/vector.py
boschresearch/pcg_gazebo_pkgs
1c112d01847ca4f8da61ce9b273e13d13bc7eb73
[ "Apache-2.0", "BSD-3-Clause" ]
2
2019-11-01T03:20:11.000Z
2020-10-15T23:23:44.000Z
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import XMLBase
import collections
40.044776
78
0.633619
5de5b5ee5bf23c10f66da04af7327075aad14c24
9,531
py
Python
tests/main/helpers/test_buyers_helpers.py
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
[ "MIT" ]
1
2021-05-06T22:37:05.000Z
2021-05-06T22:37:05.000Z
tests/main/helpers/test_buyers_helpers.py
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
[ "MIT" ]
108
2017-06-14T10:48:10.000Z
2021-06-11T08:55:25.000Z
tests/main/helpers/test_buyers_helpers.py
uk-gov-mirror/alphagov.digitalmarketplace-briefs-frontend
2325f01b1bdb13fb5b0afe7fe110c0be0c031da6
[ "MIT" ]
5
2017-06-27T15:13:11.000Z
2021-04-10T18:06:29.000Z
import mock
import pytest
from werkzeug.exceptions import NotFound

import app.main.helpers as helpers
from dmcontent.content_loader import ContentLoader
from dmtestutils.api_model_stubs import BriefStub, FrameworkStub, LotStub

content_loader = ContentLoader('tests/fixtures/content')
content_loader.load_manifest('dos', 'data', 'edit_brief')
questions_builder = content_loader.get_manifest('dos', 'edit_brief')
44.125
118
0.615255
5de70a07393091d4b0d1b81bb83f4335c31b6482
3,329
py
Python
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
devinrsmith/deephaven-core
3a6930046faf1cd556f62a914ce1cfd7860147b9
[ "MIT" ]
null
null
null
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
devinrsmith/deephaven-core
3a6930046faf1cd556f62a914ce1cfd7860147b9
[ "MIT" ]
1
2022-03-03T21:24:40.000Z
2022-03-03T21:24:54.000Z
Plot/src/test/java/io/deephaven/db/plot/example_plots/PlottingPQ.py
devinrsmith/deephaven-core
3a6930046faf1cd556f62a914ce1cfd7860147b9
[ "MIT" ]
null
null
null
import deephaven.TableTools as tt
import deephaven.Plot as plt

t = tt.emptyTable(50)\
    .update("X = i + 5", "XLow = X -1", "XHigh = X + 1",
            "Y = Math.random() * 5", "YLow = Y - 1", "YHigh = Y + 1",
            "USym = i % 2 == 0 ? `AAPL` : `MSFT`")

p = plt.plot("S1", t, "X", "Y").lineColor("black").show()
p2 = plt.plot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
p3 = plt.plot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
p4 = plt.plot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()

p4 = plt.plot3d("S1", t, "X", "X", "Y").show()

pBy = plt.plotBy("S1", t, "X", "Y", "USym").show()
pBy = plt.plot3dBy("S1", t, "X", "X", "Y", "USym").show()

cp = plt.catPlot("S1", t, "X", "Y").lineColor("black").show()
cp2 = plt.catPlot("S1", t, "X", "Y").plotStyle("bar").gradientVisible(True).show()
cp3 = plt.catPlot("S1", t, "X", "Y").plotStyle("scatter").pointColor("black").pointSize(2).show()
cp4 = plt.catPlot("S1", t, "X", "Y").plotStyle("area").seriesColor("red").show()

cp = plt.catPlot3d("S1", t, "X", "X", "Y").show()

cpBy = plt.catPlotBy("S1", t, "X", "Y", "USym").show()
cpBy = plt.catPlot3dBy("S1", t, "X", "X", "Y", "USym").show()

pp = plt.piePlot("S1", t, "X", "Y")

chp = plt.catHistPlot("S1", t, "X").show()

hp = plt.histPlot("S1", t, "X", 5).show()
hp = plt.histPlot("S1", t, "X", 0, 10, 5).show()

ep = plt.errorBarXY("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh").show()
epBy = plt.errorBarXYBy("S1", t, "X", "XLow", "XHigh", "Y", "YLow", "YHigh", "USym").show()
ep2 = plt.errorBarX("S1", t, "X", "XLow", "XHigh", "Y").show()
epBy2 = plt.errorBarXBy("S1", t, "X", "XLow", "XHigh", "Y", "USym").show()
ep3 = plt.errorBarY("S1", t, "X", "Y", "YLow", "YHigh").show()
epBy3 = plt.errorBarYBy("S1", t, "X", "Y", "YLow", "YHigh", "USym").show()

doubles = [3, 4, 3, 5, 4, 5]
time = 1491946585000000000
t = tt.newTable(tt.col("USym", ["A", "B", "A", "B", "A", "B"]),
                tt.doubleCol("Open", doubles),
                tt.doubleCol("High", doubles),
                tt.doubleCol("Low", doubles),
                tt.doubleCol("Close", doubles))
t = t.updateView("Time = new DBDateTime(time + (MINUTE * i))")

ohlc = plt.ohlcPlot("Test1", t, "Time", "Open", "High", "Low", "Close")

ohlcPlotBy = plt.figure().newChart(0)\
    .chartTitle("Chart Title")\
    .newAxes()\
    .xLabel("X")\
    .yLabel("Y")\
    .ohlcPlotBy("Test1", t, "Time", "Open", "High", "Low", "Close", "USym")

categories = ["Samsung", "Others", "Nokia", "Apple", "MSFT"]
valuesD = [27.8, 55.3, 16.8, 17.1, 23.1]
valuesI = [27, 55, 16, 17, 15]

ap = plt.plot("S1", valuesD, valuesI).show()
ap = plt.plot3d("S1", valuesI, valuesI, valuesI).show()

acp = plt.catPlot("S1", categories, valuesI).show()
acp2 = plt.catPlot3d("S1", categories, categories, valuesD).show()

achp = plt.catHistPlot("S1", categories).show()

app = plt.figure().xLabel("X").yLabel("Y").piePlot("S1", categories, valuesI).pointLabelFormat("{0}").show()

aep = plt.errorBarXY("S1", valuesD, valuesD, valuesD, valuesD, valuesD, valuesD).show()
aep2 = plt.errorBarX("S1", valuesD, valuesD, valuesD, valuesD).show()
aep3 = plt.errorBarY("S1", valuesD, valuesD, valuesD, valuesD).show()

hp = plt.histPlot("S1", valuesD, 5).show()
hp = plt.histPlot("S1", valuesD, 0, 10, 5).show()
hp = plt.histPlot("S1", valuesI, 5).show()
37.829545
153
0.578252
5de7879bccf37dcddacbf558d1addbcf9aa0f808
1,366
py
Python
rhoci/test/routes.py
ahmedmagdyawaad/redhat-ci-dashboard
a9c0445add4e99bb44a8075752a62176968278df
[ "Apache-2.0" ]
8
2017-06-29T19:38:40.000Z
2021-07-25T18:55:37.000Z
rhoci/test/routes.py
ahmedmagdyawaad/redhat-ci-dashboard
a9c0445add4e99bb44a8075752a62176968278df
[ "Apache-2.0" ]
39
2017-06-21T07:35:02.000Z
2018-02-26T11:25:03.000Z
rhoci/test/routes.py
ahmedmagdyawaad/redhat-ci-dashboard
a9c0445add4e99bb44a8075752a62176968278df
[ "Apache-2.0" ]
7
2018-01-24T10:31:00.000Z
2021-09-18T12:27:46.000Z
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from flask import current_app as app
from flask import render_template
from flask import url_for
import logging

LOG = logging.getLogger(__name__)

from rhoci.test import bp  # noqa
32.52381
78
0.694729
5de7a1ab9ad6ce3cc45b32937415c25c0fb99a65
546
py
Python
mitmproxy/net/http/http1/__init__.py
aarnaut/mitmproxy
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
[ "MIT" ]
null
null
null
mitmproxy/net/http/http1/__init__.py
aarnaut/mitmproxy
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
[ "MIT" ]
null
null
null
mitmproxy/net/http/http1/__init__.py
aarnaut/mitmproxy
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
[ "MIT" ]
null
null
null
from .read import (
    read_request_head,
    read_response_head,
    connection_close,
    expected_http_body_size,
    validate_headers,
)
from .assemble import (
    assemble_request,
    assemble_request_head,
    assemble_response,
    assemble_response_head,
    assemble_body,
)


__all__ = [
    "read_request_head",
    "read_response_head",
    "connection_close",
    "expected_http_body_size",
    "validate_headers",
    "assemble_request",
    "assemble_request_head",
    "assemble_response",
    "assemble_response_head",
    "assemble_body",
]
21.84
50
0.727106
5de7e5e6d54e182aae7ef185c563685a2425fd3b
1,211
py
Python
request_token/migrations/0009_requesttokenerror.py
alex-hutton/django-request-token
299c4cb22ce3012c7ef995a648e5b1ea6b8a84d7
[ "MIT" ]
null
null
null
request_token/migrations/0009_requesttokenerror.py
alex-hutton/django-request-token
299c4cb22ce3012c7ef995a648e5b1ea6b8a84d7
[ "MIT" ]
2
2019-11-13T22:22:41.000Z
2019-12-02T22:19:56.000Z
request_token/migrations/0009_requesttokenerror.py
hongquan/django-request-token
76a5f8fce268ff252900341c7dcd7e7d442effe1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-21 19:33
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
44.851852
210
0.673823
5de81bead5f0058007dc4a5e3ad313c7ed6b6535
191
py
Python
01-basic-programs/04-lines.py
ncodeitgithub1/python-get-hands-dirty-programs
c9edb9e0bc9b2580737ca185935427343c550f01
[ "Apache-2.0" ]
null
null
null
01-basic-programs/04-lines.py
ncodeitgithub1/python-get-hands-dirty-programs
c9edb9e0bc9b2580737ca185935427343c550f01
[ "Apache-2.0" ]
null
null
null
01-basic-programs/04-lines.py
ncodeitgithub1/python-get-hands-dirty-programs
c9edb9e0bc9b2580737ca185935427343c550f01
[ "Apache-2.0" ]
1
2021-07-19T13:20:34.000Z
2021-07-19T13:20:34.000Z
#4 lines: Fibonacci, tuple assignment
parents, babies = (1, 1)
while babies < 100:
    print ('This generation has {0} babies'.format(babies))
    parents, babies = (babies, parents + babies)
38.2
59
0.691099
5de8ea4c838b0533ab68d0c0085a12cb95b9a807
896
py
Python
winter/controller.py
EvgenySmekalin/winter
24b6a02f958478547a4a120324823743a1f7e1a1
[ "MIT" ]
1
2020-03-28T14:54:28.000Z
2020-03-28T14:54:28.000Z
winter/controller.py
EvgenySmekalin/winter
24b6a02f958478547a4a120324823743a1f7e1a1
[ "MIT" ]
null
null
null
winter/controller.py
EvgenySmekalin/winter
24b6a02f958478547a4a120324823743a1f7e1a1
[ "MIT" ]
null
null
null
import typing

from .core import Component

_Controller = typing.TypeVar('_Controller')
_ControllerType = typing.Type[_Controller]
ControllerFactory = typing.NewType('ControllerFactory', typing.Callable[[typing.Type], object])

_controller_factory: typing.Optional[ControllerFactory] = None
30.896552
95
0.809152
5de9426d377676b21fdbfe522c80d5ca38d85f47
7,000
bzl
Python
go/def.bzl
bobg/rules_go
fd11dd2768669dc2cc1f3a11f2b0b81d84e81c32
[ "Apache-2.0" ]
null
null
null
go/def.bzl
bobg/rules_go
fd11dd2768669dc2cc1f3a11f2b0b81d84e81c32
[ "Apache-2.0" ]
1
2022-02-18T15:47:32.000Z
2022-02-18T15:47:32.000Z
go/def.bzl
bobg/rules_go
fd11dd2768669dc2cc1f3a11f2b0b81d84e81c32
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Public definitions for Go rules. All public Go rules, providers, and other definitions are imported and re-exported in this file. This allows the real location of definitions to change for easier maintenance. Definitions outside this file are private unless otherwise noted, and may change without notice. """ load( "//go/private:context.bzl", _go_context = "go_context", ) load( "//go/private:providers.bzl", _GoArchive = "GoArchive", _GoArchiveData = "GoArchiveData", _GoLibrary = "GoLibrary", _GoPath = "GoPath", _GoSDK = "GoSDK", _GoSource = "GoSource", ) load( "//go/private/rules:sdk.bzl", _go_sdk = "go_sdk", ) load( "//go/private:go_toolchain.bzl", _declare_toolchains = "declare_toolchains", _go_toolchain = "go_toolchain", ) load( "//go/private/rules:wrappers.bzl", _go_binary_macro = "go_binary_macro", _go_library_macro = "go_library_macro", _go_test_macro = "go_test_macro", ) load( "//go/private/rules:source.bzl", _go_source = "go_source", ) load( "//extras:embed_data.bzl", _go_embed_data = "go_embed_data", ) load( "//go/private/tools:path.bzl", _go_path = "go_path", ) load( "//go/private/rules:library.bzl", _go_tool_library = "go_tool_library", ) load( "//go/private/rules:nogo.bzl", _nogo = "nogo_wrapper", ) # TOOLS_NOGO is a list of all analysis passes in # golang.org/x/tools/go/analysis/passes. # This is not backward compatible, so use caution when depending on this -- # new analyses may discover issues in existing builds. TOOLS_NOGO = [ "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library", "@org_golang_x_tools//go/analysis/passes/assign:go_default_library", "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library", "@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library", "@org_golang_x_tools//go/analysis/passes/bools:go_default_library", "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library", # TODO(#2396): pass raw cgo sources to cgocall and re-enable. 
# "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library", "@org_golang_x_tools//go/analysis/passes/composite:go_default_library", "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library", "@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library", "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library", "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library", "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library", "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library", "@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library", "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library", "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library", "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library", "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library", "@org_golang_x_tools//go/analysis/passes/nilness:go_default_library", "@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library", "@org_golang_x_tools//go/analysis/passes/printf:go_default_library", "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library", "@org_golang_x_tools//go/analysis/passes/shift:go_default_library", "@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library", "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library", "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library", "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library", "@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library", "@org_golang_x_tools//go/analysis/passes/tests:go_default_library", "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library", "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library", "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library", "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library", ] # Current version or next version to be tagged. Gazelle and other tools may # check this to determine compatibility. RULES_GO_VERSION = "0.30.0" declare_toolchains = _declare_toolchains go_context = _go_context go_embed_data = _go_embed_data go_sdk = _go_sdk go_tool_library = _go_tool_library go_toolchain = _go_toolchain nogo = _nogo # See go/providers.rst#GoLibrary for full documentation. GoLibrary = _GoLibrary # See go/providers.rst#GoSource for full documentation. GoSource = _GoSource # See go/providers.rst#GoPath for full documentation. GoPath = _GoPath # See go/providers.rst#GoArchive for full documentation. GoArchive = _GoArchive # See go/providers.rst#GoArchiveData for full documentation. GoArchiveData = _GoArchiveData # See go/providers.rst#GoSDK for full documentation. GoSDK = _GoSDK # See docs/go/core/rules.md#go_library for full documentation. go_library = _go_library_macro # See docs/go/core/rules.md#go_binary for full documentation. go_binary = _go_binary_macro # See docs/go/core/rules.md#go_test for full documentation. go_test = _go_test_macro # See docs/go/core/rules.md#go_test for full documentation. go_source = _go_source # See docs/go/core/rules.md#go_path for full documentation. go_path = _go_path
37.037037
171
0.762143
5deb3af9396589471b73ff049da7ac957d8d19d7
14,680
py
Python
anyway/parsers/united.py
ayalapol/anyway
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
[ "BSD-3-Clause" ]
null
null
null
anyway/parsers/united.py
ayalapol/anyway
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
[ "BSD-3-Clause" ]
null
null
null
anyway/parsers/united.py
ayalapol/anyway
ebf2436a8f9b152ae8f4d051c129bac754cb8cc1
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import calendar import csv from datetime import datetime import os from flask_sqlalchemy import SQLAlchemy from sqlalchemy import and_ from ..constants import CONST from ..models import AccidentMarker from ..utilities import init_flask, decode_hebrew, open_utf8 from ..import importmail from xml.dom import minidom import math import requests import logging ############################################################################################ # United.py is responsible for the parsing and deployment of "united hatzala" data to the DB ############################################################################################ PROVIDER_CODE = CONST.UNITED_HATZALA_CODE TIME_ZONE = 2 # convert IMS hours code to hours RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15} WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9, "12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18, "25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7, "34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28, "43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34, "52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36, "62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47, "72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57, "82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67, "92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75} def parse_date(created): """ :param created: Date & Time string from csv :return: Python datetime object """ global time global hour DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M'] for date_format in DATE_FORMATS: try: if date_format == '%Y-%m-%d %H:%M:%S': time = datetime.strptime(str(created)[:-4], date_format) hour = time.strftime('%H') hour = int(hour) else: time = datetime.strptime(str(created)[:-3], date_format) hour = time.strftime('%H') hour = int(hour) if str(created).endswith('AM') else int(hour) + 12 break except ValueError: pass return datetime(time.year, time.month, time.day, hour, time.minute, 0) CSVMAP = [ {"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9}, {"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8}, ] def import_to_db(collection, path): """ :param path: Local files directory ('united_path' on main() below) :return: length of DB entries after execution """ app = init_flask() db = SQLAlchemy(app) accidents = list(create_accidents(collection, path)) if not accidents: return 0 new_ids = [m["id"] for m in accidents if 0 == db.session.query(AccidentMarker).filter(and_(AccidentMarker.id == m["id"], AccidentMarker.provider_code == m["provider_code"])).count()] if not new_ids: logging.info("\t\tNothing loaded, all accidents already in DB") return 0 db.session.execute(AccidentMarker.__table__.insert(), [m for m in accidents if m["id"] in new_ids]) db.session.commit() return len(new_ids) def update_db(collection): """ :return: length of DB entries after execution """ app = init_flask() db = 
SQLAlchemy(app) united = db.session.query(AccidentMarker).filter(AccidentMarker.provider_code == 2) for accident in united: if not accident.weather: accident.weather = process_weather_data(collection, accident.latitude, accident.longitude) db.session.commit() logging.info("\tFinished commiting the changes") def main(light=True, username='', password='', lastmail=False): """ Calls importmail.py prior to importing to DB """ collection = retrieve_ims_xml() if not light: logging.info("Importing data from mail...") importmail.main(username, password, lastmail) united_path = "static/data/united/" total = 0 logging.info("Loading United accidents...") for united_file in os.listdir(united_path): if united_file.endswith(".csv"): total += import_to_db(collection, united_path + united_file) logging.info("\tImported {0} items".format(total)) update_db(collection)
40.891365
136
0.596322
5deb5f7aaf6a1389fadf9c9089ff41e73863dbba
952
py
Python
libact/query_strategies/tests/test_variance_reduction.py
joequant/libact
4fbf4d59fd0d4e23858b264de2f35f674c50445b
[ "BSD-2-Clause" ]
1
2019-05-09T13:00:45.000Z
2019-05-09T13:00:45.000Z
libact/query_strategies/tests/test_variance_reduction.py
DunZhang/libact
e37e9ed6c36febe701d84b2d495c958ab02f0bc8
[ "BSD-2-Clause" ]
null
null
null
libact/query_strategies/tests/test_variance_reduction.py
DunZhang/libact
e37e9ed6c36febe701d84b2d495c958ab02f0bc8
[ "BSD-2-Clause" ]
1
2021-01-18T20:07:57.000Z
2021-01-18T20:07:57.000Z
import unittest

from numpy.testing import assert_array_equal
import numpy as np

from libact.base.dataset import Dataset
from libact.models import LogisticRegression
from libact.query_strategies import VarianceReduction
from .utils import run_qs


if __name__ == '__main__':
    unittest.main()
31.733333
77
0.615546
5dec35ee70a7a827dfe8596bcb69fa8833b6491d
15,992
py
Python
hysds/log_utils.py
fgreg/hysds
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
[ "Apache-2.0" ]
null
null
null
hysds/log_utils.py
fgreg/hysds
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
[ "Apache-2.0" ]
null
null
null
hysds/log_utils.py
fgreg/hysds
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
[ "Apache-2.0" ]
null
null
null
from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from builtins import open from builtins import str from future import standard_library standard_library.install_aliases() import os import re import json import copy import socket import msgpack import traceback import types import backoff from datetime import datetime from uuid import uuid4 from redis import BlockingConnectionPool, StrictRedis, RedisError from celery.utils.log import get_task_logger import hysds from hysds.celery import app from prov_es.model import get_uuid, ProvEsDocument # logger logger = get_task_logger(__name__) # redis connection pools JOB_STATUS_POOL = None JOB_INFO_POOL = None WORKER_STATUS_POOL = None EVENT_STATUS_POOL = None # job status key template JOB_STATUS_KEY_TMPL = "hysds-job-status-%s" # worker status key template WORKER_STATUS_KEY_TMPL = "hysds-worker-status-%s" # task worker key template TASK_WORKER_KEY_TMPL = "hysds-task-worker-%s" def backoff_max_value(): """Return max value for backoff.""" return app.conf.BACKOFF_MAX_VALUE def backoff_max_tries(): """Return max tries for backoff.""" return app.conf.BACKOFF_MAX_TRIES def hard_time_limit_gap(): """Return minimum gap time after soft time limit.""" return app.conf.HARD_TIME_LIMIT_GAP def ensure_hard_time_limit_gap(soft_time_limit, time_limit): """Ensure hard time limit gap.""" gap = hard_time_limit_gap() if soft_time_limit is not None and (time_limit is None or time_limit <= soft_time_limit+gap): time_limit = soft_time_limit + gap return soft_time_limit, time_limit def set_redis_job_status_pool(): """Set redis connection pool for job status.""" global JOB_STATUS_POOL if JOB_STATUS_POOL is None: JOB_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_job_info_pool(): """Set redis connection pool for job info metrics.""" global JOB_INFO_POOL if JOB_INFO_POOL is None: JOB_INFO_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_INFO_URL) def set_redis_worker_status_pool(): """Set redis connection pool for worker status.""" global WORKER_STATUS_POOL if WORKER_STATUS_POOL is None: WORKER_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def set_redis_event_status_pool(): """Set redis connection pool for event status.""" global EVENT_STATUS_POOL if EVENT_STATUS_POOL is None: EVENT_STATUS_POOL = BlockingConnectionPool.from_url( app.conf.REDIS_JOB_STATUS_URL) def log_prov_es(job, prov_es_info, prov_es_file): """Log PROV-ES document. Create temp PROV-ES document to populate attributes that only the worker has access to (e.g. 
PID).""" # create PROV-ES doc to generate attributes that only verdi know ps_id = "hysds:%s" % get_uuid(job['job_id']) bundle_id = "hysds:%s" % get_uuid('bundle-%s' % job['job_id']) doc = ProvEsDocument() # get bundle #bndl = doc.bundle(bundle_id) bndl = None # create sofware agent sa_label = "hysds:pge_wrapper/%s/%d/%s" % (job['job_info']['execute_node'], job['job_info']['pid'], datetime.utcnow().isoformat()) sa_id = "hysds:%s" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(job['job_info']['pid']), job['job_info']['execute_node'], role=job.get('username', None), label=sa_label, bundle=bndl) # create processStep doc.processStep(ps_id, job['job_info']['cmd_start'], job['job_info']['cmd_end'], [], sa_id, None, [], [], bundle=bndl, prov_type="hysds:%s" % job['type']) # get json pd = json.loads(doc.serialize()) # update software agent and process step if 'bundle' in prov_es_info: if len(prov_es_info['bundle']) == 1: bundle_id_orig = list(prov_es_info['bundle'].keys())[0] # update software agent prov_es_info['bundle'][bundle_id_orig].setdefault( 'agent', {}).update(pd['bundle'][bundle_id]['agent']) # update wasAssociatedWith prov_es_info['bundle'][bundle_id_orig].setdefault( 'wasAssociatedWith', {}).update(pd['bundle'][bundle_id]['wasAssociatedWith']) # update activity if 'activity' in prov_es_info['bundle'][bundle_id_orig]: if len(prov_es_info['bundle'][bundle_id_orig]['activity']) == 1: ps_id_orig = list( prov_es_info['bundle'][bundle_id_orig]['activity'].keys())[0] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:startTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:startTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:endTime'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:endTime'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig]: prov_es_info['bundle'][bundle_id_orig]['activity'][ps_id_orig][ 'prov:type'] = pd['bundle'][bundle_id]['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith']: if prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['bundle'][bundle_id_orig]['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['bundle'][bundle_id_orig]['activity'].update( pd['bundle'][bundle_id]['activity']) else: prov_es_info['bundle'][bundle_id_orig]['activity'] = pd['bundle'][bundle_id]['activity'] else: # update software agent prov_es_info.setdefault('agent', {}).update(pd['agent']) # update wasAssociatedWith prov_es_info.setdefault('wasAssociatedWith', {}).update( pd['wasAssociatedWith']) # update process step if 'activity' in prov_es_info: if len(prov_es_info['activity']) == 1: ps_id_orig = list(prov_es_info['activity'].keys())[0] prov_es_info['activity'][ps_id_orig]['prov:startTime'] = pd['activity'][ps_id]['prov:startTime'] prov_es_info['activity'][ps_id_orig]['prov:endTime'] = pd['activity'][ps_id]['prov:endTime'] prov_es_info['activity'][ps_id_orig]['hysds:job_id'] = job['job_id'] 
prov_es_info['activity'][ps_id_orig]['hysds:job_type'] = job['type'] prov_es_info['activity'][ps_id_orig]['hysds:job_url'] = job['job_info']['job_url'] prov_es_info['activity'][ps_id_orig]['hysds:mozart_url'] = app.conf.MOZART_URL if 'prov:type' not in prov_es_info['activity'][ps_id_orig]: prov_es_info['activity'][ps_id_orig]['prov:type'] = pd['activity'][ps_id]['prov:type'] # update wasAssociatedWith activity ids for waw_id in prov_es_info['wasAssociatedWith']: if prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] == ps_id: prov_es_info['wasAssociatedWith'][waw_id]['prov:activity'] = ps_id_orig else: prov_es_info['activity'].update(pd['activity']) else: prov_es_info['activity'] = pd['activity'] # write prov with open(prov_es_file, 'w') as f: json.dump(prov_es_info, f, indent=2) def log_publish_prov_es(prov_es_info, prov_es_file, prod_path, pub_urls, prod_metrics, objectid): """Log publish step in PROV-ES document.""" # create PROV-ES doc doc = ProvEsDocument(namespaces=prov_es_info['prefix']) # get bundle #bndl = doc.bundle(bundle_id) bndl = None # add input entity execute_node = socket.getfqdn() prod_url = "file://%s%s" % (execute_node, prod_path) input_id = "hysds:%s" % get_uuid(prod_url) input_ent = doc.granule(input_id, None, [prod_url], [], None, None, None, label=os.path.basename(prod_url), bundle=bndl) # add output entity output_id = "hysds:%s" % get_uuid(pub_urls[0]) output_ent = doc.product(output_id, None, [pub_urls[0]], [], None, None, None, label=objectid, bundle=bndl) # software and algorithm algorithm = "eos:product_publishing" software_version = hysds.__version__ software_title = "%s v%s" % (hysds.__description__, software_version) software = "eos:HySDS-%s" % software_version software_location = hysds.__url__ doc.software(software, [algorithm], software_version, label=software_title, location=software_location, bundle=bndl) # create sofware agent pid = os.getpid() sa_label = "hysds:publish_dataset/%s/%d/%s" % (execute_node, pid, prod_metrics['time_start']) sa_id = "hysds:%s" % get_uuid(sa_label) doc.softwareAgent(sa_id, str(pid), execute_node, role="invoked", label=sa_label, bundle=bndl) # create processStep job_id = "publish_dataset-%s" % os.path.basename(prod_path) doc.processStep("hysds:%s" % get_uuid(job_id), prod_metrics['time_start'], prod_metrics['time_end'], [software], sa_id, None, [input_id], [output_id], label=job_id, bundle=bndl, prov_type="hysds:publish_dataset") # get json pd = json.loads(doc.serialize()) # update input entity orig_ent = prov_es_info.get('entity', {}).get(input_id, {}) pd['entity'][input_id].update(orig_ent) # update output entity for attr in orig_ent: if attr in ('prov:location', 'prov:label', 'prov:type'): continue pd['entity'][output_id][attr] = orig_ent[attr] # write prov with open(prov_es_file, 'w') as f: json.dump(pd, f, indent=2)
36.763218
128
0.619497
5deeffa5857206493c1d342dae064f6fd87a3184
8,920
py
Python
openstack_dashboard/api/rest/swift.py
CplusShen/aurora-horizon
8df16b3b87097d5a19bae3752d4b341ac64bda75
[ "Apache-2.0" ]
null
null
null
openstack_dashboard/api/rest/swift.py
CplusShen/aurora-horizon
8df16b3b87097d5a19bae3752d4b341ac64bda75
[ "Apache-2.0" ]
12
2022-03-22T07:28:29.000Z
2022-03-22T07:29:55.000Z
openstack_dashboard/api/rest/swift.py
CplusShen/aurora-horizon
8df16b3b87097d5a19bae3752d4b341ac64bda75
[ "Apache-2.0" ]
null
null
null
# Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""API for the swift service.
"""

import os

from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic

import six

from horizon import exceptions

from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift


class UploadObjectForm(forms.Form):
    file = forms.FileField(required=False)
31.971326
79
0.610762
5def303cbd1f1433f2580e86e412f8af092aba1f
5,621
py
Python
datagen.py
kuangliu/pytorch-ssd
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
[ "MIT" ]
124
2017-02-16T01:53:14.000Z
2022-02-22T12:48:13.000Z
datagen.py
droogg/pytorch-ssd
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
[ "MIT" ]
10
2017-07-04T01:38:56.000Z
2021-08-03T09:34:34.000Z
datagen.py
droogg/pytorch-ssd
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
[ "MIT" ]
43
2017-07-31T10:46:23.000Z
2021-02-16T14:12:42.000Z
'''Load image/class/box from a annotation file.

The annotation file is organized as:
    image_name #obj xmin ymin xmax ymax class_index ..
'''
from __future__ import print_function

import os
import sys
import os.path
import random

import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms

from encoder import DataEncoder
from PIL import Image, ImageOps
31.9375
81
0.534424
5defd443987097ce80f96a0e6f43dc63945abf24
13,258
py
Python
lingvo/core/builder.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
2,611
2018-10-16T20:14:10.000Z
2022-03-31T14:48:41.000Z
lingvo/core/builder.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
249
2018-10-27T06:02:29.000Z
2022-03-30T18:00:39.000Z
lingvo/core/builder.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
436
2018-10-25T05:31:45.000Z
2022-03-31T07:26:03.000Z
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A library to build composite layers. WARNING: The builder pattern is still experimental and we need to gain experience on when to use and when not to use. Please discuss w/ teammates before using it to build complicated layers. """ import functools from lingvo.core import activations from lingvo.core import builder_layers from lingvo.core import hyperparams from lingvo.core import layers from lingvo.core import py_utils from lingvo.core import tshape ###################################################################### # Layers to compose multiple layers. # # Sub-classes are discouraged to override these composition method. ###################################################################### def _Rep(self, name, repeat, *subs): r"""Connects sub-layers sequentially and repeat multiple times. E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have the same structure as the given sa, but sa1 and sa2 do not share the same weight. Args: name: The layer name. repeat: Repeat \*subs this many times in the compose layer. *subs: A list of sub-layers. Returns: The param for the composed layer. """ iterations = [] for i in range(repeat): iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs])) return self._Seq(name, *iterations) def _Seq(self, name, *subs): """Connects sub-layers sequentially.""" return builder_layers.SequentialLayer.Params().Set( name=name, sub=list(subs)) def _Graph(self, name, input_endpoints, output_endpoints, *signature_sub_param_list): """Connects sub-layers into a data flow graph.""" return builder_layers.GraphLayer.Params().Set( name=name, input_endpoints=input_endpoints, output_endpoints=output_endpoints, sub=list(signature_sub_param_list)) def _Id(self, name): """Identity. (t_1, ..., t_n) -> (t1, ..., t_n).""" return self._Seq(name) def _Arg(self, name, index): """Picks index-th element. (t_1, ..., t_n) -> (t_{index},).""" return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index]) def _Par(self, name, *subs): """y = (f1, f2, ..., fn)(x). We feed the input tuple to all sub-layers and concatenates their output tuples into one tuple. Args: name: The layer name. *subs: A list of sub-layers. Returns: The param for the composed layer. """ return builder_layers.ParallelLayer.Params().Set( name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta) def _Fn(self, name, fn, fn_out=None, fn_flops=None): """y = fn(x). Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input tuple. Typically, fn is a very simple python function. This layer can be used for prototyping but we advice to implement the logic as a sub-class of BaseLayer for all established layers as FnLayer can't be serialized. Args: name: The layer name. 
fn: A lambda tuple(Tensor) -> tuple(Tensor). fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape) fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn. If None, we assume flops == sum of elements in the inputs. Returns: The param for the composed layer. """ def FnMeta(*shapes): """A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}.""" if fn_out: out_shapes = fn_out(*shapes) if isinstance(out_shapes, tshape.Shape): out_shapes = (out_shapes,) else: out_shapes = shapes if fn_flops: flops = fn_flops(*shapes) else: flops = sum([s.size for s in shapes]) return py_utils.NestedMap(flops=flops, out_shapes=out_shapes) return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta) def _Save(self, name): """Returns a layer from which the activation and gradient can be accessed.""" return layers.FetchLayer.Params().Set(name=name) def _AddFetches(self, name, body, fetches): """Fetches saved activations in the body sub-layer. E.g.: _AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...), _Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...), _Output('output', ...)), ['layer1_out', 'layer2_out']) The layer returns the stack's final output together with intermediate activations from layer1_out and layer2_out. Args: name: This layer's name. body: The sub-layer. fetches: A list of fetch names inside the sub-layer body. Returns: A layer whose outputs correspond to the activations of fetch points in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM]. """ return builder_layers.BranchLayer.Params().Set( name=name, body=body, fetches=fetches) def _Rematerialize(self, name, body): """Forces rematerialization on FProp of the body layer.""" return builder_layers.RematerializationLayer.Params().Set( name=name, body=body) def _BatchParallel(self, name, sub): """Splits the batch and compute the forward pass on multiple devices. Args: name: This layer's name. sub: The sub-layer. Returns: A BatchParallel layer which splits the batch and computes the forward pass on multiple devices. """ return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub) def _PrintShape(self, name): """Print FProp input shape information.""" return builder_layers.PrintShapeLayer.Params().Set(name=name) def _CreateNestedMap(self, name, keys): """Returns a NestedMap with keys from fprop args.""" return builder_layers.CreateNestedMapLayer.Params().Set( name=name, keys=keys) ########################################################################### # Basic nn layers. # # The following method returns a layer param, whose FProp takes a single # Tensor and returns a single Tensor. # # These methods are designed to have minimal knobs. Sub-classes which needs to # be flexible can override these methods with different options. E.g., a # sub-class builder can override _BN() to tune the decay option. 
########################################################################### def _BN(self, name, dims): """Batch norm.""" return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99) def _LN(self, name, dims, use_fused_layernorm=False): """Layer norm.""" return layers.LayerNorm.Params().Set( name=name, input_dim=dims, use_fused_layernorm=use_fused_layernorm, fprop_dtype=self.params.fprop_dtype) def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None): """Returns a DropoutLayer Params.""" if self.params.deterministic_dropout: return layers.DeterministicDropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims) return layers.DropoutLayer.Params().Set( name=name, keep_prob=keep_prob, noise_shape_broadcast_dims=noise_shape_broadcast_dims, fprop_dtype=self.params.fprop_dtype) def _Linear(self, name, idims, odims, device_mesh=None, weight_split_dims_mapping=None, qdomain=None): """Linear layer. y = matmul([..., idims], [idims, odims]).""" p = builder_layers.LinearLayer.Params() p.name = name p.input_dims = idims p.output_dims = odims p.fprop_dtype = self.params.fprop_dtype p.device_mesh = device_mesh p.weight_split_dims_mapping = weight_split_dims_mapping p.qdomain.default = qdomain return p def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None): """Bias layer. The bias is added to the last dimension of the input.""" return builder_layers.BiasLayer.Params().Set( name=name, dims=dims, fprop_dtype=self.params.fprop_dtype, device_mesh=device_mesh, weight_split_dims_mapping=weight_split_dims_mapping) def _Activation(self, name, fn='RELU'): """Activation layer.""" return activations.ActivationLayer.Params().Set(activation=fn, name=name) def _FC(self, name, idims, odims, act='RELU'): """Feed-forward fully connected. y = act(matmul(x, w) + b).""" # pyformat: disable return self._Seq( name, self._Linear('linear', idims, odims), self._Bias('bias', odims), self._Activation('act', fn=act)) def _MLP(self, name, dims, act='RELU'): """Multiple layers of feed-forward fully connected. Args: name: The layer name. dims: A list of int. i-th layer has dims[i] as its input dimension, and dims[i+1] as its output dimensions. act: The activation function. Returns: The param for the composed layer. """ l = [] for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])): l += [self._FC('l%03d' % n, i, o, act)] return self._Seq(name, *l) def _Conv2D(self, name, filter_shape, filter_stride): """Conv2D layer.""" return layers.Conv2DLayerNoPadding.Params().Set( name=name, filter_shape=filter_shape, filter_stride=filter_stride, fprop_dtype=self.params.fprop_dtype) def _Reshape(self, name, shape): """Reshape inputs to the shape provided.""" return builder_layers.ReshapeLayer.Params().Set(name=name, shape=shape)
36.827778
81
0.648439
5defe80f544d4d152b4eab27921e74e04e7e4df0
4,589
py
Python
instmakelib/instmake_toolnames.py
gilramir/instmake
7b083a5061be43e9b92bdcf0f3badda7c4107eef
[ "BSD-3-Clause" ]
null
null
null
instmakelib/instmake_toolnames.py
gilramir/instmake
7b083a5061be43e9b92bdcf0f3badda7c4107eef
[ "BSD-3-Clause" ]
null
null
null
instmakelib/instmake_toolnames.py
gilramir/instmake
7b083a5061be43e9b92bdcf0f3badda7c4107eef
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Manage the tool plugins and use them appropriately.
"""
import os

TOOLNAME_PLUGIN_PREFIX = "toolname"
35.573643
78
0.58771
5df1af1171ca12ddbf5a2ce6aeb42a6d24730f8d
12,991
py
Python
raiden/tests/integration/long_running/test_stress.py
tirkarthi/raiden
dbd03ddda039332b54ec0c02d81cbe1100bc8028
[ "MIT" ]
2,101
2016-06-01T11:31:49.000Z
2022-03-27T20:13:19.000Z
raiden/tests/integration/long_running/test_stress.py
tirkarthi/raiden
dbd03ddda039332b54ec0c02d81cbe1100bc8028
[ "MIT" ]
5,291
2016-06-01T18:14:04.000Z
2022-03-31T11:19:09.000Z
raiden/tests/integration/long_running/test_stress.py
tirkarthi/raiden
dbd03ddda039332b54ec0c02d81cbe1100bc8028
[ "MIT" ]
484
2016-06-01T18:21:06.000Z
2022-03-22T10:29:45.000Z
import time from http import HTTPStatus from itertools import count from typing import Sequence import gevent import grequests import pytest import structlog from eth_utils import to_canonical_address from flask import url_for from raiden.api.python import RaidenAPI from raiden.api.rest import APIServer, RestAPI from raiden.constants import RoutingMode from raiden.message_handler import MessageHandler from raiden.network.transport import MatrixTransport from raiden.raiden_event_handler import RaidenEventHandler from raiden.raiden_service import RaidenService from raiden.settings import RestApiConfig from raiden.tests.integration.api.utils import wait_for_listening_port from raiden.tests.integration.fixtures.raiden_network import RestartNode from raiden.tests.utils.detect_failure import raise_on_failure from raiden.tests.utils.protocol import HoldRaidenEventHandler from raiden.tests.utils.transfer import ( assert_synced_channel_state, wait_assert, watch_for_unlock_failures, ) from raiden.transfer import views from raiden.ui.startup import RaidenBundle from raiden.utils.formatting import to_checksum_address from raiden.utils.typing import ( Address, BlockNumber, Host, Iterator, List, Port, TokenAddress, TokenAmount, TokenNetworkAddress, Tuple, ) log = structlog.get_logger(__name__) def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None: """Iteratively wait and get on passed greenlets. This ensures exceptions in the greenlets are re-raised as soon as possible. """ for item in gevent.iwait(items): item.get() def restart_network_and_apiservers( raiden_network: List[RaidenService], restart_node: RestartNode, api_servers: List[APIServer], port_generator: Iterator[Port], ) -> Tuple[List[RaidenService], List[APIServer]]: """Stop an app and start it back""" for rest_api in api_servers: rest_api.stop() new_network = restart_network(raiden_network, restart_node) new_servers = start_apiserver_for_network(new_network, port_generator) return (new_network, new_servers) def stress_send_serial_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: """Send `deposit` transfers of value `1` one at a time, without changing the initial capacity. 
""" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the channels in one direction for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) # deplete the channels in the backwards direction for server_to, server_from in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) # reset the balances balances by sending the "extra" deposit forward for server_from, server_to in pairs: sequential_transfers( server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) def stress_send_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: """Send `deposit` transfers in parallel, without changing the initial capacity.""" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) # deplete the channels in one direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] ) # deplete the channels in the backwards direction iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit * 2, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] ) # reset the balances balances by sending the "extra" deposit forward iwait_and_get( [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] ) def stress_send_and_receive_parallel_transfers( rest_apis: List[APIServer], token_address: TokenAddress, identifier_generator: Iterator[int], deposit: TokenAmount, ) -> None: """Send transfers of value one in parallel""" pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]])) forward_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_from, server_to in pairs ] backwards_transfers = [ gevent.spawn( sequential_transfers, server_from=server_from, server_to=server_to, number_of_transfers=deposit, token_address=token_address, identifier_generator=identifier_generator, ) for server_to, server_from in pairs ] iwait_and_get(forward_transfers + backwards_transfers)
31.531553
99
0.693557
5df24f88464dca8942f1f032db545a5522ed1674
8,796
py
Python
pyabsa/utils/preprocess.py
jackie930/PyABSA
3cf733f8b95610a69c985b4650309c24f42b44b5
[ "MIT" ]
null
null
null
pyabsa/utils/preprocess.py
jackie930/PyABSA
3cf733f8b95610a69c985b4650309c24f42b44b5
[ "MIT" ]
null
null
null
pyabsa/utils/preprocess.py
jackie930/PyABSA
3cf733f8b95610a69c985b4650309c24f42b44b5
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# file: preprocess.py
# author: jackie
# Copyright (C) 2021. All Rights Reserved.

import os
import pandas as pd
import argparse
import emoji
import re
from sklearn.model_selection import train_test_split

parser = argparse.ArgumentParser()
parser.add_argument("--inpath", type=str, required=True, default='./raw_data/data1.csv')
parser.add_argument("--folder_name", type=str, required=False, default='./custom')
parser.add_argument("--task", type=str, required=False, default='aptepc')

args = parser.parse_args()

main(args.inpath, args.folder_name, args.task)
32.820896
118
0.582651
5df2f0f840a2ef6d66c1e525c680fc2bedf30ceb
487
py
Python
apps/06_lolcat_factory/you_try/PRD/cat_service.py
dparito/10Apps-Python_w-Andy
77ca1ec280729a9002e49071e2f31cb5bc7b75cd
[ "MIT" ]
1
2019-04-29T17:43:22.000Z
2019-04-29T17:43:22.000Z
apps/06_lolcat_factory/you_try/PRD/cat_service.py
dparito/10Apps-Python_w-Andy
77ca1ec280729a9002e49071e2f31cb5bc7b75cd
[ "MIT" ]
null
null
null
apps/06_lolcat_factory/you_try/PRD/cat_service.py
dparito/10Apps-Python_w-Andy
77ca1ec280729a9002e49071e2f31cb5bc7b75cd
[ "MIT" ]
null
null
null
import os
import shutil

import requests
22.136364
78
0.702259
5df3d1e6a9c7a37c58251913284702c80bde4fc2
15,348
py
Python
dask/dataframe/io/hdf.py
TryTestspace/dask
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
[ "BSD-3-Clause" ]
1
2017-10-06T05:59:15.000Z
2017-10-06T05:59:15.000Z
dask/dataframe/io/hdf.py
TryTestspace/dask
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
[ "BSD-3-Clause" ]
null
null
null
dask/dataframe/io/hdf.py
TryTestspace/dask
86d4f7d8c6d48ec6c4b1de1b6cfd2d3f4e5a4c1b
[ "BSD-3-Clause" ]
1
2021-03-28T04:50:43.000Z
2021-03-28T04:50:43.000Z
from __future__ import absolute_import, division, print_function from fnmatch import fnmatch from glob import glob import os import uuid from warnings import warn import pandas as pd from toolz import merge from .io import _link from ...base import get_scheduler from ..core import DataFrame, new_dd_object from ... import config, multiprocessing from ...base import tokenize, compute_as_if_collection from ...bytes.utils import build_name_function from ...compatibility import PY3 from ...delayed import Delayed, delayed from ...utils import get_scheduler_lock def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None): """ A wrapper function around pd_to_hdf that enables locking""" if lock: lock.acquire() try: pd_to_hdf(*args, **kwargs) finally: if lock: lock.release() return None def to_hdf(df, path, key, mode='a', append=False, get=None, scheduler=None, name_function=None, compute=True, lock=None, dask_kwargs={}, **kwargs): """ Store Dask Dataframe to Hierarchical Data Format (HDF) files This is a parallel version of the Pandas function of the same name. Please see the Pandas docstring for more detailed information about shared keyword arguments. This function differs from the Pandas version by saving the many partitions of a Dask DataFrame in parallel, either to many files, or to many datasets within the same file. You may specify this parallelism with an asterix ``*`` within the filename or datapath, and an optional ``name_function``. The asterix will be replaced with an increasing sequence of integers starting from ``0`` or with the result of calling ``name_function`` on each of those integers. This function only supports the Pandas ``'table'`` format, not the more specialized ``'fixed'`` format. Parameters ---------- path: string Path to a target filename. May contain a ``*`` to denote many filenames key: string Datapath within the files. May contain a ``*`` to denote many locations name_function: function A function to convert the ``*`` in the above options to a string. Should take in a number from 0 to the number of partitions and return a string. (see examples below) compute: bool Whether or not to execute immediately. If False then this returns a ``dask.Delayed`` value. lock: Lock, optional Lock to use to prevent concurrency issues. By default a ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock`` will be used depending on your scheduler if a lock is required. See dask.utils.get_scheduler_lock for more information about lock selection. **other: See pandas.to_hdf for more information Examples -------- Save Data to a single file >>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP Save data to multiple datapaths within the same file: >>> df.to_hdf('output.hdf', '/data-*') # doctest: +SKIP Save data to multiple files: >>> df.to_hdf('output-*.hdf', '/data') # doctest: +SKIP Save data to multiple files, using the multiprocessing scheduler: >>> df.to_hdf('output-*.hdf', '/data', scheduler='processes') # doctest: +SKIP Specify custom naming scheme. This writes files as '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc.. >>> from datetime import date, timedelta >>> base = date(year=2000, month=1, day=1) >>> def name_function(i): ... ''' Convert integer 0 to n to a string ''' ... 
return base + timedelta(days=i) >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP Returns ------- None: if compute == True delayed value: if compute == False See Also -------- read_hdf: to_parquet: """ name = 'to-hdf-' + uuid.uuid1().hex pd_to_hdf = getattr(df._partition_type, 'to_hdf') single_file = True single_node = True # if path is string, format using i_name if isinstance(path, str): if path.count('*') + key.count('*') > 1: raise ValueError("A maximum of one asterisk is accepted in file " "path and dataset key") fmt_obj = lambda path, i_name: path.replace('*', i_name) if '*' in path: single_file = False else: if key.count('*') > 1: raise ValueError("A maximum of one asterisk is accepted in " "dataset key") fmt_obj = lambda path, _: path if '*' in key: single_node = False if 'format' in kwargs and kwargs['format'] not in ['t', 'table']: raise ValueError("Dask only support 'table' format in hdf files.") if mode not in ('a', 'w', 'r+'): raise ValueError("Mode must be one of 'a', 'w' or 'r+'") if name_function is None: name_function = build_name_function(df.npartitions - 1) # we guarantee partition order is preserved when its saved and read # so we enforce name_function to maintain the order of its input. if not (single_file and single_node): formatted_names = [name_function(i) for i in range(df.npartitions)] if formatted_names != sorted(formatted_names): warn("To preserve order between partitions name_function " "must preserve the order of its input") # If user did not specify scheduler and write is sequential default to the # sequential scheduler. otherwise let the _get method choose the scheduler if (get is None and not config.get('get', None) and scheduler is None and not config.get('scheduler', None) and single_node and single_file): scheduler = 'single-threaded' # handle lock default based on whether we're writing to a single entity _actual_get = get_scheduler(get=get, collections=[df], scheduler=scheduler) if lock is None: if not single_node: lock = True elif not single_file and _actual_get is not multiprocessing.get: # if we're writing to multiple files with the multiprocessing # scheduler we don't need to lock lock = True else: lock = False if lock: lock = get_scheduler_lock(get, df, scheduler=scheduler) kwargs.update({'format': 'table', 'mode': mode, 'append': append}) dsk = dict() i_name = name_function(0) dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, 0), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs) kwargs2 = kwargs.copy() if single_file: kwargs2['mode'] = 'a' if single_node: kwargs2['append'] = True filenames = [] for i in range(0,df.npartitions): i_name = name_function(i) filenames.append(fmt_obj(path, i_name)) for i in range(1, df.npartitions): i_name = name_function(i) task = (_pd_to_hdf, pd_to_hdf, lock, [(df._name, i), fmt_obj(path, i_name), key.replace('*', i_name)], kwargs2) if single_file: link_dep = i - 1 if single_node else 0 task = (_link, (name, link_dep), task) dsk[(name, i)] = task dsk = merge(df.dask, dsk) if single_file and single_node: keys = [(name, df.npartitions - 1)] else: keys = [(name, i) for i in range(df.npartitions)] if compute: compute_as_if_collection(DataFrame, dsk, keys, get=get, scheduler=scheduler, **dask_kwargs) return filenames else: return delayed([Delayed(k, dsk) for k in keys]) dont_use_fixed_error_message = """ This HDFStore is not partitionable and can only be use monolithically with pandas. 
In the future when creating HDFStores use the ``format='table'`` option to ensure that your dataset can be parallelized""" read_hdf_error_msg = """ The start and stop keywords are not supported when reading from more than one file/dataset. The combination is ambiguous because it could be interpreted as the starting and stopping index per file, or starting and stopping index of the global dataset.""" def _read_single_hdf(path, key, start=0, stop=None, columns=None, chunksize=int(1e6), sorted_index=False, lock=None, mode='a'): """ Read a single hdf file into a dask.dataframe. Used for each file in read_hdf. """ def get_keys_stops_divisions(path, key, stop, sorted_index, chunksize): """ Get the "keys" or group identifiers which match the given key, which can contain wildcards. This uses the hdf file identified by the given path. Also get the index of the last row of data for each matched key. """ with pd.HDFStore(path, mode=mode) as hdf: keys = [k for k in hdf.keys() if fnmatch(k, key)] stops = [] divisions = [] for k in keys: storer = hdf.get_storer(k) if storer.format_type != 'table': raise TypeError(dont_use_fixed_error_message) if stop is None: stops.append(storer.nrows) elif stop > storer.nrows: raise ValueError("Stop keyword exceeds dataset number " "of rows ({})".format(storer.nrows)) else: stops.append(stop) if sorted_index: division = [storer.read_column('index', start=start, stop=start + 1)[0] for start in range(0, storer.nrows, chunksize)] division_end = storer.read_column('index', start=storer.nrows - 1, stop=storer.nrows)[0] division.append(division_end) divisions.append(division) else: divisions.append(None) return keys, stops, divisions def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock): """ Get the data frame corresponding to one path and one key (which should not contain any wildcards). """ empty = pd.read_hdf(path, key, mode=mode, stop=0) if columns is not None: empty = empty[columns] token = tokenize((path, os.path.getmtime(path), key, start, stop, empty, chunksize, division)) name = 'read-hdf-' + token if empty.ndim == 1: base = {'name': empty.name, 'mode': mode} else: base = {'columns': empty.columns, 'mode': mode} if start >= stop: raise ValueError("Start row number ({}) is above or equal to stop " "row number ({})".format(start, stop)) dsk = dict(((name, i), (_pd_read_hdf, path, key, lock, update(s))) for i, s in enumerate(range(start, stop, chunksize))) if division: divisions = division else: divisions = [None] * (len(dsk) + 1) return new_dd_object(dsk, name, empty, divisions) keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index, chunksize) if (start != 0 or stop is not None) and len(keys) > 1: raise NotImplementedError(read_hdf_error_msg) from ..multi import concat return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock) for k, s, d in zip(keys, stops, divisions)]) def _pd_read_hdf(path, key, lock, kwargs): """ Read from hdf5 file with a lock """ if lock: lock.acquire() try: result = pd.read_hdf(path, key, **kwargs) finally: if lock: lock.release() return result def read_hdf(pattern, key, start=0, stop=None, columns=None, chunksize=1000000, sorted_index=False, lock=True, mode='a'): """ Read HDF files into a Dask DataFrame Read hdf files into a dask dataframe. This function is like ``pandas.read_hdf``, except it can read from a single large file, or from multiple files, or from multiple keys from the same file. 
Parameters ---------- pattern : string, list File pattern (string), buffer to read from, or list of file paths. Can contain wildcards. key : group identifier in the store. Can contain wildcards start : optional, integer (defaults to 0), row number to start at stop : optional, integer (defaults to None, the last row), row number to stop at columns : list of columns, optional A list of columns that if not None, will limit the return columns (default is None) chunksize : positive integer, optional Maximal number of rows per partition (default is 1000000). sorted_index : boolean, optional Option to specify whether or not the input hdf files have a sorted index (default is False). lock : boolean, optional Option to use a lock to prevent concurrency issues (default is True). mode : {'a', 'r', 'r+'}, default 'a'. Mode to use when opening file(s). 'r' Read-only; no data can be modified. 'a' Append; an existing file is opened for reading and writing, and if the file does not exist it is created. 'r+' It is similar to 'a', but the file must already exist. Returns ------- dask.DataFrame Examples -------- Load single file >>> dd.read_hdf('myfile.1.hdf5', '/x') # doctest: +SKIP Load multiple files >>> dd.read_hdf('myfile.*.hdf5', '/x') # doctest: +SKIP >>> dd.read_hdf(['myfile.1.hdf5', 'myfile.2.hdf5'], '/x') # doctest: +SKIP Load multiple datasets >>> dd.read_hdf('myfile.1.hdf5', '/*') # doctest: +SKIP """ if lock is True: lock = get_scheduler_lock() key = key if key.startswith('/') else '/' + key if isinstance(pattern, str): paths = sorted(glob(pattern)) else: paths = pattern if (start != 0 or stop is not None) and len(paths) > 1: raise NotImplementedError(read_hdf_error_msg) if chunksize <= 0: raise ValueError("Chunksize must be a positive integer") if (start != 0 or stop is not None) and sorted_index: raise ValueError("When assuming pre-partitioned data, data must be " "read in its entirety using the same chunksizes") from ..multi import concat return concat([_read_single_hdf(path, key, start=start, stop=stop, columns=columns, chunksize=chunksize, sorted_index=sorted_index, lock=lock, mode=mode) for path in paths]) if PY3: from ..core import _Frame _Frame.to_hdf.__doc__ = to_hdf.__doc__
36.028169
95
0.601968
5df431be7adb55ae6ec852df04ddc2566bd34906
2,411
py
Python
src/charma/media_info/manager.py
mononobi/charma-server
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
[ "BSD-3-Clause" ]
1
2020-01-16T23:36:10.000Z
2020-01-16T23:36:10.000Z
src/charma/media_info/manager.py
mononobi/imovie-server
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
[ "BSD-3-Clause" ]
24
2020-06-08T18:27:04.000Z
2021-06-06T12:01:39.000Z
src/charma/media_info/manager.py
mononobi/charma-server
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
[ "BSD-3-Clause" ]
1
2020-12-20T05:29:04.000Z
2020-12-20T05:29:04.000Z
# -*- coding: utf-8 -*-
"""
media info manager module.
"""

from pyrin.core.mixin import HookMixin
from pyrin.core.structs import Manager

import pyrin.utils.path as path_utils

from charma.media_info import MediaInfoPackage
from charma.media_info.interface import AbstractMediaInfoProvider
from charma.media_info.exceptions import InvalidMediaInfoProviderTypeError
28.702381
90
0.633762
5df7763c501c1594868f6878a3ef39da6fe70cae
842
py
Python
tests/test_parsers.py
FlorisHoogenboom/BoxRec
c9cc5d149318f916facdf57d7dbe94e797d81582
[ "MIT" ]
5
2018-04-20T11:47:43.000Z
2021-05-04T18:54:16.000Z
tests/test_parsers.py
FlorisHoogenboom/BoxRec
c9cc5d149318f916facdf57d7dbe94e797d81582
[ "MIT" ]
1
2018-03-21T08:44:25.000Z
2018-03-22T12:08:17.000Z
tests/test_parsers.py
FlorisHoogenboom/BoxRec
c9cc5d149318f916facdf57d7dbe94e797d81582
[ "MIT" ]
6
2018-03-16T14:05:55.000Z
2018-03-16T14:08:41.000Z
import unittest

from boxrec.parsers import FightParser
25.515152
77
0.63658
5df786c7bbc659882d2ccb4bb744e69c8b4ccbd8
4,868
py
Python
hyperdock/common/workqueue.py
ErikGartner/hyperdock
19510b4bf1e123576d7be067555d959cb8a7cf45
[ "Apache-2.0" ]
8
2018-05-07T19:12:35.000Z
2021-12-21T01:30:48.000Z
hyperdock/common/workqueue.py
ErikGartner/hyperdock
19510b4bf1e123576d7be067555d959cb8a7cf45
[ "Apache-2.0" ]
92
2018-05-15T14:57:48.000Z
2019-12-27T10:48:25.000Z
hyperdock/common/workqueue.py
ErikGartner/hyperdock
19510b4bf1e123576d7be067555d959cb8a7cf45
[ "Apache-2.0" ]
2
2019-06-01T22:42:17.000Z
2019-12-25T12:48:36.000Z
from datetime import datetime, timedelta

from bson.objectid import ObjectId

WORK_TIMEOUT = 600
31.205128
87
0.474528
5df79191a02e9cdc36eab83fa9b24e2f2d9fe213
7,695
py
Python
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/apache_libcloud-0.15.1-py2.7.egg/libcloud/test/test_connection.py
poojavade/Genomics_Docker
829b5094bba18bbe03ae97daf925fee40a8476e8
[ "Apache-2.0" ]
1
2019-07-29T02:53:51.000Z
2019-07-29T02:53:51.000Z
libcloud/test/test_connection.py
elastacloud/libcloud
f3792b2dca835c548bdbce0da2eb71bfc9463b72
[ "Apache-2.0" ]
1
2021-09-11T14:30:32.000Z
2021-09-11T14:30:32.000Z
libcloud/test/test_connection.py
elastacloud/libcloud
f3792b2dca835c548bdbce0da2eb71bfc9463b72
[ "Apache-2.0" ]
2
2016-12-19T02:27:46.000Z
2019-07-29T02:53:54.000Z
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import ssl

from mock import Mock, call

from libcloud.test import unittest
from libcloud.common.base import Connection
from libcloud.common.base import LoggingConnection


if __name__ == '__main__':
    sys.exit(unittest.main())
36.995192
94
0.624172
5df7daeb42f8803f9c7b7af1f59daf2cde2ea6c7
3,605
py
Python
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
360
2020-04-02T11:12:09.000Z
2022-03-24T21:46:58.000Z
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
169
2020-04-07T21:01:05.000Z
2022-03-31T10:07:39.000Z
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
94
2020-04-09T23:22:17.000Z
2022-03-17T21:49:03.000Z
import os import sys import bpy script_dir = os.path.dirname(os.path.abspath(__file__)) utils_dir = os.path.join(script_dir, "../../blender_utils") sys.path.append(utils_dir) from utils import bake_model, clean_unused, export_ig_object, import_obj_folder ############################################# # Parse command line arguments ############################################# should_bake = "--bake" in sys.argv axis = ["X", "Y", "Z", "-X", "-Y", "-Z"] import_axis_up = get_arg(sys.argv, "--up", default="Z") if import_axis_up not in axis: raise ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up)) import_axis_forward = get_arg(sys.argv, "--forward", default="X") if import_axis_forward not in axis: raise ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward)) source_dir = get_arg(sys.argv, "--source_dir") if source_dir is None: raise ValueError("Source directory not specified.") dest_dir = get_arg(sys.argv, "--dest_dir") if dest_dir is None: raise ValueError("Destination directory not specified.") os.makedirs(dest_dir, exist_ok=True) model_id = os.path.basename(source_dir) ############################################# # Importing obj files from source dir ############################################# for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] bpy.data.objects.remove(obj) clean_unused() import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward) ############################################# # Optional UV Unwrapping # This only needed if baking will be performed ############################################# if should_bake: uv_unwrapped = True for o in bpy.context.scene.objects: if not o.data.uv_layers: uv_unwrapped = False if not uv_unwrapped: bpy.ops.object.mode_set(mode="OBJECT") vl = bpy.context.view_layer bpy.ops.object.select_all(action="DESELECT") for on in bpy.context.scene.objects.keys(): obj = bpy.context.scene.objects[on] new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv") vl.objects.active = obj obj.select_set(True) bpy.ops.object.editmode_toggle() bpy.ops.mesh.select_all(action="SELECT") bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02) bpy.context.tool_settings.mesh_select_mode = (False, False, True) bpy.ops.object.mode_set(mode="OBJECT") ############################################# # Export models ############################################# export_ig_object(dest_dir, save_material=not should_bake) ############################################# # Optional Texture Baking ############################################# if should_bake: mat_dir = os.path.join(dest_dir, "material") os.makedirs(mat_dir, exist_ok=True) # bpy.ops.wm.open_mainfile(filepath=blend_path) # import_ig_object(model_root, import_mat=True) for obj in bpy.context.scene.objects: obj.select_set(True) bpy.context.view_layer.objects.active = obj bpy.ops.object.select_all(action="SELECT") bpy.ops.object.join() channels = { "DIFFUSE": (2048, 32), "ROUGHNESS": (1024, 16), "METALLIC": (1024, 16), "NORMAL": (1024, 16), } bake_model(mat_dir, channels, overwrite=True) bpy.ops.wm.quit_blender()
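# Hypothetical sketch of the get_arg helper the script above relies on; its real
# definition is not included in this record. Assumed behaviour: return the token
# that follows `flag` in argv, or `default` when the flag is absent.
def get_arg(argv, flag, default=None):
    if flag in argv:
        return argv[argv.index(flag) + 1]
    return default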
33.073394
115
0.603606
5df83448e7dd852878051c1b5e24915762ddad3f
3,057
py
Python
ceilometerclient/common/base.py
mail2nsrajesh/python-ceilometerclient
3b4e35abada626ce052f20d55c71fe12ab77052a
[ "Apache-2.0" ]
null
null
null
ceilometerclient/common/base.py
mail2nsrajesh/python-ceilometerclient
3b4e35abada626ce052f20d55c71fe12ab77052a
[ "Apache-2.0" ]
null
null
null
ceilometerclient/common/base.py
mail2nsrajesh/python-ceilometerclient
3b4e35abada626ce052f20d55c71fe12ab77052a
[ "Apache-2.0" ]
null
null
null
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Base utilities to build API operation managers and objects on top of.
"""

import copy

from ceilometerclient.apiclient import base
from ceilometerclient.apiclient import exceptions
from ceilometerclient import exc


def getid(obj):
    """Extracts object ID.

    Abstracts the common pattern of allowing either an object or the
    object's ID (UUID) as a parameter when dealing with relationships.
    """
    try:
        return obj.id
    except AttributeError:
        return obj
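# Illustrative check of getid above; the resource class is invented for the example.
class _ExampleResource(object):
    id = 'abc123'

assert getid(_ExampleResource()) == 'abc123'   # object with an id attribute
assert getid('abc123') == 'abc123'             # bare ID is passed through unchanged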
28.570093
79
0.648021
5dfa61d9200420a717e96bb426552082800e9861
11,020
py
Python
lib/charms/layer/azure.py
freyes/charm-azure-integrator
9c96eed30388e5e7ae2ff590574890e27e845b5c
[ "Apache-2.0" ]
null
null
null
lib/charms/layer/azure.py
freyes/charm-azure-integrator
9c96eed30388e5e7ae2ff590574890e27e845b5c
[ "Apache-2.0" ]
null
null
null
lib/charms/layer/azure.py
freyes/charm-azure-integrator
9c96eed30388e5e7ae2ff590574890e27e845b5c
[ "Apache-2.0" ]
null
null
null
import json import os import re import subprocess from base64 import b64decode from enum import Enum from math import ceil, floor from pathlib import Path from urllib.error import HTTPError from urllib.request import urlopen import yaml from charmhelpers.core import hookenv from charmhelpers.core.unitdata import kv from charms.layer import status ENTITY_PREFIX = 'charm.azure' MODEL_UUID = os.environ['JUJU_MODEL_UUID'] MAX_ROLE_NAME_LEN = 64 MAX_POLICY_NAME_LEN = 128 # When debugging hooks, for some reason HOME is set to /home/ubuntu, whereas # during normal hook execution, it's /root. Set it here to be consistent. os.environ['HOME'] = '/root' def get_credentials(): """ Get the credentials from either the config or the hook tool. Prefers the config so that it can be overridden. """ no_creds_msg = 'missing credentials; set credentials config' config = hookenv.config() # try to use Juju's trust feature try: result = subprocess.run(['credential-get'], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) creds = yaml.load(result.stdout.decode('utf8')) creds_data = creds['credential']['attributes'] login_cli(creds_data) return True except FileNotFoundError: pass # juju trust not available except subprocess.CalledProcessError as e: if 'permission denied' not in e.stderr.decode('utf8'): raise no_creds_msg = 'missing credentials access; grant with: juju trust' # try credentials config if config['credentials']: try: creds_data = b64decode(config['credentials']).decode('utf8') login_cli(creds_data) return True except Exception: status.blocked('invalid value for credentials config') return False # no creds provided status.blocked(no_creds_msg) return False def login_cli(creds_data): """ Use the credentials to authenticate the Azure CLI. """ app_id = creds_data['application-id'] app_pass = creds_data['application-password'] sub_id = creds_data['subscription-id'] tenant_id = _get_tenant_id(sub_id) try: log('Forcing logout of Azure CLI') _azure('logout') except AzureError: pass try: log('Logging in to Azure CLI') _azure('login', '--service-principal', '-u', app_id, '-p', app_pass, '-t', tenant_id) # cache the subscription ID for use in roles kv().set('charm.azure.sub-id', sub_id) except AzureError as e: # redact the credential info from the exception message stderr = re.sub(app_id, '<app-id>', e.args[0]) stderr = re.sub(app_pass, '<app-pass>', stderr) stderr = re.sub(tenant_id, '<tenant-id>', stderr) # from None suppresses the previous exception from the stack trace raise AzureError(stderr) from None def send_additional_metadata(request): """ Get additional info about the requesting instance via the API that isn't available from the metadata server. """ res_grp = _azure('group', 'show', '--name', request.resource_group) # hard-code most of these because with Juju, they're always the same # and the queries required to look them up are a PITA request.send_additional_metadata( resource_group_location=res_grp['location'], vnet_name='juju-internal-network', vnet_resource_group=request.resource_group, subnet_name='juju-internal-subnet', security_group_name='juju-internal-nsg', ) def tag_instance(request): """ Tag the given instance with the given tags. """ log('Tagging instance with: {}', request.instance_tags) _azure('vm', 'update', '--name', request.vm_name, '--resource-group', request.resource_group, '--set', *['tags.{}={}'.format(tag, value) for tag, value in request.instance_tags.items()]) def enable_instance_inspection(request): """ Enable instance inspection access for the given application. 
""" log('Enabling instance inspection') _assign_role(request, _get_role('vm-reader')) def enable_network_management(request): """ Enable network management for the given application. """ log('Enabling network management') _assign_role(request, StandardRole.NETWORK_MANAGER) def enable_security_management(request): """ Enable security management for the given application. """ log('Enabling security management') _assign_role(request, StandardRole.SECURITY_MANAGER) def enable_block_storage_management(request): """ Enable block storage (disk) management for the given application. """ log('Enabling block storage management') _assign_role(request, _get_role('disk-manager')) def enable_dns_management(request): """ Enable DNS management for the given application. """ log('Enabling DNS management') _assign_role(request, StandardRole.DNS_MANAGER) def enable_object_storage_access(request): """ Enable object storage read-only access for the given application. """ log('Enabling object storage read') _assign_role(request, StandardRole.OBJECT_STORE_READER) def enable_object_storage_management(request): """ Enable object storage management for the given application. """ log('Enabling object store management') _assign_role(request, StandardRole.OBJECT_STORE_MANAGER) def cleanup(): """ Perform cleanup. """ pass # Internal helpers def _elide(s, max_len, ellipsis='...'): """ Elide s in the middle to ensure it is under max_len. That is, shorten the string, inserting an ellipsis where the removed characters were to show that they've been removed. """ if len(s) > max_len: hl = (max_len - len(ellipsis)) / 2 headl, taill = floor(hl), ceil(hl) s = s[:headl] + ellipsis + s[-taill:] return s def _get_tenant_id(subscription_id): """ Translate the subscription ID into a tenant ID by making an unauthorized request to the API and extracting the tenant ID from the WWW-Authenticate header in the error response. """ url = ('https://management.azure.com/subscriptions/' '{}?api-version=2018-03-01-01.6.1'.format(subscription_id)) try: urlopen(url) log_err('Error getting tenant ID: did not get "unauthorized" response') return None except HTTPError as e: if 'WWW-Authenticate' not in e.headers: log_err('Error getting tenant ID: missing WWW-Authenticate header') return None www_auth = e.headers['WWW-Authenticate'] match = re.search(r'authorization_uri="[^"]*/([^/"]*)"', www_auth) if not match: log_err('Error getting tenant ID: unable to find in {}', www_auth) return None return match.group(1) def _azure(cmd, *args, return_stderr=False): """ Call the azure-cli tool. """ cmd = ['az', cmd] cmd.extend(args) result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = result.stdout.decode('utf8').strip() stderr = result.stderr.decode('utf8').strip() if result.returncode != 0: raise AzureError.get(stderr) if return_stderr: return stderr if stdout: stdout = json.loads(stdout) return stdout def _get_msi(vm_id): """ Get the Managed System Identity for the VM. """ vm_identities = kv().get('charm.azure.vm-identities', {}) return vm_identities.get(vm_id) def _get_role(role_name): """ Translate short role name into a full role name and ensure that the custom role is loaded. The custom roles have to be applied to a specific subscription ID, but the subscription ID applies to the entire credential, so will almost certainly be reused, so there's not much danger in hitting the 2k custom role limit. 
""" known_roles = kv().get('charm.azure.roles', {}) if role_name in known_roles: return known_roles[role_name] sub_id = kv().get('charm.azure.sub-id') role_file = Path('files/roles/{}.json'.format(role_name)) role_data = json.loads(role_file.read_text()) role_fullname = role_data['Name'].format(sub_id) scope = role_data['AssignableScopes'][0].format(sub_id) role_data['Name'] = role_fullname role_data['AssignableScopes'][0] = scope try: log('Ensuring role {}', role_fullname) _azure('role', 'definition', 'create', '--role-definition', json.dumps(role_data)) except AzureError as e: if 'already exists' not in e.args[0]: raise known_roles[role_name] = role_fullname return role_fullname
30.955056
79
0.649909
5dfa81c4561263d9017352c96e5be1e9f43f9cf3
2,220
py
Python
Assignment-1/Code/server3.py
pankajk22/Computer-Networks-Assignments
5c227ef59c31ab52cde160568242dbbc84482bc5
[ "MIT" ]
null
null
null
Assignment-1/Code/server3.py
pankajk22/Computer-Networks-Assignments
5c227ef59c31ab52cde160568242dbbc84482bc5
[ "MIT" ]
null
null
null
Assignment-1/Code/server3.py
pankajk22/Computer-Networks-Assignments
5c227ef59c31ab52cde160568242dbbc84482bc5
[ "MIT" ]
null
null
null
import socket
import csv
import traceback
import threading

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
usrpass = {}

# Resolve the local host name and address once.
ihost = socket.gethostname()
host = socket.gethostbyname(ihost)

# Read the list of ports from host.csv; this server uses the fifth entry.
iport = []
hostfile = "host.csv"
with open(hostfile, 'r') as host_file:
    csv_hfile = csv.reader(host_file, delimiter=",")
    for row in csv_hfile:
        iport.append(row[1])

port = int(iport[4])

# def checkusr(username):
#     if username in usrpass:
#         return 1
#     else:
#         print("Invalid Username")
#         return -1

main()
23.368421
95
0.578378
5dfb825aca8a665a7da3ab055c3e267e40f81b41
3,040
py
Python
research/utils/_check_pipelines.py
joaopfonseca/research
02659512218d077d9ef28d481178e62172ef18cd
[ "MIT" ]
1
2021-01-25T00:09:32.000Z
2021-01-25T00:09:32.000Z
mlresearch/utils/_check_pipelines.py
joaopfonseca/research
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
[ "MIT" ]
null
null
null
mlresearch/utils/_check_pipelines.py
joaopfonseca/research
ac4ad6fa05b5985050c63dc9e4e18cd00965e09b
[ "MIT" ]
null
null
null
from itertools import product

from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states


def check_pipelines(objects_list, random_state, n_runs):
    """Extract estimators and parameters grids."""

    # Create random states
    random_states = check_random_states(random_state, n_runs)

    pipelines = []
    param_grid = []
    for comb, rs in product(product(*objects_list), random_states):
        name = "|".join([i[0] for i in comb])

        # name, object, sub grid
        comb = [
            (nm, ob, ParameterGrid(sg))
            if ob is not None
            else (nm, FunctionTransformer(), ParameterGrid(sg))
            for nm, ob, sg in comb
        ]

        # Create estimator
        if name not in [n[0] for n in pipelines]:
            est = Pipeline([(nm, ob) for nm, ob, _ in comb])
            pipelines.append((name, est))

        # Create intermediate parameter grids
        sub_grids = [
            [{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
            for nm, obj, sg in comb
        ]

        # Create parameter grids
        for sub_grid in product(*sub_grids):
            param_prefix = "" if len(comb) == 1 else f"{name}__"
            grid = {"est_name": [name]}
            grid.update(
                {f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
            )
            random_states = {
                f"{param_prefix}{param}": [rs]
                for param in est.get_params()
                if "random_state" in param
            }
            grid.update(random_states)

            # Avoid multiple runs over pipelines without random state
            if grid not in param_grid:
                param_grid.append(grid)

    return pipelines, param_grid
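# Illustrative usage sketch only; the estimator names and grids below are invented
# for demonstration and are not taken from this repository.
if __name__ == "__main__":
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    scalers = [("NONE", None, {}), ("MINMAX", MinMaxScaler(), {})]
    classifiers = [
        ("LR", LogisticRegression(), {"C": [0.1, 1.0]}),
        ("DT", DecisionTreeClassifier(), {"max_depth": [3, 5]}),
    ]
    pipelines, param_grid = check_pipelines(
        [scalers, classifiers], random_state=0, n_runs=2
    )
    # pipelines -> one ("<scaler>|<classifier>", Pipeline) pair per step combination;
    # param_grid -> one single-point grid per parameter setting, combination and run.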
31.666667
83
0.575329
5dfbc6d76c2633ab81a042a9da06802874d69efe
2,986
py
Python
mushroom_rl/utils/plots/common_plots.py
PuzeLiu/mushroom-rl
99942b425e66b4ddcc26009d7105dde23841e95d
[ "MIT" ]
344
2020-01-10T09:45:02.000Z
2022-03-30T09:48:28.000Z
mushroom_rl/utils/plots/common_plots.py
AmmarFahmy/mushroom-rl
2625ee7f64d5613b3b9fba00f0b7a39fece88ca5
[ "MIT" ]
44
2020-01-23T03:00:56.000Z
2022-03-25T17:14:22.000Z
mushroom_rl/utils/plots/common_plots.py
AmmarFahmy/mushroom-rl
2625ee7f64d5613b3b9fba00f0b7a39fece88ca5
[ "MIT" ]
93
2020-01-10T21:17:58.000Z
2022-03-31T17:58:52.000Z
from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
28.990291
79
0.609846
5dfc18ba2772ffd25b6600bc97edfc21e288fb90
13,044
py
Python
libs/python-daemon-2.2.0/test/test_metadata.py
helion-security/helion
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
[ "MIT" ]
1
2021-10-10T20:05:07.000Z
2021-10-10T20:05:07.000Z
libs/python-daemon-2.2.0/test/test_metadata.py
helion-security/helion
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
[ "MIT" ]
null
null
null
libs/python-daemon-2.2.0/test/test_metadata.py
helion-security/helion
1e5f22da9808c4d67bb773b93c5295c72fcaf45a
[ "MIT" ]
5
2020-02-02T14:41:30.000Z
2022-03-18T08:34:01.000Z
# -*- coding: utf-8 -*- # # test/test_metadata.py # Part of python-daemon, an implementation of PEP 3143. # # This is free software, and you are welcome to redistribute it under # certain conditions; see the end of this file for copyright # information, grant of license, and disclaimer of warranty. """ Unit test for _metadata private module. """ from __future__ import (absolute_import, unicode_literals) import collections import errno import functools import json import re try: # Python 3 standard library. import urllib.parse as urlparse except ImportError: # Python 2 standard library. import urlparse import mock import pkg_resources import testtools.helpers import testtools.matchers from . import scaffold from .scaffold import unicode import daemon._metadata as metadata FakeYearRange = collections.namedtuple('FakeYearRange', ['begin', 'end']) try: FileNotFoundError except NameError: # Python 2 uses IOError. FileNotFoundError = functools.partial(IOError, errno.ENOENT) version_info_filename = "version_info.json" def fake_func_has_metadata(testcase, resource_name): """ Fake the behaviour of pkg_resources.Distribution.has_metadata. """ if ( resource_name != testcase.version_info_filename or not hasattr(testcase, 'test_version_info')): return False return True def fake_func_get_metadata(testcase, resource_name): """ Fake the behaviour of pkg_resources.Distribution.get_metadata. """ if not fake_func_has_metadata(testcase, resource_name): error = FileNotFoundError(resource_name) raise error content = testcase.test_version_info return content def fake_func_get_distribution(testcase, distribution_name): """ Fake the behaviour of pkg_resources.get_distribution. """ if distribution_name != metadata.distribution_name: raise pkg_resources.DistributionNotFound if hasattr(testcase, 'get_distribution_error'): raise testcase.get_distribution_error mock_distribution = testcase.mock_distribution mock_distribution.has_metadata.side_effect = functools.partial( fake_func_has_metadata, testcase) mock_distribution.get_metadata.side_effect = functools.partial( fake_func_get_metadata, testcase) return mock_distribution # Copyright 20082018 Ben Finney <[email protected]> # # This is free software: you may copy, modify, and/or distribute this work # under the terms of the GNU General Public License as published by the # Free Software Foundation; version 3 of that license or any later version. # No warranty expressed or implied. See the file LICENSE.GPL-3 for details. # Local variables: # coding: utf-8 # mode: python # End: # vim: fileencoding=utf-8 filetype=python :
35.835165
79
0.611546
5dfd3f4f20e57ebcb5265eb99e3913785aac266b
517
py
Python
objectModel/Python/cdm/persistence/cdmfolder/types/purpose_reference.py
wheatdog/CDM
8b6698f4a8b4f44132b12d97f9f261afcfeb798c
[ "CC-BY-4.0", "MIT" ]
null
null
null
objectModel/Python/cdm/persistence/cdmfolder/types/purpose_reference.py
wheatdog/CDM
8b6698f4a8b4f44132b12d97f9f261afcfeb798c
[ "CC-BY-4.0", "MIT" ]
3
2021-05-11T22:31:59.000Z
2021-08-04T04:04:18.000Z
objectModel/Python/cdm/persistence/cdmfolder/types/purpose_reference.py
wheatdog/CDM
8b6698f4a8b4f44132b12d97f9f261afcfeb798c
[ "CC-BY-4.0", "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.

from typing import Union, List

from .purpose import *
from .trait_reference import TraitReference
from cdm.utilities import JObject
30.411765
94
0.733075
5dfe1873a422b9d98cb23a45aa91a24e21973cf8
1,725
py
Python
text_preprocessing/normalizer.py
cyberpunk317/inverted_index
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
[ "MIT" ]
9
2021-09-03T10:02:16.000Z
2021-12-22T14:19:33.000Z
text_preprocessing/normalizer.py
cyberpunk317/inverted_index
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
[ "MIT" ]
3
2021-04-19T17:13:57.000Z
2022-03-18T15:11:53.000Z
text_preprocessing/normalizer.py
cyberpunk317/inverted_index
f49ae3ca4f0255928986c1610c5ff8ee38c5f1ff
[ "MIT" ]
1
2021-12-11T09:47:46.000Z
2021-12-11T09:47:46.000Z
import re
from typing import Union, List

import nltk
from bs4 import BeautifulSoup
27.822581
86
0.506667
5dfe4e27d16878f382ef6d6119132647294b2b99
1,874
py
Python
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
MarcoMancha/BreastCancerDetector
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
[ "Apache-2.0" ]
2
2020-09-30T00:11:09.000Z
2021-10-04T13:00:38.000Z
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
MarcoMancha/BreastCancerDetector
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
[ "Apache-2.0" ]
9
2020-08-11T15:19:55.000Z
2022-03-12T00:11:12.000Z
env/lib/python3.7/site-packages/prompt_toolkit/filters/cli.py
MarcoMancha/BreastCancerDetector
be0dfdcebd1ae66da6d0cf48e2525c24942ae877
[ "Apache-2.0" ]
2
2020-08-03T13:02:06.000Z
2020-11-04T03:15:44.000Z
""" For backwards-compatibility. keep this file. (Many people are going to have key bindings that rely on this file.) """ from __future__ import unicode_literals from .app import * __all__ = [ # Old names. 'HasArg', 'HasCompletions', 'HasFocus', 'HasSelection', 'HasValidationError', 'IsDone', 'IsReadOnly', 'IsMultiline', 'RendererHeightIsKnown', 'InEditingMode', 'InPasteMode', 'ViMode', 'ViNavigationMode', 'ViInsertMode', 'ViInsertMultipleMode', 'ViReplaceMode', 'ViSelectionMode', 'ViWaitingForTextObjectMode', 'ViDigraphMode', 'EmacsMode', 'EmacsInsertMode', 'EmacsSelectionMode', 'IsSearching', 'HasSearch', 'ControlIsSearchable', ] # Keep the original classnames for backwards compatibility. HasValidationError = lambda: has_validation_error HasArg = lambda: has_arg IsDone = lambda: is_done RendererHeightIsKnown = lambda: renderer_height_is_known ViNavigationMode = lambda: vi_navigation_mode InPasteMode = lambda: in_paste_mode EmacsMode = lambda: emacs_mode EmacsInsertMode = lambda: emacs_insert_mode ViMode = lambda: vi_mode IsSearching = lambda: is_searching HasSearch = lambda: is_searching ControlIsSearchable = lambda: control_is_searchable EmacsSelectionMode = lambda: emacs_selection_mode ViDigraphMode = lambda: vi_digraph_mode ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode ViSelectionMode = lambda: vi_selection_mode ViReplaceMode = lambda: vi_replace_mode ViInsertMultipleMode = lambda: vi_insert_multiple_mode ViInsertMode = lambda: vi_insert_mode HasSelection = lambda: has_selection HasCompletions = lambda: has_completions IsReadOnly = lambda: is_read_only IsMultiline = lambda: is_multiline HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.) InEditingMode = in_editing_mode
27.558824
88
0.766275
5dfec5e4fee06a96072b5a9530a2216e08d3cbd3
1,988
py
Python
genetic/spaces.py
shilpasayura/bk
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
[ "MIT" ]
4
2018-09-08T10:30:27.000Z
2021-07-23T07:59:24.000Z
genetic/spaces.py
shilpasayura/bk
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
[ "MIT" ]
null
null
null
genetic/spaces.py
shilpasayura/bk
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
[ "MIT" ]
6
2018-09-07T05:54:17.000Z
2021-07-23T07:59:25.000Z
#spaces.py
'''
AlgoHack Genetic Algorithm for University Semester Planning
Version 0.03 2018
Niranjan Meegammana Shilpasayura.org
'''

import xdb

if __name__ == "__main__":
    delay = 0.05
    conn = xdb.opendb('genetic56.db')
    cursor = conn.cursor()  # create a cursor object
    success = crt_spaces_table(cursor, True)  # create spaces table
    # dedicated lecture hall and lab for each group and semester
    success, count = insert_spaces(cursor, 1, 1, 1, 1, delay)  # generate records
    xdb.commit(conn)
    xdb.closedb(conn)
32.064516
147
0.628773
5dff31a15c326fed56b2875daa3e36cda971efde
2,062
py
Python
threaded_remote_pi_camera.py
hyansuper/flask-video-streaming
a6ba19519b9ba5470e59e535552b3e8c448d57ae
[ "MIT" ]
7
2020-01-03T17:35:29.000Z
2021-11-24T14:29:50.000Z
threaded_remote_pi_camera.py
hyansuper/flask-video-streaming
a6ba19519b9ba5470e59e535552b3e8c448d57ae
[ "MIT" ]
null
null
null
threaded_remote_pi_camera.py
hyansuper/flask-video-streaming
a6ba19519b9ba5470e59e535552b3e8c448d57ae
[ "MIT" ]
4
2020-04-30T15:41:25.000Z
2021-08-07T17:05:54.000Z
import urllib.request
import cv2
import numpy as np
import time
import threading
31.242424
132
0.541707
5dff826ca431e889e0cef41a0054e1a64431e876
22,520
py
Python
scheduler/misc/Ec2SpotCustomScheduler_jan19.py
jalawala/custom-kubernetes-scheduler
07ccba57610048185a245257a1501f6273399d80
[ "Apache-2.0" ]
4
2021-02-24T23:42:17.000Z
2021-03-10T06:31:35.000Z
misc-folder-ignore/scheduler/misc/Ec2SpotCustomScheduler_jan19.py
ABottleofWater7/custom-kubernetes-scheduler
f179a45c85291ba8d34d37e11a33396c94fd5bac
[ "Apache-2.0" ]
null
null
null
misc-folder-ignore/scheduler/misc/Ec2SpotCustomScheduler_jan19.py
ABottleofWater7/custom-kubernetes-scheduler
f179a45c85291ba8d34d37e11a33396c94fd5bac
[ "Apache-2.0" ]
2
2021-09-27T09:08:37.000Z
2022-03-21T04:20:07.000Z
#! /usr/bin/python3 import time import random import json import os from pprint import pprint from kubernetes.client.rest import ApiException from pint import UnitRegistry from collections import defaultdict from kubernetes import client, config, watch from timeloop import Timeloop from datetime import timedelta config.load_kube_config() #config.load_incluster_config() # doing this computation within a k8s cluster #k8s.config.load_incluster_config() core_api = client.CoreV1Api() apis_api = client.AppsV1Api() #sdclient = SdcClient(<Your Sysdig API token>) sysdig_metric = "net.http.request.time" metrics = [{ "id": sysdig_metric, "aggregations": { "time": "timeAvg", "group": "avg" } }] #scheduler_name = "Ec2SpotK8sScheduler" CustomSchedulerName ='K8SCustomScheduler' ureg = UnitRegistry() ureg.load_definitions('kubernetes_units.txt') pendingPodsList = [] failedPodsList = [] runningPodsList =[] nodesListPerNodeLabel = {} Q_ = ureg.Quantity #tl = Timeloop() #@tl.job(interval=timedelta(seconds=10)) __all__ = ["get_node_available_nodes_list"] if __name__ == '__main__': #ready_nodes = nodes_available() #pprint(ready_nodes) #name='review-v1-787d8fbfbb-ltdzt' node='ip-10-0-3-253.ec2.internal' #namespace='ecommerce' #ret=scheduler(name, node, namespace) #pprint(ret) #main() #test() #testpod() #check_node_resources(node) #RunEc2SpotCustomScheduler() #getPodsListForDeployment(' ') #lifecycle = 'OnDemand' #lifecycle = 'Ec2Spot' #get_node_available_nodes_list(lifecycle) #RunEc2SpotCustomScheduler() #NumOfPodsToDeleted = 1 #podsAlreadyRunningOnNodeLabelList = [] #d ={'name':'nginx-66cb875766-vx6bp'} #podsAlreadyRunningOnNodeLabelList.append(d) #deletePods(NumOfPodsToDeleted, podsAlreadyRunningOnNodeLabelList) #deploymentName='nginx' #deploymentName = 'kube-ops-view' #getPodsListForDeployment(deploymentName) #testlist() #tl.start(block=True) while True: RunEc2SpotCustomScheduler() time.sleep(10)
42.330827
281
0.607948
5dffed5f88346db8858c1e4167f535bc237800cb
349
py
Python
local/utils/validate_label_locale.py
DewiBrynJones/docker-deepspeech-cy
99159a746651bd848a8309da7f676045913f3d25
[ "MIT" ]
3
2018-10-11T20:11:28.000Z
2019-02-01T02:46:46.000Z
local/utils/validate_label_locale.py
DewiBrynJones/docker-deepspeech-cy
99159a746651bd848a8309da7f676045913f3d25
[ "MIT" ]
1
2021-01-23T12:56:31.000Z
2021-01-27T15:32:38.000Z
local/utils/validate_label_locale.py
techiaith/docker-deepspeech-cy
99159a746651bd848a8309da7f676045913f3d25
[ "MIT" ]
6
2018-09-24T13:59:53.000Z
2018-10-23T09:29:46.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from clean_transcript import clean_transcript

ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
23.266667
64
0.730659
b900fe014c618b5968bd75cca2f986adc96f1a10
13,806
py
Python
src/models/nn/adaptive_softmax.py
dumpmemory/state-spaces
2a85503cb3e9e86cc05753950d4a249df9a0fffb
[ "Apache-2.0" ]
513
2021-11-03T23:08:23.000Z
2022-03-31T16:29:18.000Z
src/models/nn/adaptive_softmax.py
dumpmemory/state-spaces
2a85503cb3e9e86cc05753950d4a249df9a0fffb
[ "Apache-2.0" ]
18
2021-11-05T12:42:59.000Z
2022-03-27T19:49:55.000Z
src/models/nn/adaptive_softmax.py
MikeOwino/state-spaces
b6672bca994b6a36347f414faa59761e42b1e2b1
[ "Apache-2.0" ]
47
2021-11-04T01:32:54.000Z
2022-03-30T18:24:26.000Z
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional
import functools

import torch
import torch.nn as nn
import torch.nn.functional as F


def _init_weight(weight, d: int, init_scale: Optional[float], default=None):
    assert init_scale or default
    if init_scale is None:
        std = default
    else:
        std = init_scale * (d ** -0.5)
    nn.init.normal_(weight, mean=0, std=std)


_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)

### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
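# Illustrative usage of the partials above (the embedding sizes are invented, not
# taken from the source):
_example_emb = nn.Embedding(1000, 64)
_init_embed(_example_emb.weight, d=64, init_scale=None)   # falls back to std = 0.02
_init_embed(_example_emb.weight, d=64, init_scale=1.0)    # std = 1.0 * 64 ** -0.5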
39.786744
132
0.563378
b9014ad1cdd3760612e00e54f9b058e7af94d104
11,770
py
Python
the_el/cli.py
CityOfPhiladelphia/the-el
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
[ "MIT" ]
11
2017-04-19T18:44:51.000Z
2022-03-07T22:36:47.000Z
the_el/cli.py
CityOfPhiladelphia/the-el
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
[ "MIT" ]
9
2017-04-19T18:43:13.000Z
2017-12-08T16:42:38.000Z
the_el/cli.py
CityOfPhiladelphia/the-el
e3a97afc55d41f2e5fd76cef60ad9393dfa23547
[ "MIT" ]
3
2017-12-08T15:09:03.000Z
2018-08-14T02:42:01.000Z
import json
import csv
import sys
import os
import re
import codecs
import logging
from logging.config import dictConfig

import click
import yaml
from sqlalchemy import create_engine
from jsontableschema_sql import Storage
from smart_open import smart_open

from . import postgres
from . import carto

csv.field_size_limit(sys.maxsize)
37.603834
134
0.651572
b90258212d799fd07af2bd908c88516410b648a2
6,182
py
Python
examples/asr/experimental/speech_to_text_sclite.py
vadam5/NeMo
3c5db09539293c3c19a6bb7437011f91261119af
[ "Apache-2.0" ]
2
2021-06-23T19:16:59.000Z
2022-02-23T18:49:07.000Z
examples/asr/experimental/speech_to_text_sclite.py
vadam5/NeMo
3c5db09539293c3c19a6bb7437011f91261119af
[ "Apache-2.0" ]
null
null
null
examples/asr/experimental/speech_to_text_sclite.py
vadam5/NeMo
3c5db09539293c3c19a6bb7437011f91261119af
[ "Apache-2.0" ]
12
2021-06-20T08:56:10.000Z
2022-03-16T19:07:10.000Z
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This script is based on speech_to_text_infer.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying
a glm file (if provided).
"""

import errno
import json
import os
import subprocess
from argparse import ArgumentParser

import torch

from nemo.collections.asr.metrics.wer import WER
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging

try:
    from torch.cuda.amp import autocast
except ImportError:
    from contextlib import contextmanager

can_gpu = torch.cuda.is_available()


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
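# The no-op fallback that the except-branch above presumably defines is not included
# in this record; a minimal sketch under that assumption:
if "autocast" not in globals():
    from contextlib import contextmanager

    @contextmanager
    def autocast(enabled=None):
        # behaves like a disabled torch.cuda.amp.autocast: does nothing, just yields
        yield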
38.880503
117
0.674054
b9034036dd7c92efb32754807bdeb44d6dc9be42
1,335
py
Python
accalib/utils.py
pj0620/acca-video-series
1b09548014cc899ded5a8fdd1293f7fc121a98bc
[ "MIT" ]
null
null
null
accalib/utils.py
pj0620/acca-video-series
1b09548014cc899ded5a8fdd1293f7fc121a98bc
[ "MIT" ]
3
2020-04-16T09:24:48.000Z
2021-03-27T19:27:48.000Z
accalib/utils.py
pj0620/acca-video-series
1b09548014cc899ded5a8fdd1293f7fc121a98bc
[ "MIT" ]
1
2020-09-01T05:32:04.000Z
2020-09-01T05:32:04.000Z
from manimlib.imports import *
from manimlib.utils import bezier
import numpy as np
31.785714
100
0.526592
b90426c42855fd2a5119f138e577d0e9dbffc737
297
py
Python
setup.py
def-mycroft/rapid-plotly
87ba5d9e6894e2c3288435aae9a377647b006e79
[ "MIT" ]
1
2018-11-19T16:23:31.000Z
2018-11-19T16:23:31.000Z
setup.py
def-mycroft/rapid-plotly
87ba5d9e6894e2c3288435aae9a377647b006e79
[ "MIT" ]
10
2018-11-26T17:20:12.000Z
2019-05-06T14:29:54.000Z
setup.py
def-mycroft/rapid-plotly
87ba5d9e6894e2c3288435aae9a377647b006e79
[ "MIT" ]
null
null
null
from setuptools import setup

setup(name='rapid_plotly',
      version='0.1',
      description='Convenience functions to rapidly create beautiful Plotly graphs',
      author='Joseph Dasenbrock',
      author_email='[email protected]',
      packages=['rapid_plotly'],
      zip_safe=False)
29.7
84
0.703704
b9044d615f386c353b51176e0cfb09ae8fe5c1b6
5,834
py
Python
dodo.py
enerqi/bridge-bidding-systems
30ea2bf6f8bc0b786df4de8571063509d971236f
[ "MIT" ]
2
2020-05-24T17:30:55.000Z
2020-11-22T15:27:56.000Z
dodo.py
enerqi/bridge-bidding-systems
30ea2bf6f8bc0b786df4de8571063509d971236f
[ "MIT" ]
null
null
null
dodo.py
enerqi/bridge-bidding-systems
30ea2bf6f8bc0b786df4de8571063509d971236f
[ "MIT" ]
null
null
null
#! /usr/bin/doit -f
# https://pydoit.org
# `pip install [--user] doit` adds `doit.exe` to the PATH
# - Note `doit auto`, the file watcher only works on Linux/Mac
# - All commands are relative to dodo.py (doit runs in the working dir of dodo.py
#   even if run from a different directory `doit -f path/to/dodo.py`)
from glob import glob
import json
from os import environ
from os.path import abspath, basename, dirname, exists, expanduser, join, splitext
from shutil import copyfile
from typing import Iterator, List, NewType, Optional

from doit.tools import title_with_actions

Path = NewType("Path", str)

home = Path(expanduser("~"))
bml_tools_dir = Path(environ.get("BML_TOOLS_DIRECTORY", join(home, "dev/bml")))
bml_includes_cache_file = ".include-deps.json"


def task_bmlcss():
    """Copy the bml CSS style sheet to this directory."""
    css_basename = "bml.css"
    src_css_file = Path(join(bml_tools_dir, css_basename))

    return {
        'actions': [copy_file],
        'file_dep': [src_css_file],
        'targets': [css_basename],
        'title': title_with_actions
    }
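# task_bmlcss above names copy_file as its action, but the definition is not included
# in this record. A minimal sketch under that assumption; doit passes `dependencies`
# and `targets` to python-actions whose signatures declare those parameter names:
def copy_file(dependencies, targets):
    copyfile(dependencies[0], targets[0])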
37.159236
118
0.652383
b904eadc54bfc2aeedb89068e48092d072692ffd
620
py
Python
learn/hard-way/EmptyFileError.py
hustbill/Python-auto
9f43bc2613a64a373927047ac52d8e90ffe644f8
[ "Apache-2.0" ]
null
null
null
learn/hard-way/EmptyFileError.py
hustbill/Python-auto
9f43bc2613a64a373927047ac52d8e90ffe644f8
[ "Apache-2.0" ]
null
null
null
learn/hard-way/EmptyFileError.py
hustbill/Python-auto
9f43bc2613a64a373927047ac52d8e90ffe644f8
[ "Apache-2.0" ]
null
null
null
filenames = ["myfile1", "nonExistent", "emptyFile", "myfile2"]


class EmptyFileError(Exception):
    """Raised when a file exists but contains no data."""
    pass


for file in filenames:
    try:
        f = open(file, 'r')
        line = f.readline()
        if line == "":
            f.close()
            raise EmptyFileError("%s: is empty" % file)
    except IOError as error:
        print("%s: could not be opened: %s" % (file, error.strerror))
    except EmptyFileError as error:
        print(error)
    else:
        print("%s: %s" % (file, line))
    finally:
        print("Done processing", file)
31
73
0.504839
b9058a9a6aeb7e495abc710b44e918cfdd30a156
1,288
py
Python
plugins/crumbling_in.py
jimconner/digital_sky
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
[ "MIT" ]
2
2019-03-04T20:38:44.000Z
2019-03-15T22:34:25.000Z
plugins/crumbling_in.py
jimconner/digital_sky
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
[ "MIT" ]
null
null
null
plugins/crumbling_in.py
jimconner/digital_sky
9427cd19dbd9fb1c82ca12fa8f962532d700c67f
[ "MIT" ]
null
null
null
# Crumbling In
# Randomised coloured dots appear at both edges and build up,
# closing in towards the middle.
import sys, traceback, random
from numpy import array, full
28.622222
67
0.470497
b905b9044ea31f3964e2eca2dbedd8cd13ec51f5
16,884
py
Python
pybleau/app/plotting/tests/test_plot_config.py
KBIbiopharma/pybleau
5cdfce603ad29af874f74f0f527adc6b4c9066e8
[ "MIT" ]
4
2020-02-27T22:38:29.000Z
2021-05-03T05:32:11.000Z
pybleau/app/plotting/tests/test_plot_config.py
KBIbiopharma/pybleau
5cdfce603ad29af874f74f0f527adc6b4c9066e8
[ "MIT" ]
85
2020-02-04T21:57:14.000Z
2021-05-03T14:29:40.000Z
pybleau/app/plotting/tests/test_plot_config.py
KBIbiopharma/pybleau
5cdfce603ad29af874f74f0f527adc6b4c9066e8
[ "MIT" ]
1
2020-02-20T00:45:09.000Z
2020-02-20T00:45:09.000Z
from __future__ import division

from unittest import skipIf, TestCase
import os

from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal

BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"

if BACKEND_AVAILABLE:
    from app_common.apptools.testing_utils import assert_obj_gui_works

    from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
        HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
        LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
        SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
        BAR_PLOT_TYPE

LEN = 16

TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
                     "b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
                     "c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
                     "d": list("ababcabcdabcdeab"),
                     "e": np.random.randn(LEN),
                     "f": range(LEN),
                     # Highly repetitive column to split the entire data into 2
                     "g": np.array(["0", "1"] * (LEN // 2)),
                     "h": np.array([0, 1] * (LEN // 2), dtype=bool),
                     })
42.422111
79
0.636105
b905bf0f95f0e168b31539b1c4fa3ef57493a4f1
1,220
py
Python
test/integration/languages/test_mixed.py
thomasrockhu/bfg9000
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
[ "BSD-3-Clause" ]
72
2015-06-23T02:35:13.000Z
2021-12-08T01:47:40.000Z
test/integration/languages/test_mixed.py
thomasrockhu/bfg9000
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
[ "BSD-3-Clause" ]
139
2015-03-01T18:48:17.000Z
2021-06-18T15:45:14.000Z
test/integration/languages/test_mixed.py
thomasrockhu/bfg9000
1cd1226eab9bed2fc2ec6acccf7864fdcf2ed31a
[ "BSD-3-Clause" ]
19
2015-12-23T21:24:33.000Z
2022-01-06T04:04:41.000Z
import os.path

from .. import *
33.888889
77
0.644262
b906c6820493a72163f757fe7ce4006f0287b820
821
py
Python
code/7/collections/namedtupe_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
65
2017-11-01T01:57:21.000Z
2022-02-08T13:36:25.000Z
code/7/collections/namedtupe_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
9
2017-11-03T15:05:30.000Z
2018-05-17T03:18:36.000Z
code/7/collections/namedtupe_example.py
TeamLab/introduction_to_pythoy_TEAMLAB_MOOC
ebf1ff02d6a341bfee8695eac478ff8297cb97e4
[ "MIT" ]
64
2017-11-01T01:57:23.000Z
2022-01-19T03:52:12.000Z
from collections import namedtuple

# Basic example
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0] + p[1])

x, y = p
print(x, y)
print(p.x + p.y)
print(Point(x=11, y=22))

from collections import namedtuple
import csv

f = open("users.csv", "r")
next(f)
reader = csv.reader(f)

student_list = []
for row in reader:
    student_list.append(row)
    print(row)

print(student_list)

columns = ["user_id", "integration_id", "login_id", "password",
           "first_name", "last_name", "full_name", "sortable_name",
           "short_name", "email", "status"]

Student = namedtuple('Student', columns)

student_namedtupe_list = []
for row in student_list:
    student = Student(*row)
    student_namedtupe_list.append(student)

print(student_namedtupe_list[0])
print(student_namedtupe_list[0].full_name)
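# Two further namedtuple conveniences worth noting next to the example above
# (illustrative additions, not part of the original file):
print(student_namedtupe_list[0]._asdict())   # mapping of column name -> value for one record
print(p._replace(x=100))                     # returns a new Point with x replaced; p is unchanged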
24.147059
77
0.685749
b9078d0e4d15cf11492a86d93eb5a61b04a92b6f
1,439
py
Python
test/helper_tools/benchtool.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
4
2018-03-14T03:47:22.000Z
2018-06-28T08:00:39.000Z
test/helper_tools/benchtool.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
1
2021-05-09T11:18:14.000Z
2021-05-09T11:18:14.000Z
test/helper_tools/benchtool.py
dotnes/mitmproxy
5eb17bbf6d47c8d703763bfa41cf1ff3f98a632f
[ "MIT" ]
1
2018-04-22T15:43:46.000Z
2018-04-22T15:43:46.000Z
# Profile mitmdump with apachebench and
# yappi (https://code.google.com/p/yappi/)
#
# Requirements:
# - Apache Bench "ab" binary
# - pip install click yappi

from mitmproxy.main import mitmdump
from os import system
from threading import Thread

import time
import yappi
import click


if __name__ == '__main__':
    main()
25.245614
94
0.649062
b907c416aa083b16df70a844cea0da2fdc9f29d9
8,922
py
Python
pivpy/graphics.py
alexliberzonlab/pivpy
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
[ "BSD-3-Clause" ]
1
2018-07-15T07:17:30.000Z
2018-07-15T07:17:30.000Z
pivpy/graphics.py
alexliberzonlab/pivpy
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
[ "BSD-3-Clause" ]
4
2018-06-14T14:02:45.000Z
2018-07-15T00:19:01.000Z
pivpy/graphics.py
alexliberzonlab/pivpy
c1c984cd669fce6f5c0b6a602d6a51ed3fec5954
[ "BSD-3-Clause" ]
1
2019-07-18T15:25:02.000Z
2019-07-18T15:25:02.000Z
# -*- coding: utf-8 -*- """ Various plots """ import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation, FFMpegWriter import xarray as xr import os def quiver(data, arrScale = 25.0, threshold = None, nthArr = 1, contourLevels = None, colbar = True, logscale = False, aspectratio='equal', colbar_orient = 'vertical', units = None): """ Generates a quiver plot of a 'data' xarray DataArray object (single frame from a dataset) Inputs: data - xarray DataArray of the type defined in pivpy, one of the frames in the Dataset selected by default using .isel(t=0) threshold - values above the threshold will be set equal to threshold arrScale - use to change arrow scales nthArr - use to plot only every nth arrow from the array contourLevels - use to specify the maximum value (abs) of contour plots colbar - True/False wether to generate a colorbar or not logscale - if true then colorbar is on log scale aspectratio - set auto or equal for the plot's apearence colbar_orient - 'horizontal' or 'vertical' orientation of the colorbar (if colbar is True) Outputs: none Usage: graphics.quiver(data, arrScale = 0.2, threshold = Inf, n) """ data = dataset_to_array(data) x = data.x y = data.y u = data.u v = data.v if units is not None: lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s'] velUnits = units[2] tUnits = velUnits.split('/')[1] # make it 's' or 'dt' else: lUnits, velUnits, tUnits = '', '', '' if threshold is not None: data['u'] = xr.where(data['u']>threshold, threshold, data['u']) data['v'] = xr.where(data['v']>threshold, threshold, data['v']) S = np.array(np.sqrt(u**2 + v**2)) fig = plt.get_fignums() if len(fig) == 0: # if no figure is open fig, ax = plt.subplots() # open a new figure else: ax = plt.gca() if contourLevels is None: levels = np.linspace(0, np.max(S.flatten()), 30) # default contour levels up to max of S else: levels = np.linspace(0, contourLevels, 30) if logscale: c = ax.contourf(x,y,S,alpha=0.8, cmap = plt.get_cmap("Blues"), levels = levels, norm = plt.colors.LogNorm()) else: c = ax.contourf(x,y,S,alpha=0.8, cmap = plt.get_cmap("Blues"), levels=levels) if colbar: cbar = plt.colorbar(c, orientation=colbar_orient) cbar.set_label(r'$\left| \, V \, \right|$ ['+ lUnits +' $\cdot$ '+ tUnits +'$^{-1}$]') ax.quiver(x[::nthArr],y[::nthArr], u[::nthArr,::nthArr],v[::nthArr,::nthArr],units='width', scale = np.max(S*arrScale),headwidth=2) ax.set_xlabel('x (' + lUnits + ')') ax.set_ylabel('y (' + lUnits + ')') ax.set_aspect(aspectratio) return fig,ax def histogram(data, normed = False): """ this function will plot a normalized histogram of the velocity data. Input: data : xarray DataSet with ['u','v'] attrs['units'] normed : (optional) default is False to present normalized histogram """ u = np.asarray(data.u).flatten() v = np.asarray(data.v).flatten() units = data.attrs['units'] f,ax = plt.subplots(2) ax[0].hist(u,bins=np.int(np.sqrt(len(u))*0.5),density=normed) ax[0].set_xlabel('u ['+units[2]+']') ax[1] = plt.subplot2grid((2,1),(1,0)) ax[1].hist(v,bins=np.int(np.sqrt(len(v)*0.5)),density=normed) ax[1].set_xlabel('v ['+units[2]+']') plt.tight_layout() return f, ax def contour_plot(data, threshold = None, contourLevels = None, colbar = True, logscale = False, aspectration='equal', units=None): """ contourf ajusted for the xarray PIV dataset, creates a contour map for the data['w'] property. 
Input: data : xarray PIV DataArray, converted automatically using .isel(t=0) threshold : a threshold value, default is None (no data clipping) contourLevels : number of contour levels, default is None colbar : boolean (default is True) show/hide colorbar logscale : boolean (True is default) create in linear/log scale aspectration : string, 'equal' is the default """ data = dataset_to_array(data) if units is not None: lUnits = units[0] # ['m' 'm' 'mm/s' 'mm/s'] # velUnits = units[2] # tUnits = velUnits.split('/')[1] # make it 's' or 'dt' else: # lUnits, velUnits = '', '' lUnits = '' f,ax = plt.subplots() if threshold is not None: data['w'] = xr.where(data['w']>threshold, threshold, data['w']) m = np.amax(abs(data['w'])) if contourLevels == None: levels = np.linspace(-m, m, 30) else: levels = np.linspace(-contourLevels, contourLevels, 30) if logscale: c = ax.contourf(data.x,data.y,np.abs(data['w']), levels=levels, cmap = plt.get_cmap('RdYlBu'), norm=plt.colors.LogNorm()) else: c = ax.contourf(data.x,data.y,data['w'], levels=levels, cmap = plt.get_cmap('RdYlBu')) plt.xlabel('x [' + lUnits + ']') plt.ylabel('y [' + lUnits + ']') if colbar: cbar = plt.colorbar(c) cbar.set_label(r'$\omega$ [s$^{-1}$]') ax.set_aspect(aspectration) return f,ax def showf(data, variables=None, units=None, fig=None): """ showf(data, var, units) Arguments: data : xarray.DataSet that contains dimensions of t,x,y and variables u,v and maybe w (scalar) """ if variables is None: xlabel = ' ' ylabel = ' ' else: xlabel = variables[0] ylabel = variables[1] if units is not None: xlabel += ' ' + units[0] ylabel += ' ' + units[1] fig = plt.figure(None if fig is None else fig.number) for t in data['t']: d = data.isel(t=t) plt.quiver(d['x'],d['y'],d['u'],d['v'],d['u']**2 + d['v']**2) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.draw() plt.pause(0.1) plt.show() def showscal(data, property='ken'): """ showf(data, var, units) Arguments: data : xarray.DataSet that contains dimensions of t,x,y and a variable w (scalar) """ # fig = plt.figure(None if fig is None else fig.number) # import pdb; pdb.set_trace() # xlabel = (None if var is None else var[0]) + ' [' + (None if units is None else units[0])+']' # ylabel = (None if var is None else var[1]) + ' [' + (None if units is None else units[1])+']' data = data.piv.vec2scal(property=property) contour_plot(data) def animate(data, arrowscale=1, savepath=None): """ animates the quiver plot for the dataset (multiple frames) Input: data : xarray PIV type of DataSet arrowscale : [optional] integer, default is 1 savepath : [optional] path to save the MP4 animation, default is None Output: if savepath is None, then only an image display of the animation if savepath is an existing path, a file named im.mp4 is saved """ X, Y = data.x, data.y U, V = data.u[:,:,0], data.v[:,:,0] # first frame fig, ax = plt.subplots(1,1) M = np.sqrt(U**2 + V**2) Q = ax.quiver(X[::3,::3], Y[::3,::3], U[::3,::3], V[::3,::3], M[::3,::3], units='inches', scale=arrowscale) cb = plt.colorbar(Q) units = data.attrs['units'] cb.ax.set_ylabel('velocity (' + units[2] + ')') text = ax.text(0.2,1.05, '1/'+str(len(data.t)), ha='center', va='center', transform=ax.transAxes) anim = FuncAnimation(fig, update_quiver, fargs=(Q,data,text), frames = len(data.t), blit=False) mywriter = FFMpegWriter() if savepath: p = os.getcwd() os.chdir(savepath) anim.save('im.mp4', writer=mywriter) os.chdir(p) else: anim.save('im.mp4', writer=mywriter) def dataset_to_array(data,N=0): """ converts xarray Dataset to array """ if 't' in data.dims: 
print('Warning: function for a single frame, using first frame, supply data.isel(t=N)') data = data.isel(t=N) return data
32.922509
99
0.553015
b9081ad94fb9a0b4f6e0a49043c2a08a7969c6fc
1,212
py
Python
configs/my_config/vit_base_aspp.py
BostonCrayfish/mmsegmentation
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
[ "Apache-2.0" ]
null
null
null
configs/my_config/vit_base_aspp.py
BostonCrayfish/mmsegmentation
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
[ "Apache-2.0" ]
null
null
null
configs/my_config/vit_base_aspp.py
BostonCrayfish/mmsegmentation
e8b87242b877bfe0c32ea2630c2fd08977d7dd4b
[ "Apache-2.0" ]
null
null
null
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='pretrain/vit_base_patch16_224.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        # out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        interpolate_mode='bicubic'),
    neck=None,
    decode_head=dict(
        type='ASPPHead',
        in_channels=768,
        # in_index=3,
        channels=512,
        dilations=(1, 6, 12, 18),
        dropout_ratio=0.1,
        num_classes=21,
        contrast=True,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=None,
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# yapf: disable
28.857143
74
0.587459
b9083abf7ea4269348156a83680d8a60f00f6033
69,300
py
Python
tripleo_ansible/ansible_plugins/modules/podman_container.py
smolar/tripleo-ansible
7bd37f019870c032bea71f22b305832932d81424
[ "Apache-2.0" ]
null
null
null
tripleo_ansible/ansible_plugins/modules/podman_container.py
smolar/tripleo-ansible
7bd37f019870c032bea71f22b305832932d81424
[ "Apache-2.0" ]
null
null
null
tripleo_ansible/ansible_plugins/modules/podman_container.py
smolar/tripleo-ansible
7bd37f019870c032bea71f22b305832932d81424
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # Copyright (c) 2019 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # flake8: noqa: E501 from __future__ import absolute_import, division, print_function __metaclass__ = type import json from distutils.version import LooseVersion import yaml from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_bytes, to_native ANSIBLE_METADATA = { 'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = """ module: podman_container author: - "Sagi Shnaidman (@sshnaidm)" version_added: '2.9' short_description: Manage podman containers notes: [] description: - Start, stop, restart and manage Podman containers requirements: - "Podman installed on host" options: name: description: - Name of the container required: True type: str executable: description: - Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman) default: 'podman' type: str state: description: - I(absent) - A container matching the specified name will be stopped and removed. - I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no container matches the name, a container will be created. If a container matches the name but the provided configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created with the requested config. Image version will be taken into account when comparing configuration. Use the recreate option to force the re-creation of the matching container. - I(started) - Asserts there is a running container matching the name and any provided configuration. If no container matches the name, a container will be created and started. Use recreate to always re-create a matching container, even if it is running. Use force_restart to force a matching container to be stopped and restarted. - I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped state. type: str default: started choices: - absent - present - stopped - started image: description: - Repository path (or image name) and tag used to create the container. If an image is not found, the image will be pulled from the registry. If no tag is included, C(latest) will be used. - Can also be an image ID. If this is the case, the image is assumed to be available locally. type: str annotation: description: - Add an annotation to the container. The format is key value, multiple times. type: dict authfile: description: - Path of the authentication file. Default is ``${XDG_RUNTIME_DIR}/containers/auth.json`` (Not available for remote commands) You can also override the default path of the authentication file by setting the ``REGISTRY_AUTH_FILE`` environment variable. 
``export REGISTRY_AUTH_FILE=path`` type: path blkio_weight: description: - Block IO weight (relative weight) accepts a weight value between 10 and 1000 type: int blkio_weight_device: description: - Block IO weight (relative device weight, format DEVICE_NAME[:]WEIGHT). type: dict cap_add: description: - List of capabilities to add to the container. type: list elements: str cap_drop: description: - List of capabilities to drop from the container. type: list elements: str cgroup_parent: description: - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. type: path cgroupns: description: - Path to cgroups under which the cgroup for the container will be created. type: str cgroups: description: - Determines whether the container will create CGroups. Valid values are enabled and disabled, which the default being enabled. The disabled option will force the container to not create CGroups, and thus conflicts with CGroup options cgroupns and cgroup-parent. type: str choices: - default - disabled cidfile: description: - Write the container ID to the file type: path cmd_args: description: - Any additionl command options you want to pass to podman command, cmd_args - ['--other-param', 'value'] Be aware module doesn't support idempotency if this is set. type: list elements: str conmon_pidfile: description: - Write the pid of the conmon process to a file. conmon runs in a separate process than Podman, so this is necessary when using systemd to restart Podman containers. type: path command: description: - Override command of container. Can be a string or a list. type: raw cpu_period: description: - Limit the CPU real-time period in microseconds type: int cpu_rt_period: description: - Limit the CPU real-time period in microseconds. Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. type: int cpu_rt_runtime: description: - Limit the CPU real-time runtime in microseconds. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. type: int cpu_shares: description: - CPU shares (relative weight) type: int cpus: description: - Number of CPUs. The default is 0.0 which means no limit. type: str cpuset_cpus: description: - CPUs in which to allow execution (0-3, 0,1) type: str cpuset_mems: description: - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: str detach: description: - Run container in detach mode type: bool default: True debug: description: - Return additional information which can be helpful for investigations. type: bool default: False detach_keys: description: - Override the key sequence for detaching a container. Format is a single character or ctrl-value type: str device: description: - Add a host device to the container. The format is <device-on-host>[:<device-on-container>][:<permissions>] (e.g. device /dev/sdc:/dev/xvdc:rwm) type: list elements: str device_read_bps: description: - Limit read rate (bytes per second) from a device (e.g. device-read-bps /dev/sda:1mb) type: list device_read_iops: description: - Limit read rate (IO per second) from a device (e.g. device-read-iops /dev/sda:1000) type: list device_write_bps: description: - Limit write rate (bytes per second) to a device (e.g. 
device-write-bps /dev/sda:1mb) type: list device_write_iops: description: - Limit write rate (IO per second) to a device (e.g. device-write-iops /dev/sda:1000) type: list dns: description: - Set custom DNS servers type: list elements: str dns_option: description: - Set custom DNS options type: str dns_search: description: - Set custom DNS search domains (Use dns_search with '' if you don't wish to set the search domain) type: str entrypoint: description: - Overwrite the default ENTRYPOINT of the image type: str env: description: - Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. type: dict env_file: description: - Read in a line delimited file of environment variables type: path env_host: description: - Use all current host environment variables in container. Defaults to false. type: bool etc_hosts: description: - Dict of host-to-IP mappings, where each host name is a key in the dictionary. Each host name will be added to the container's ``/etc/hosts`` file. type: dict aliases: - add_hosts expose: description: - Expose a port, or a range of ports (e.g. expose "3300-3310") to set up port redirection on the host system. type: list elements: str aliases: - exposed - exposed_ports force_restart: description: - Force restart of container. type: bool default: False aliases: - restart gidmap: description: - Run the container in a new user namespace using the supplied mapping. type: str group_add: description: - Add additional groups to run as type: list healthcheck: description: - Set or alter a healthcheck command for a container. type: str healthcheck_interval: description: - Set an interval for the healthchecks (a value of disable results in no automatic timer setup) (default "30s") type: str healthcheck_retries: description: - The number of retries allowed before a healthcheck is considered to be unhealthy. The default value is 3. type: int healthcheck_start_period: description: - The initialization time needed for a container to bootstrap. The value can be expressed in time format like 2m3s. The default value is 0s type: str healthcheck_timeout: description: - The maximum time allowed to complete the healthcheck before an interval is considered failed. Like start-period, the value can be expressed in a time format such as 1m22s. The default value is 30s type: str hostname: description: - Container host name. Sets the container host name that is available inside the container. type: str http_proxy: description: - By default proxy environment variables are passed into the container if set for the podman process. This can be disabled by setting the http_proxy option to false. The environment variables passed in include http_proxy, https_proxy, ftp_proxy, no_proxy, and also the upper case versions of those. Defaults to true type: bool image_volume: description: - Tells podman how to handle the builtin image volumes. The options are bind, tmpfs, or ignore (default bind) type: str choices: - 'bind' - 'tmpfs' - 'ignore' image_strict: description: - Whether to compare images in idempotency by taking into account a full name with registry and namespaces. type: bool default: False init: description: - Run an init inside the container that forwards signals and reaps processes. type: str init_path: description: - Path to the container-init binary. type: str interactive: description: - Keep STDIN open even if not attached. The default is false. 
When set to true, keep stdin open even if not attached. The default is false. type: bool ip: description: - Specify a static IP address for the container, for example '10.88.64.128'. Can only be used if no additional CNI networks to join were specified via 'network:', and if the container is not joining another container's network namespace via 'network container:<name|id>'. The address must be within the default CNI network's pool (default 10.88.0.0/16). type: str ipc: description: - Default is to create a private IPC namespace (POSIX SysV IPC) for the container type: str kernel_memory: description: - Kernel memory limit (format <number>[<unit>], where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str label: description: - Add metadata to a container, pass dictionary of label names and values type: dict label_file: description: - Read in a line delimited file of labels type: str log_driver: description: - Logging driver. Used to set the log driver for the container. For example log_driver "k8s-file". type: str choices: - k8s-file - journald - json-file log_opt: description: - Logging driver specific options. Used to set the path to the container log file. For example log_opt "path=/var/log/container/mycontainer.json" type: str aliases: - log_options memory: description: - Memory limit (format 10k, where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str memory_reservation: description: - Memory soft limit (format 100m, where unit = b, k, m or g) Note - idempotency is supported for integers only. type: str memory_swap: description: - A limit value equal to memory plus swap. Must be used with the -m (--memory) flag. The swap LIMIT should always be larger than -m (--memory) value. By default, the swap LIMIT will be set to double the value of --memory Note - idempotency is supported for integers only. type: str memory_swappiness: description: - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: int mount: description: - Attach a filesystem mount to the container. bind or tmpfs For example mount "type=bind,source=/path/on/host,destination=/path/in/container" type: str network: description: - Set the Network mode for the container * bridge create a network stack on the default bridge * none no networking * container:<name|id> reuse another container's network stack * host use the podman host network stack. * <network-name>|<network-id> connect to a user-defined network * ns:<path> path to a network namespace to join * slirp4netns use slirp4netns to create a user network stack. This is the default for rootless containers type: list elements: str aliases: - net no_hosts: description: - Do not create /etc/hosts for the container Default is false. type: bool oom_kill_disable: description: - Whether to disable OOM Killer for the container or not. Default is false. type: bool oom_score_adj: description: - Tune the host's OOM preferences for containers (accepts -1000 to 1000) type: int pid: description: - Set the PID mode for the container type: str pids_limit: description: - Tune the container's pids limit. Set -1 to have unlimited pids for the container. type: str pod: description: - Run container in an existing pod. If you want podman to make the pod for you, preference the pod name with "new:" type: str privileged: description: - Give extended privileges to this container. The default is false. type: bool publish: description: - Publish a container's port, or range of ports, to the host. 
Format - ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort type: list elements: str aliases: - ports - published - published_ports publish_all: description: - Publish all exposed ports to random ports on the host interfaces. The default is false. type: bool read_only: description: - Mount the container's root filesystem as read only. Default is false type: bool read_only_tmpfs: description: - If container is running in --read-only mode, then mount a read-write tmpfs on /run, /tmp, and /var/tmp. The default is true type: bool recreate: description: - Use with present and started states to force the re-creation of an existing container. type: bool default: False restart_policy: description: - Restart policy to follow when containers exit. Restart policy will not take effect if a container is stopped via the podman kill or podman stop commands. Valid values are * no - Do not restart containers on exit * on-failure[:max_retries] - Restart containers when they exit with a non-0 exit code, retrying indefinitely or until the optional max_retries count is hit * always - Restart containers when they exit, regardless of status, retrying indefinitely type: str rm: description: - Automatically remove the container when it exits. The default is false. type: bool aliases: - remove rootfs: description: - If true, the first argument refers to an exploded container on the file system. The dafault is false. type: bool security_opt: description: - Security Options. For example security_opt "seccomp=unconfined" type: list elements: str shm_size: description: - Size of /dev/shm. The format is <number><unit>. number must be greater than 0. Unit is optional and can be b (bytes), k (kilobytes), m(megabytes), or g (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses 64m type: str sig_proxy: description: - Proxy signals sent to the podman run command to the container process. SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is true. type: bool stop_signal: description: - Signal to stop a container. Default is SIGTERM. type: int stop_timeout: description: - Timeout (in seconds) to stop a container. Default is 10. type: int subgidname: description: - Run the container in a new user namespace using the map with 'name' in the /etc/subgid file. type: str subuidname: description: - Run the container in a new user namespace using the map with 'name' in the /etc/subuid file. type: str sysctl: description: - Configure namespaced kernel parameters at runtime type: dict systemd: description: - Run container in systemd mode. The default is true. type: bool tmpfs: description: - Create a tmpfs mount. For example tmpfs "/tmp" "rw,size=787448k,mode=1777" type: dict tty: description: - Allocate a pseudo-TTY. The default is false. type: bool uidmap: description: - Run the container in a new user namespace using the supplied mapping. type: list ulimit: description: - Ulimit options type: list user: description: - Sets the username or UID used and optionally the groupname or GID for the specified command. type: str userns: description: - Set the user namespace mode for the container. It defaults to the PODMAN_USERNS environment variable. An empty value means user namespaces are disabled. type: str uts: description: - Set the UTS mode for the container type: str volume: description: - Create a bind mount. If you specify, volume /HOST-DIR:/CONTAINER-DIR, podman bind mounts /HOST-DIR in the host to /CONTAINER-DIR in the podman container. 
type: list elements: str aliases: - volumes volumes_from: description: - Mount volumes from the specified container(s). type: list elements: str workdir: description: - Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). type: str """ EXAMPLES = """ - name: Run container podman_container: name: container image: quay.io/bitnami/wildfly state: started - name: Create a data container podman_container: name: mydata image: busybox volume: - /tmp/data - name: Re-create a redis container podman_container: name: myredis image: redis command: redis-server --appendonly yes state: present recreate: yes expose: - 6379 volumes_from: - mydata - name: Restart a container podman_container: name: myapplication image: redis state: started restart: yes etc_hosts: other: "127.0.0.1" restart_policy: "no" device: "/dev/sda:/dev/xvda:rwm" ports: - "8080:9000" - "127.0.0.1:8081:9001/udp" env: SECRET_KEY: "ssssh" BOOLEAN_KEY: "yes" - name: Container present podman_container: name: mycontainer state: present image: ubuntu:14.04 command: "sleep 1d" - name: Stop a container podman_container: name: mycontainer state: stopped - name: Start 4 load-balanced containers podman_container: name: "container{{ item }}" recreate: yes image: someuser/anotherappimage command: sleep 1d with_sequence: count=4 - name: remove container podman_container: name: ohno state: absent - name: Writing output podman_container: name: myservice image: busybox log_options: path=/var/log/container/mycontainer.json log_driver: k8s-file """ RETURN = """ container: description: - Facts representing the current state of the container. Matches the podman inspection output. - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts are also accessible directly as C(podman_container). Note that the returned fact will be removed in Ansible 2.12. - Empty if C(state) is I(absent). returned: always type: dict sample: '{ "AppArmorProfile": "", "Args": [ "sh" ], "BoundingCaps": [ "CAP_CHOWN", ... ], "Config": { "Annotations": { "io.kubernetes.cri-o.ContainerType": "sandbox", "io.kubernetes.cri-o.TTY": "false" }, "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "sh" ], "Domainname": "", "Entrypoint": "", "Env": [ "PATH=/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "HOSTNAME=", "container=podman" ], "Hostname": "", "Image": "docker.io/library/busybox:latest", "Labels": null, "OpenStdin": false, "StdinOnce": false, "StopSignal": 15, "Tty": false, "User": { "gid": 0, "uid": 0 }, "Volumes": null, "WorkingDir": "/" }, "ConmonPidFile": "...", "Created": "2019-06-17T19:13:09.873858307+03:00", "Dependencies": [], "Driver": "overlay", "EffectiveCaps": [ "CAP_CHOWN", ... ], "ExecIDs": [], "ExitCommand": [ "/usr/bin/podman", "--root", ... ], "GraphDriver": { ... }, "HostConfig": { ... }, "HostnamePath": "...", "HostsPath": "...", "ID": "...", "Image": "...", "ImageName": "docker.io/library/busybox:latest", "IsInfra": false, "LogPath": "/tmp/container/mycontainer.json", "MountLabel": "system_u:object_r:container_file_t:s0:c282,c782", "Mounts": [ ... ], "Name": "myservice", "Namespace": "", "NetworkSettings": { "Bridge": "", ... 
}, "Path": "sh", "ProcessLabel": "system_u:system_r:container_t:s0:c282,c782", "ResolvConfPath": "...", "RestartCount": 0, "Rootfs": "", "State": { "Dead": false, "Error": "", "ExitCode": 0, "FinishedAt": "2019-06-17T19:13:10.157518963+03:00", "Healthcheck": { "FailingStreak": 0, "Log": null, "Status": "" }, "OOMKilled": false, "OciVersion": "1.0.1-dev", "Paused": false, "Pid": 4083, "Restarting": false, "Running": false, "StartedAt": "2019-06-17T19:13:10.152479729+03:00", "Status": "exited" }, "StaticDir": "..." ... }' """ def ensure_image_exists(module, image): """If image is passed, ensure it exists, if not - pull it or fail. Arguments: module {obj} -- ansible module object image {str} -- name of image Returns: list -- list of image actions - if it pulled or nothing was done """ image_actions = [] module_exec = module.params['executable'] if not image: return image_actions rc, out, err = module.run_command([module_exec, 'image', 'exists', image]) if rc == 0: return image_actions rc, out, err = module.run_command([module_exec, 'image', 'pull', image]) if rc != 0: module.fail_json(msg="Can't pull image %s" % image, stdout=out, stderr=err) image_actions.append("pulled image %s" % image) return image_actions def _perform_action(self, action): """Perform action with container. Arguments: action {str} -- action to perform - start, create, stop, run, delete """ b_command = PodmanModuleParams(action, self.module.params, self.version, self.module, ).construct_command_from_params() full_cmd = " ".join([self.module.params['executable']] + [to_native(i) for i in b_command]) self.module.log("PODMAN-CONTAINER-DEBUG: %s" % full_cmd) self.actions.append(full_cmd) if not self.module.check_mode: rc, out, err = self.module.run_command( [self.module.params['executable'], b'container'] + b_command, expand_user_and_vars=False) self.stdout = out self.stderr = err if rc != 0: self.module.fail_json( msg="Can't %s container %s" % (action, self.name), stdout=out, stderr=err) def run(self): """Run the container.""" self._perform_action('run') def delete(self): """Delete the container.""" self._perform_action('delete') def stop(self): """Stop the container.""" self._perform_action('stop') def start(self): """Start the container.""" self._perform_action('start') def create(self): """Create the container.""" self._perform_action('create') def recreate(self): """Recreate the container.""" self.delete() self.run() def restart(self): """Restart the container.""" self.stop() self.run() class PodmanManager: """Module manager class. Defines according to parameters what actions should be applied to container """ def __init__(self, module): """Initialize PodmanManager class. Arguments: module {obj} -- ansible module object """ super(PodmanManager, self).__init__() self.module = module self.results = { 'changed': False, 'actions': [], 'container': {}, } self.name = self.module.params['name'] self.executable = \ self.module.get_bin_path(self.module.params['executable'], required=True) self.image = self.module.params['image'] image_actions = ensure_image_exists(self.module, self.image) self.results['actions'] += image_actions self.state = self.module.params['state'] self.restart = self.module.params['force_restart'] self.recreate = self.module.params['recreate'] self.container = PodmanContainer(self.module, self.name) def update_container_result(self, changed=True): """Inspect the current container, update results with last info, exit. 
Keyword Arguments: changed {bool} -- whether any action was performed (default: {True}) """ facts = self.container.get_info() if changed else self.container.info out, err = self.container.stdout, self.container.stderr self.results.update({'changed': changed, 'container': facts, 'podman_actions': self.container.actions}, stdout=out, stderr=err) if self.container.diff: self.results.update({'diff': self.container.diff}) if self.module.params['debug']: self.results.update({'podman_version': self.container.version}) self.module.exit_json(**self.results) def make_started(self): """Run actions if desired state is 'started'.""" if self.container.running and \ (self.container.different or self.recreate): self.container.recreate() self.results['actions'].append('recreated %s' % self.container.name) self.update_container_result() elif self.container.running and not self.container.different: if self.restart: self.container.restart() self.results['actions'].append('restarted %s' % self.container.name) self.update_container_result() self.update_container_result(changed=False) elif not self.container.exists: self.container.run() self.results['actions'].append('started %s' % self.container.name) self.update_container_result() elif self.container.stopped and self.container.different: self.container.recreate() self.results['actions'].append('recreated %s' % self.container.name) self.update_container_result() elif self.container.stopped and not self.container.different: self.container.start() self.results['actions'].append('started %s' % self.container.name) self.update_container_result() def make_stopped(self): """Run actions if desired state is 'stopped'.""" if not self.container.exists and not self.image: self.module.fail_json(msg='Cannot create container when image' ' is not specified!') if not self.container.exists: self.container.create() self.results['actions'].append('created %s' % self.container.name) self.update_container_result() if self.container.stopped: self.update_container_result(changed=False) elif self.container.running: self.container.stop() self.results['actions'].append('stopped %s' % self.container.name) self.update_container_result() def make_absent(self): """Run actions if desired state is 'absent'.""" if not self.container.exists: self.results.update({'changed': False}) elif self.container.exists: self.container.delete() self.results['actions'].append('deleted %s' % self.container.name) self.results.update({'changed': True}) self.results.update({'container': {}, 'podman_actions': self.container.actions}) self.module.exit_json(**self.results) def execute(self): """Execute the desired action according to map of actions & states.""" states_map = { 'present': self.make_started, 'started': self.make_started, 'absent': self.make_absent, 'stopped': self.make_stopped } process_action = states_map[self.state] process_action() self.module.fail_json(msg="Unexpected logic error happened, " "please contact maintainers ASAP!") if __name__ == '__main__': main()
34.65
99
0.601198
b908698cf79967eaadf3686141afa64182f22f9d
4,756
py
Python
setup.py
UdoGi/dark-matter
3d49e89fa5e81f83144119f6216c5774176d203b
[ "MIT" ]
null
null
null
setup.py
UdoGi/dark-matter
3d49e89fa5e81f83144119f6216c5774176d203b
[ "MIT" ]
null
null
null
setup.py
UdoGi/dark-matter
3d49e89fa5e81f83144119f6216c5774176d203b
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from setuptools import setup

# Modified from http://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package

# Explicitly list bin scripts to be installed, seeing as I have a few local
# bin files that are not (yet) part of the distribution.
scripts = [
    'bin/aa-info.py',
    'bin/aa-to-dna.py',
    'bin/aa-to-properties.py',
    'bin/adaptor-distances.py',
    'bin/alignment-panel-civ.py',
    'bin/alignments-per-read.py',
    'bin/bit-score-to-e-value.py',
    'bin/cat-json-blast-records.py',
    'bin/check-fasta-json-blast-consistency.py',
    'bin/codon-distance.py',
    'bin/compare-consensuses.py',
    'bin/compare-sequences.py',
    'bin/convert-blast-xml-to-json.py',
    'bin/convert-diamond-to-json.py',
    'bin/convert-diamond-to-sam.py',
    'bin/convert-sam-to-fastq.sh',
    'bin/create-newick-relabeling-output.py',
    'bin/dark-matter-version.py',
    'bin/describe-protein-database.py',
    'bin/dna-to-aa.py',
    'bin/download-genbank.sh',
    'bin/e-value-to-bit-score.py',
    'bin/extract-ORFs.py',
    'bin/fasta-base-indices.py',
    'bin/fasta-count.py',
    'bin/fasta-diff.sh',
    'bin/fasta-identity-table.py',
    'bin/fasta-ids.py',
    'bin/fasta-join.py',
    'bin/fasta-lengths.py',
    'bin/fasta-sequences.py',
    'bin/fasta-sort.py',
    'bin/fasta-split-by-id.py',
    'bin/fasta-subset.py',
    'bin/fasta-subtraction.py',
    'bin/fasta-to-phylip.py',
    'bin/fasta-variable-sites.py',
    'bin/filter-fasta-by-complexity.py',
    'bin/filter-fasta-by-taxonomy.py',
    'bin/filter-fasta.py',
    'bin/filter-hits-to-fasta.py',
    'bin/filter-reads-alignments.py',
    'bin/filter-sam.py',
    'bin/find-hits.py',
    'bin/format-fasta.py',
    'bin/genome-protein-summary.py',
    'bin/get-features.py',
    'bin/get-hosts.py',
    'bin/get-reads.py',
    'bin/get-taxonomy.py',
    'bin/graph-evalues.py',
    'bin/local-align.py',
    'bin/make-consensus.py',
    'bin/make-fasta-database.py',
    'bin/make-protein-database.py',
    'bin/ncbi-fetch-id.py',
    'bin/newick-to-ascii.py',
    'bin/noninteractive-alignment-panel.py',
    'bin/parse-genbank-flat-file.py',
    'bin/position-summary.py',
    'bin/pre-commit.sh',
    'bin/print-blast-xml-for-derek.py',
    'bin/print-blast-xml.py',
    'bin/print-read-lengths.py',
    'bin/proteins-to-pathogens.py',
    'bin/proteins-to-pathogens-civ.py',
    'bin/randomize-fasta.py',
    'bin/read-blast-json.py',
    'bin/read-blast-xml.py',
    'bin/relabel-newick-tree.py',
    'bin/run-bwa.py',
    'bin/run-bowtie2.py',
    'bin/sam-coverage.py',
    'bin/sam-coverage-depth.py',
    'bin/sam-to-fasta-alignment.py',
    'bin/sam-reference-read-counts.py',
    'bin/sam-references.py',
    'bin/sff-to-fastq.py',
    'bin/split-fasta-by-adaptors.py',
    'bin/subset-protein-database.py',
    'bin/summarize-fasta-bases.py',
    'bin/summarize-reads.py',
    'bin/trim-primers.py',
    'bin/trim-reads.py',
    'bin/write-htcondor-job-spec.py',
]

setup(name='dark-matter',
      version=version(),
      packages=['dark', 'dark.blast', 'dark.diamond', 'dark.civ'],
      url='https://github.com/acorg/dark-matter',
      download_url='https://github.com/acorg/dark-matter',
      author='Terry Jones, Barbara Muehlemann, Tali Veith, Sophie Mathias',
      author_email='[email protected]',
      keywords=['virus discovery'],
      classifiers=[
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      license='MIT',
      description='Python classes for working with genetic sequence data',
      scripts=scripts,
      install_requires=[
          'biopython>=1.71',
          'bz2file>=0.98',
          'Cython>=0.29.16',
          'ipython>=3.1.0',
          'matplotlib>=1.4.3',
          'mysql-connector-python==8.0.11',
          'numpy>=1.14.2',
          'pysam>=0.15.2',
          'pyfaidx>=0.4.8.4',
          'pyzmq>=14.3.1',
          'requests>=2.18.4',
          'cachetools>=3.1.0',
          'simplejson>=3.5.3',
          'six>=1.11.0',
      ])
31.919463
75
0.603238
b9093a206a1a67140ea6cc8087c03166f895cb37
1,732
py
Python
authenticationApp/templatetags/timetags.py
FilipBali/VirtualPortfolio-WebApplication
9236509205e37c2c682b7b2f518f5794a94fd178
[ "MIT" ]
null
null
null
authenticationApp/templatetags/timetags.py
FilipBali/VirtualPortfolio-WebApplication
9236509205e37c2c682b7b2f518f5794a94fd178
[ "MIT" ]
null
null
null
authenticationApp/templatetags/timetags.py
FilipBali/VirtualPortfolio-WebApplication
9236509205e37c2c682b7b2f518f5794a94fd178
[ "MIT" ]
null
null
null
# ======================================================================================================================
# Fakulta informacnich technologii VUT v Brne
# Bachelor thesis
# Author: Filip Bali (xbalif00)
# License: MIT
# ======================================================================================================================

from django import template
import datetime
import time
from portfolioApp.models import NotificationEvent

register = template.Library()

import pandas as pd

register.filter(print_timestamp)
register.filter(print_timestamp_analysis)
register.filter(print_timestamp_notifications)
register.filter(print_notification_text)
register.filter(print_symbol_notifications)
register.filter(print_type_notifications)
34.64
120
0.65127
b909e91c70f62d03b4cb515c5e970eae1b71dc91
585
py
Python
pycfmodel/model/resources/properties/policy.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
23
2018-06-28T10:45:01.000Z
2021-05-07T11:12:39.000Z
pycfmodel/model/resources/properties/policy.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
27
2019-03-09T08:33:22.000Z
2022-03-03T14:59:11.000Z
pycfmodel/model/resources/properties/policy.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
7
2019-03-09T02:18:18.000Z
2021-07-22T20:33:09.000Z
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.properties.property import Property
from pycfmodel.model.types import Resolvable, ResolvableStr
32.5
118
0.788034
b90a40f6dda56faee8a822969d3d8c8da41382ab
99
py
Python
stlearn/__init__.py
mrahim/stacked-learn
b04b49f65f06de7f5b59ba4139b0f78f8d66d94a
[ "BSD-3-Clause" ]
2
2017-05-23T18:06:53.000Z
2017-08-18T19:03:04.000Z
stlearn/__init__.py
mrahim/stacked-learn
b04b49f65f06de7f5b59ba4139b0f78f8d66d94a
[ "BSD-3-Clause" ]
7
2017-03-14T15:56:20.000Z
2017-05-18T08:28:44.000Z
stlearn/__init__.py
mrahim/stacked-learn
b04b49f65f06de7f5b59ba4139b0f78f8d66d94a
[ "BSD-3-Clause" ]
1
2018-10-05T08:07:44.000Z
2018-10-05T08:07:44.000Z
from .stacking import StackingClassifier, stack_features
from .multitask import MultiTaskEstimator
33
56
0.878788
b90a7ababb1e0f6301fc1099880a560c64176ef6
4,209
bzl
Python
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
utsavm9/wasm-micro-runtime
0960e82db2be30b741f5c83e7a57ea9056b2ab59
[ "Apache-2.0" ]
2
2020-08-27T03:48:31.000Z
2020-09-17T03:02:53.000Z
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
utsavm9/wasm-micro-runtime
0960e82db2be30b741f5c83e7a57ea9056b2ab59
[ "Apache-2.0" ]
3
2020-09-11T04:03:00.000Z
2020-09-23T06:16:43.000Z
samples/workload/XNNPACK/toolchain/emscripten_toolchain_config.bzl
utsavm9/wasm-micro-runtime
0960e82db2be30b741f5c83e7a57ea9056b2ab59
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
    "@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
    "feature",
    "flag_group",
    "flag_set",
    "tool_path",
)

all_compile_actions = [
    ACTION_NAMES.c_compile,
    ACTION_NAMES.cpp_compile,
]

all_link_actions = [
    ACTION_NAMES.cpp_link_executable,
    ACTION_NAMES.cpp_link_dynamic_library,
    ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]

emsdk_toolchain_config = rule(
    implementation = _impl,
    attrs = {},
    provides = [CcToolchainConfigInfo],
)
30.5
82
0.434545
b90aa19934d5d7330ff2185f5e9e641a32b1df92
8,781
py
Python
cloud_storages/gdrive/gdrive.py
toplenboren/safezone
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
[ "MIT" ]
null
null
null
cloud_storages/gdrive/gdrive.py
toplenboren/safezone
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
[ "MIT" ]
null
null
null
cloud_storages/gdrive/gdrive.py
toplenboren/safezone
eafad765ed7cd6f6b7607ac07e75fd843d32ee07
[ "MIT" ]
null
null
null
from __future__ import print_function

import json
from typing import List
from functools import lru_cache

from cloud_storages.http_shortcuts import *
from database.database import Database
from models.models import StorageMetaInfo, Resource, Size
from cloud_storages.storage import Storage
from cloud_storages.gdrive.client_config import GOOGLE_DRIVE_CONFIG, SCOPES

from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials

GOOGLE_DRIVE_DB_KEY = 'google'

def create_path(self, remote_path: List[str]) -> None:
    """ Creates the remote path on yandex disk """
    print(f'[{__name__}] Trying to create directory {"/".join(remote_path)} on remote...')
    dir_to_create = []
    for dir in remote_path:
        dir_to_create.append(dir)
        path_to_create = '/'.join(dir_to_create)
        response = put_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={path_to_create}',
                                  token=self.token)
        if 199 < response.status_code < 401:
            print(f'[{__name__}] Created directory {path_to_create}')
            continue
        elif response.status_code == 409 and ' ' in response.json().get('message', ''):
            continue
    return

def save_resource_to_path(self, resource: Resource, remote_path: str, overwrite: bool, _rec_call:bool = False) -> Resource or None:
    """
    Put an Item to the directory
    :param resource: resource on the local fs
    :param remote_path: string, path to resource on remote fs
    :param _rec_call: bool, a system parameter, whether or not this function was called as a recursive call
    :return: saved resource or raises exception
    """
    upload_successful_flag = False
    response = get_with_OAuth(
        f'https://cloud-api.yandex.net/v1/disk/resources/upload?path={remote_path}&overwrite=${overwrite}',
        token=self.token
    )
    if response.status_code == 200:
        response_read = response.json()
        upload_link = response_read['href']
        with open(resource.path, 'rb') as f:
            files = f
            response = put_with_OAuth(upload_link, data=files)
            if 199 < response.status_code < 401:
                upload_successful_flag = True
        response = get_with_OAuth(f'https://cloud-api.yandex.net/v1/disk/resources?path={remote_path}',
                                  token=self.token)
        resource_metainfo = self._deserialize_resource(response.json())
        if 199 < response.status_code < 401:
            return resource_metainfo
        elif upload_successful_flag:
            return resource
    # This dir is not present in the storage
    # We use _rec_call to tell that the next call was made as recursive call, so we don't cause SO
    elif response.status_code == 409 and not _rec_call:
        # We don't need to create a folder with the name equal to the filename, so we do [:-1]
        self.create_path(remote_path.split('/')[:-1])
        return self.save_resource_to_path(resource, remote_path, overwrite, _rec_call=True)
    raise ValueError(f"Something went wrong with YD: Response: "
                     f"{str(response.status_code)} {response.json().get('message', '')}")

def main():
    storage = GDriveStorage(None)
    db = Database('../storage.db')
    storage.auth(db)
    authed_storage = GDriveStorage(json.loads(db.get(GOOGLE_DRIVE_DB_KEY))['token'])
    result = authed_storage.list_resources_on_path('savezone')
    print(result)

if __name__ == '__main__':
    main()
38.853982
135
0.59959
b90b0ec76c39d933c89c13f5c997460e2300453d
677
py
Python
index/urls.py
darkestmidnight/fedcodeathon2018
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
[ "MIT" ]
1
2019-02-08T02:15:52.000Z
2019-02-08T02:15:52.000Z
index/urls.py
darkestmidnight/fedcodeathon2018
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
[ "MIT" ]
null
null
null
index/urls.py
darkestmidnight/fedcodeathon2018
2cac972b6eaebd7bfc47c02aade36b0f4a6869ab
[ "MIT" ]
1
2018-10-23T21:52:39.000Z
2018-10-23T21:52:39.000Z
from django.urls import re_path, include
from . import views

app_name='logged'

# url mappings for the webapp.
urlpatterns = [
    re_path(r'^$', views.logged_count, name="logged_count"),
    re_path(r'^loggedusers/', views.logged, name="logged_users"),
    re_path(r'^settings/', views.user_settings, name="update_info"),
    re_path(r'^administrators/', views.post_alert, name="post_alert"),
    re_path(r'^alerts/$', views.list_alert, name="list_alert"),
    re_path(r'^alerts/(?P<slug>[\w-]+)/$', views.view_alert, name="view_alert"),
    re_path(r'^display/', views.display, name="display"),
    re_path(r'^doorselection/', views.doors_election, name="door_selecttion")
]
42.3125
80
0.698671
b90cb0cd96548302814d62e2805216240024b671
3,202
py
Python
scout/dao/item.py
uw-it-aca/scout
be787378c216f1fb172d68914a550a91c62bc264
[ "Apache-2.0" ]
7
2017-01-29T09:51:22.000Z
2022-02-24T16:40:55.000Z
scout/dao/item.py
uw-it-aca/scout
be787378c216f1fb172d68914a550a91c62bc264
[ "Apache-2.0" ]
338
2016-03-21T19:55:04.000Z
2022-03-30T21:12:28.000Z
scout/dao/item.py
uw-it-aca/scout
be787378c216f1fb172d68914a550a91c62bc264
[ "Apache-2.0" ]
4
2016-03-02T01:19:01.000Z
2016-12-13T14:48:31.000Z
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

from scout.dao.space import get_spots_by_filter, _get_spot_filters, \
    _get_extended_info_by_key
import copy
28.846847
76
0.584635
b90cef65b59792b28b4c92088d99214713e0be27
458
py
Python
juriscraper/opinions/united_states/state/minnctapp.py
umeboshi2/juriscraper
16abceb3747947593841b1c2708de84dcc85c59d
[ "BSD-2-Clause" ]
null
null
null
juriscraper/opinions/united_states/state/minnctapp.py
umeboshi2/juriscraper
16abceb3747947593841b1c2708de84dcc85c59d
[ "BSD-2-Clause" ]
null
null
null
juriscraper/opinions/united_states/state/minnctapp.py
umeboshi2/juriscraper
16abceb3747947593841b1c2708de84dcc85c59d
[ "BSD-2-Clause" ]
1
2021-03-03T00:03:16.000Z
2021-03-03T00:03:16.000Z
#Scraper for Minnesota Court of Appeals Published Opinions
#CourtID: minnctapp
#Court Short Name: MN
#Author: mlr
#Date: 2016-06-03

from juriscraper.opinions.united_states.state import minn
26.941176
58
0.703057
b90d416b48352a6528abbda811ab137b9f58c6c2
1,223
py
Python
monty/os/__init__.py
JosephMontoya-TRI/monty
facef1776c7d05c941191a32a0b93f986a9761dd
[ "MIT" ]
null
null
null
monty/os/__init__.py
JosephMontoya-TRI/monty
facef1776c7d05c941191a32a0b93f986a9761dd
[ "MIT" ]
null
null
null
monty/os/__init__.py
JosephMontoya-TRI/monty
facef1776c7d05c941191a32a0b93f986a9761dd
[ "MIT" ]
null
null
null
from __future__ import absolute_import

import os
import errno
from contextlib import contextmanager

__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '1/24/14'


def makedirs_p(path, **kwargs):
    """
    Wrapper for os.makedirs that does not raise an exception if the directory
    already exists, in the fashion of "mkdir -p" command. The check is
    performed in a thread-safe way

    Args:
        path: path of the directory to create
        kwargs: standard kwargs for os.makedirs
    """
    try:
        os.makedirs(path, **kwargs)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
23.075472
111
0.6435
b90f4e751b3217015ecc06286993d45ab12fc397
405
py
Python
{{ cookiecutter.repo_name }}/tests/test_environment.py
FrancisMudavanhu/cookiecutter-data-science
be766817a7399ccd714bf03d085609985fa7313a
[ "MIT" ]
null
null
null
{{ cookiecutter.repo_name }}/tests/test_environment.py
FrancisMudavanhu/cookiecutter-data-science
be766817a7399ccd714bf03d085609985fa7313a
[ "MIT" ]
null
null
null
{{ cookiecutter.repo_name }}/tests/test_environment.py
FrancisMudavanhu/cookiecutter-data-science
be766817a7399ccd714bf03d085609985fa7313a
[ "MIT" ]
null
null
null
import sys

REQUIRED_PYTHON = "python3"
required_major = 3


if __name__ == '__main__':
    main()
19.285714
62
0.632099