Dataset schema (29 columns; the records below list their values in this order):

column                                       dtype    values
-------------------------------------------  -------  ----------------
hexsha                                       string   length 40
size                                         int64    5 to 2.06M
ext                                          string   11 classes
lang                                         string   1 class
max_stars_repo_path                          string   length 3 to 251
max_stars_repo_name                          string   length 4 to 130
max_stars_repo_head_hexsha                   string   length 40 to 78
max_stars_repo_licenses                      list     length 1 to 10
max_stars_count                              int64    1 to 191k
max_stars_repo_stars_event_min_datetime      string   length 24
max_stars_repo_stars_event_max_datetime      string   length 24
max_issues_repo_path                         string   length 3 to 251
max_issues_repo_name                         string   length 4 to 130
max_issues_repo_head_hexsha                  string   length 40 to 78
max_issues_repo_licenses                     list     length 1 to 10
max_issues_count                             int64    1 to 116k
max_issues_repo_issues_event_min_datetime    string   length 24
max_issues_repo_issues_event_max_datetime    string   length 24
max_forks_repo_path                          string   length 3 to 251
max_forks_repo_name                          string   length 4 to 130
max_forks_repo_head_hexsha                   string   length 40 to 78
max_forks_repo_licenses                      list     length 1 to 10
max_forks_count                              int64    1 to 105k
max_forks_repo_forks_event_min_datetime      string   length 24
max_forks_repo_forks_event_max_datetime      string   length 24
content                                      string   length 1 to 1.05M
avg_line_length                              float64  1 to 1.02M
max_line_length                              int64    3 to 1.04M
alphanum_fraction                            float64  0 to 1
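A dataset with this schema is typically consumed through the `datasets` library. As a minimal sketch of loading and filtering such records — the dataset path "bigcode/the-stack" and the data_dir are placeholders assumed for illustration, not taken from this dump:

from datasets import load_dataset

# Hypothetical dataset path and data_dir; stream so the multi-GB dump
# is not downloaded up front.
ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)

# Keep small, reasonably popular files; the event columns may be null.
for row in ds:
    if row["size"] < 10_000 and (row["max_stars_count"] or 0) >= 10:
        print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
        break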
e67e2b8d5cc36e4de07019122375c2f2fc7e621b
765
py
Python
ucs-python/create_ucs_sp_template.py
movinalot/ucs
dc0d37784592d6d78f46efee40c86b6f7ac928b4
[ "MIT" ]
null
null
null
ucs-python/create_ucs_sp_template.py
movinalot/ucs
dc0d37784592d6d78f46efee40c86b6f7ac928b4
[ "MIT" ]
null
null
null
ucs-python/create_ucs_sp_template.py
movinalot/ucs
dc0d37784592d6d78f46efee40c86b6f7ac928b4
[ "MIT" ]
2
2020-06-17T15:49:37.000Z
2021-01-28T07:21:21.000Z
""" create_ucs_sp_template.py Purpose: UCS Manager Create a UCS Service Profile Template Author: John McDonough ([email protected]) github: (@movinalot) Cisco Systems, Inc. """ from ucsmsdk.ucshandle import UcsHandle from ucsmsdk.mometa.ls.LsServer import LsServer from ucsmsdk.mometa.org.OrgOrg import OrgOrg HANDLE = UcsHandle( "sandbox-ucsm1.cisco.com", "admin", "password" ) HANDLE.login() ORG_ORG = OrgOrg( parent_mo_or_dn='org-root', name="devnet", ) HANDLE.add_mo(ORG_ORG, modify_present=True) HANDLE.commit() SP_TEMPLATE = LsServer( parent_mo_or_dn='org-root/org-devnet', name="devcore_template", type="updating-template" ) HANDLE.add_mo(SP_TEMPLATE, modify_present=True) HANDLE.commit() HANDLE.logout()
19.125
60
0.732026
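Each record repeats the same pattern: hexsha, size, ext, lang, then a path / repo name / head hexsha / licenses / event count / min and max event datetime block once each for stars, issues, and forks (null where no such events exist), then the file content and three statistics derived from it. A minimal sketch of the assumed definitions of those derived columns — they are assumptions, but they reproduce the record above (size 765 with avg_line_length 19.125 is consistent with 40 lines):

def derived_stats(content: str):
    # Assumed definitions, not taken from the dump itself.
    lines = content.split("\n")
    avg_line_length = len(content) / len(lines)            # e.g. 765 / 40 = 19.125
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(c.isalnum() for c in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction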
e67f72e9b27124ae9fe286846ee45d52e71dc993
4,105
py
Python
epab/core/config.py
132nd-etcher/epab
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
[ "MIT" ]
2
2018-12-13T06:49:10.000Z
2018-12-13T07:37:49.000Z
epab/core/config.py
etcher-be/epab
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
[ "MIT" ]
109
2018-08-22T04:25:56.000Z
2019-10-17T05:10:21.000Z
epab/core/config.py
etcher-be/epab
5226d3a36580f8cc50cf5dcac426adb1350a2c9b
[ "MIT" ]
1
2018-02-25T05:53:18.000Z
2018-02-25T05:53:18.000Z
# coding=utf-8 """ Handles EPAB's config file """ import logging import pathlib import elib_config CHANGELOG_DISABLE = elib_config.ConfigValueBool( 'changelog', 'disable', description='Disable changelog building', default=False ) CHANGELOG_FILE_PATH = elib_config.ConfigValuePath( 'changelog', 'file_path', description='Path to changelog file', default='CHANGELOG.md' ) CHANGELOG_FILE_PATH.must_be_file() TEST_RUNNER_OPTIONS = elib_config.ConfigValueString( 'test', 'runner_options', description='Additional options for test run', default='' ) TEST_DURATION_COUNT = elib_config.ConfigValueInteger( 'test', 'duration_count', description='Amount of "slow" tests to show', default=10 ) TEST_DURATION_COUNT.set_limits(min_=0, max_=50) TEST_TARGET = elib_config.ConfigValueString( 'test', 'target', description='Target of pytest', default='test' ) TEST_COVERAGE_FAIL_UNDER = elib_config.ConfigValueInteger( 'test', 'coverage_fail_under', description='Minimal coverage to pass tests', default=20 ) TEST_COVERAGE_FAIL_UNDER.set_limits(min_=0, max_=100) TEST_PYTEST_TIMEOUT = elib_config.ConfigValueInteger( 'test', 'timeout', description='Timeout in seconds for pytest runner', default=300 ) TEST_PYTEST_TIMEOUT.set_limits(min_=0, max_=3600) LINT_LINE_LENGTH = elib_config.ConfigValueInteger( 'lint', 'line_length', description='Linter max line width', default=120 ) LINT_LINE_LENGTH.set_limits(min_=0, max_=500) PACKAGE_NAME = elib_config.ConfigValueString( 'package_name', description='Package name' ) FREEZE_ENTRY_POINT = elib_config.ConfigValueString( 'freeze', 'entry_point', description='Main entry point for pyinstaller', default='' ) FREEZE_DATA_FILES = elib_config.ConfigValueList( 'freeze', 'data_files', description='PyInstaller data-files list', element_type=str, default=[] ) DOC_REPO = elib_config.ConfigValueString( 'doc', 'repo', description='Documentation repository on Github', default='' ) DOC_FOLDER = elib_config.ConfigValuePath( 'doc', 'folder', description='Local documentation directory', default='./doc' ) DOC_FOLDER.must_be_dir() QUIET = elib_config.ConfigValueBool( 'quiet', description='Less console output', default=False ) VERBOSE = elib_config.ConfigValueBool( 'verbose', description='More console output', default=False ) TEST_AV_RUNNER_OPTIONS = elib_config.ConfigValueString( 'appveyor', 'test_runner_options', description='Additional command line options for tests run on AV', default='--long' ) ARTIFACTS = elib_config.ConfigValueList( 'appveyor', 'artifacts', description='List of artifacts for Appveyor', element_type=str, default=[] ) FLAKE8_EXCLUDE = elib_config.ConfigValueString( 'lint', 'flake8_exclude', description='List of comma separated files for flake8 to exclude', default='' ) MYPY_ARGS = elib_config.ConfigValueString( 'lint', 'mypy_args', description='Additional MyPy arguments', default='' ) QT_RES_SRC = elib_config.ConfigValueString( 'qt', 'res_src', description='Qt resource file (.qrc) location', default='' ) QT_RES_TGT = elib_config.ConfigValueString( 'qt', 'res_tgt', description='Compiled Qt resource file (.py) target location', default='' ) UPLOAD_TO_TWINE = elib_config.ConfigValueBool( 'twine', 'upload', description='Upload package to Twine after build', default=True, ) MAKE_GRAPH = elib_config.ConfigValueBool( 'graph', 'make', description='Generate graphs using PyReverse', default=True, ) def setup_config(epab_version: str): """ Set up elib_config package :param epab_version: installed version of EPAB as as string """ logger = logging.getLogger('EPAB') logger.debug('setting up config') 
elib_config.ELIBConfig.setup( app_name='EPAB', app_version=epab_version, config_file_path='pyproject.toml', config_sep_str='__', root_path=['tool', 'epab'] ) elib_config.write_example_config('pyproject.toml.example') if not pathlib.Path('pyproject.toml').exists(): raise FileNotFoundError('pyproject.toml') elib_config.validate_config()
34.495798
107
0.747138
e67fead92c8110015c821a38623a6b98e6c63185
5,793
py
Python
create_flask_app.py
Creativity-Hub/create_flask_app
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
[ "MIT" ]
2
2020-08-05T04:33:20.000Z
2020-08-06T23:03:40.000Z
create_flask_app.py
Creativity-Hub/create_flask_app
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
[ "MIT" ]
null
null
null
create_flask_app.py
Creativity-Hub/create_flask_app
4c4e2c7360c7773f6f5e3d2fd30e310777650f57
[ "MIT" ]
null
null
null
import os
import argparse

if __name__ == '__main__':
    create_flask_app()
39.141892
335
0.666494
e680d5976ff70e83c58f67740990b745a8b0973b
1,835
py
Python
examples/flaskr/flaskr/__init__.py
Flared/flask-sqlalchemy
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
[ "BSD-3-Clause" ]
2
2020-04-09T15:28:49.000Z
2020-04-18T02:55:16.000Z
examples/flaskr/flaskr/__init__.py
Flared/flask-sqlalchemy
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
[ "BSD-3-Clause" ]
null
null
null
examples/flaskr/flaskr/__init__.py
Flared/flask-sqlalchemy
e73abd51d957a4436bca6b5eadbf5d63771cf5ef
[ "BSD-3-Clause" ]
1
2020-06-19T11:49:30.000Z
2020-06-19T11:49:30.000Z
import os

import click
from flask import Flask
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy

__version__ = (1, 0, 0, "dev")

db = SQLAlchemy()


def create_app(test_config=None):
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)

    # some deploy systems set the database url in the environ
    db_url = os.environ.get("DATABASE_URL")

    if db_url is None:
        # default to a sqlite database in the instance folder
        db_url = "sqlite:///" + os.path.join(app.instance_path, "flaskr.sqlite")
        # ensure the instance folder exists
        os.makedirs(app.instance_path, exist_ok=True)

    app.config.from_mapping(
        # default secret that should be overridden in environ or config
        SECRET_KEY=os.environ.get("SECRET_KEY", "dev"),
        SQLALCHEMY_DATABASE_URI=db_url,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )

    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile("config.py", silent=True)
    else:
        # load the test config if passed in
        app.config.update(test_config)

    # initialize Flask-SQLAlchemy and the init-db command
    db.init_app(app)
    app.cli.add_command(init_db_command)

    # apply the blueprints to the app
    from flaskr import auth, blog

    app.register_blueprint(auth.bp)
    app.register_blueprint(blog.bp)

    # make "index" point at "/", which is handled by "blog.index"
    app.add_url_rule("/", endpoint="index")

    return app
27.38806
80
0.689918
e681d9f0d0bbcd56a55111fcb8b7b0c2f584018e
142
py
Python
simulator/cc.py
mcfx/trivm
5b77ea157c562cfbfe87f7e7d256fb9702f8ceec
[ "MIT" ]
6
2022-02-21T15:49:52.000Z
2022-02-23T07:16:02.000Z
simulator/cc.py
mcfx/trivm
5b77ea157c562cfbfe87f7e7d256fb9702f8ceec
[ "MIT" ]
null
null
null
simulator/cc.py
mcfx/trivm
5b77ea157c562cfbfe87f7e7d256fb9702f8ceec
[ "MIT" ]
null
null
null
import os, sys

fn = sys.argv[1]

if os.system('python compile.py %s __tmp.S' % fn) == 0:
    os.system('python asm.py __tmp.S %s' % fn[:-2])
20.285714
55
0.598592
e6820129758b4a88f3d5692d1d9e3fcd58b99051
3,806
py
Python
ad2/Actor.py
ariadnepinheiro/Disease_Simulator
e875036f4b0485575327463a17f4282487350cb3
[ "MIT" ]
4
2020-11-06T22:28:51.000Z
2022-02-24T10:40:26.000Z
ad2/Actor.py
ariadnepinheiro/Disease_Simulator
e875036f4b0485575327463a17f4282487350cb3
[ "MIT" ]
null
null
null
ad2/Actor.py
ariadnepinheiro/Disease_Simulator
e875036f4b0485575327463a17f4282487350cb3
[ "MIT" ]
2
2021-03-07T20:26:42.000Z
2021-12-14T03:17:22.000Z
#!/usr/bin/env python
# coding: UTF-8
#
# @package Actor
# @author Ariadne Pinheiro
# @date 26/08/2020
#
# Actor class, which is the base class for Disease objects.
#
##
27.781022
86
0.584078
e6821c09b4a2b0ae38dad98719d218377bec1dfe
1,516
py
Python
conversions/decimal_to_binary.py
smukk9/Python
5f4da5d616926dbe77ece828986b8d19c7d65cb5
[ "MIT" ]
6
2020-06-23T11:56:55.000Z
2021-10-03T17:21:34.000Z
conversions/decimal_to_binary.py
smukk9/Python
5f4da5d616926dbe77ece828986b8d19c7d65cb5
[ "MIT" ]
3
2020-06-08T07:03:15.000Z
2020-06-08T08:41:22.000Z
conversions/decimal_to_binary.py
smukk9/Python
5f4da5d616926dbe77ece828986b8d19c7d65cb5
[ "MIT" ]
2
2020-06-26T09:16:11.000Z
2020-07-01T08:55:48.000Z
"""Convert a Decimal Number to a Binary Number.""" def decimal_to_binary(num: int) -> str: """ Convert a Integer Decimal Number to a Binary Number as str. >>> decimal_to_binary(0) '0b0' >>> decimal_to_binary(2) '0b10' >>> decimal_to_binary(7) '0b111' >>> decimal_to_binary(35) '0b100011' >>> # negatives work too >>> decimal_to_binary(-2) '-0b10' >>> # other floats will error >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: 'float' object cannot be interpreted as an integer >>> # strings will error as well >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: 'str' object cannot be interpreted as an integer """ if type(num) == float: raise TypeError("'float' object cannot be interpreted as an integer") if type(num) == str: raise TypeError("'str' object cannot be interpreted as an integer") if num == 0: return "0b0" negative = False if num < 0: negative = True num = -num binary = [] while num > 0: binary.insert(0, num % 2) num >>= 1 if negative: return "-0b" + "".join(str(e) for e in binary) return "0b" + "".join(str(e) for e in binary) if __name__ == "__main__": import doctest doctest.testmod()
25.266667
77
0.558047
e68358c694510e180fb49e743ec559c977aea7b6
1,467
py
Python
src/HandNetwork.py
xausky/hand-network
e885003c5bb9157cd06dc3ea3aabddbb7162a0ab
[ "MIT" ]
2
2017-04-18T03:31:06.000Z
2017-06-08T10:27:59.000Z
src/HandNetwork.py
xausky/hand-network
e885003c5bb9157cd06dc3ea3aabddbb7162a0ab
[ "MIT" ]
null
null
null
src/HandNetwork.py
xausky/hand-network
e885003c5bb9157cd06dc3ea3aabddbb7162a0ab
[ "MIT" ]
null
null
null
#!/usr/bin/python3
#-*- coding: utf-8 -*-
import urllib.parse
import json
import base64
import requests
import logging
36.675
84
0.657805
e684146ff5ca787d26fd1c2feebd83d974744890
1,725
py
Python
algorithms_keeper/parser/rules/use_fstring.py
Fongeme/algorithms-keeper
ea80d9342b4d2efd246a6bc409889ed780accf08
[ "MIT" ]
50
2021-02-27T04:13:11.000Z
2022-03-29T04:34:01.000Z
algorithms_keeper/parser/rules/use_fstring.py
dedsec-9/algorithms-keeper
0d98e4e24e239524c48d9eab19c493ac288ecf83
[ "MIT" ]
52
2021-08-09T22:40:20.000Z
2022-03-07T16:56:36.000Z
algorithms_keeper/parser/rules/use_fstring.py
dedsec-9/algorithms-keeper
0d98e4e24e239524c48d9eab19c493ac288ecf83
[ "MIT" ]
22
2021-04-28T06:56:27.000Z
2022-03-13T07:27:45.000Z
import libcst as cst
import libcst.matchers as m
from fixit import CstLintRule
from fixit import InvalidTestCase as Invalid
from fixit import ValidTestCase as Valid
33.173077
85
0.576232
e6848af64f5fa82bd5d7d5132ff08186219ab513
15,634
py
Python
bert_multitask_learning/model_fn.py
akashnd/bert-multitask-learning
aee5be006ef6a3feadf0c751a6f9b42c24c3fd21
[ "Apache-2.0" ]
null
null
null
bert_multitask_learning/model_fn.py
akashnd/bert-multitask-learning
aee5be006ef6a3feadf0c751a6f9b42c24c3fd21
[ "Apache-2.0" ]
null
null
null
bert_multitask_learning/model_fn.py
akashnd/bert-multitask-learning
aee5be006ef6a3feadf0c751a6f9b42c24c3fd21
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/13_model_fn.ipynb (unless otherwise specified).

__all__ = ['variable_summaries', 'filter_loss', 'BertMultiTaskBody', 'BertMultiTaskTop', 'BertMultiTask']

# Cell
from typing import Dict, Tuple
from inspect import signature

import tensorflow as tf
import transformers

from .modeling import MultiModalBertModel
from .params import BaseParams
from .top import (Classification, MultiLabelClassification, PreTrain,
                  Seq2Seq, SequenceLabel, MaskLM)
from .utils import get_embedding_table_from_model, get_transformer_main_model


def variable_summaries(var, name):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.compat.v1.name_scope(name):
        mean = tf.reduce_mean(input_tensor=var)
        tf.compat.v1.summary.scalar('mean', mean)
        with tf.compat.v1.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(
                input_tensor=tf.square(var - mean)))
        tf.compat.v1.summary.scalar('stddev', stddev)
        tf.compat.v1.summary.scalar('max', tf.reduce_max(input_tensor=var))
        tf.compat.v1.summary.scalar('min', tf.reduce_min(input_tensor=var))
        tf.compat.v1.summary.histogram('histogram', var)

# Cell

# Cell
42.368564
114
0.623321
e685406479e82ae52847e5dad03d1463ba77358b
5,000
py
Python
SiMon/visualization.py
Jennyx18/SiMon
522432ff708954ac37050609cfd6f42dd96467e4
[ "BSD-2-Clause" ]
9
2017-03-04T08:00:58.000Z
2021-04-03T18:18:40.000Z
SiMon/visualization.py
Jennyx18/SiMon
522432ff708954ac37050609cfd6f42dd96467e4
[ "BSD-2-Clause" ]
52
2016-09-23T14:06:06.000Z
2021-08-05T12:21:29.000Z
SiMon/visualization.py
Jennyx18/SiMon
522432ff708954ac37050609cfd6f42dd96467e4
[ "BSD-2-Clause" ]
4
2016-09-15T02:09:42.000Z
2021-06-15T11:42:58.000Z
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from datetime import datetime
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.collections import LineCollection
from matplotlib import cm
from SiMon.simulation import Simulation
from SiMon.callback import Callback
from matplotlib.ticker import MaxNLocator
import time
32.894737
102
0.4948
e6855e47f2ad7aa6ba42d8fa11c100eb19915033
3,700
py
Python
bin/psm/oil_jet.py
ChrisBarker-NOAA/tamoc
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
[ "MIT" ]
18
2016-02-24T01:48:41.000Z
2021-11-05T03:18:24.000Z
bin/psm/oil_jet.py
ChrisBarker-NOAA/tamoc
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
[ "MIT" ]
16
2016-08-09T07:06:35.000Z
2021-12-23T19:38:37.000Z
bin/psm/oil_jet.py
ChrisBarker-NOAA/tamoc
c797cbb6fee28d788b76d21cc5b0cc0df5444ba8
[ "MIT" ]
9
2017-03-01T01:22:27.000Z
2021-09-17T12:13:40.000Z
""" Particle Size Models: Pure Oil Jet =================================== Use the ``TAMOC`` `particle_size_models` module to simulate a laboratory scale pure oil jet into water. This script demonstrates the typical steps involved in using the `particle_size_models.PureJet` object, which requires specification of all of the fluid properties of the jet. """ # S. Socolofsky, March 2020, Texas A&M University <[email protected]>. from __future__ import (absolute_import, division, print_function) from tamoc import seawater, particle_size_models import numpy as np import warnings warnings.filterwarnings("ignore") if __name__ == '__main__': print('\n---------------------------------------------------------------') print('Demonstration using the PureJet class in the') print('particle_size_models module of TAMOC for the ') print('experiments in the paper by Brandvik et al. (2013).') print('\nComparisons are for the data reported in Table 3') print('of the paper') print('---------------------------------------------------------------') # Simulate an experiment from Brandvik et al. (2013). Their data uses # Oseberg oil, with the following reported properties rho_oil = 839.3 mu_oil = 5.e-3 sigma = 15.5e-3 # We will simulate data from Table 3 in the Brandvik et al. (2013) paper. # These experiments have a nozzle diameter of 1.5 mm d0 = 0.0015 # They also used seawater (assumed salinity of 34.5 psu) and released the # oil from a depth of about 6 m at a temperature of 13 deg C T = 273.15 + 13. S = 34.5 rho = seawater.density(T, S, 101325.) P = 101325. + rho * 9.81 * 6. rho = seawater.density(T, S, P) mu = seawater.mu(T, S, P) # With this information, we can initialize a # `particle_size_models.PureJet` object jet = particle_size_models.PureJet(rho_oil, mu_oil, sigma, rho, mu, fp_type = 1) # Brandvik et al. (2013) report the exit velocity at the nozzle. We # need to convert this to a mass flow rate. The mass flow rate should # always be reported within a numpy array, which allows for different # mass fluxes for different pseudocomponents of the oil. u_oil = 11.3 A_oil = np.pi * (d0 / 2.)**2 q_oil = u_oil * A_oil md_oil = np.array([rho_oil * q_oil]) # To simulate the no-dispersant case, all of the oil properties in the # jet object are currently correct. Hence, we may use: jet.simulate(d0, md_oil) # We compare the result to the measured data as follows: print('\nThe median droplet size for the no-disperant experiment is:') print(' Measured: %3.3d um' % 237) print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6)) # When dispersant is added in sufficient quantities, the interfacial # tension reduces and the droplet size gets smaller. At a dispersant # to oil ratio of 50, sigma is: sigma = 0.05e-3 # We can run this case by updating the properties of the jet object and # re-running the simualtion jet.update_properties(rho_oil, mu_oil, sigma, rho, mu, fp_type = 1) jet.simulate(d0, md_oil) # We compare the result to the measured data as follows: print('\nThe median droplet size for an experiments with a') print('dispersant to oil ratio of 50 is:') print(' Measured: %3.3d um' % 170) print(' Modeled : %3.3d um\n' % (jet.get_d50() * 1.e6)) # We can also plot the size distribution print('\nThe corresponding size distribution is plotted in Figure 1') jet.get_distributions(15) jet.plot_psd(1)
38.947368
78
0.635946
e6862496cf199e7f27dd40deb80fa8e54704b966
1,121
py
Python
tron/Nubs/hal.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
tron/Nubs/hal.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
tron/Nubs/hal.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
import os.path

import tron.Misc
from tron import g, hub
from tron.Hub.Command.Encoders.ASCIICmdEncoder import ASCIICmdEncoder
from tron.Hub.Nub.SocketActorNub import SocketActorNub
from tron.Hub.Reply.Decoders.ASCIIReplyDecoder import ASCIIReplyDecoder

name = 'hal'
24.369565
77
0.637823
e68682ac6ba97f9b172ff277c3a2a87e5c65354c
1,761
py
Python
tests/fixtures/defxmlschema/chapter15.py
gramm/xsdata
082c780757c6d76a5c31a6757276ef6912901ed2
[ "MIT" ]
null
null
null
tests/fixtures/defxmlschema/chapter15.py
gramm/xsdata
082c780757c6d76a5c31a6757276ef6912901ed2
[ "MIT" ]
null
null
null
tests/fixtures/defxmlschema/chapter15.py
gramm/xsdata
082c780757c6d76a5c31a6757276ef6912901ed2
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field
from decimal import Decimal
from typing import Optional
from xsdata.models.datatype import XmlDate
20.241379
42
0.473027
e68781e0de8404ad5b22f8d2f250a25084af55ff
1,092
py
Python
extensions/domain.py
anubhavsinha98/oppia
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
[ "Apache-2.0" ]
1
2019-08-31T17:06:41.000Z
2019-08-31T17:06:41.000Z
extensions/domain.py
anubhavsinha98/oppia
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
[ "Apache-2.0" ]
null
null
null
extensions/domain.py
anubhavsinha98/oppia
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
[ "Apache-2.0" ]
null
null
null
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Domain objects used within multiple extensions."""

from __future__ import absolute_import  # pylint: disable=import-only-modules

import python_utils
35.225806
77
0.746337
e6885b17b97915311f8a8bd86b9f72a31641ef6d
7,392
py
Python
plugins/modules/oci_database_management_object_privilege_facts.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_database_management_object_privilege_facts.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
plugins/modules/oci_database_management_object_privilege_facts.py
LaudateCorpus1/oci-ansible-collection
2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}

DOCUMENTATION = """
---
module: oci_database_management_object_privilege_facts
short_description: Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
description:
    - Fetches details about one or multiple ObjectPrivilege resources in Oracle Cloud Infrastructure
    - Gets the list of Object Privileges granted for the specified user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
    managed_database_id:
        description:
            - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Managed Database.
        type: str
        required: true
    user_name:
        description:
            - The name of the user whose details are to be viewed.
        type: str
        required: true
    name:
        description:
            - A filter to return only resources that match the entire name.
        type: str
    sort_by:
        description:
            - The field to sort information by. Only one sortOrder can be used. The default sort order
              for 'NAME' is ascending. The 'NAME' sort order is case-sensitive.
        type: str
        choices:
            - "NAME"
    sort_order:
        description:
            - The option to sort information in ascending ('ASC') or descending ('DESC') order. Ascending order is the default order.
        type: str
        choices:
            - "ASC"
            - "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""

EXAMPLES = """
- name: List object_privileges
  oci_database_management_object_privilege_facts:
    # required
    managed_database_id: "ocid1.manageddatabase.oc1..xxxxxxEXAMPLExxxxxx"
    user_name: user_name_example

    # optional
    name: name_example
    sort_by: NAME
    sort_order: ASC

"""

RETURN = """
object_privileges:
    description:
        - List of ObjectPrivilege resources
    returned: on success
    type: complex
    contains:
        name:
            description:
                - The name of the privilege on the object.
            returned: on success
            type: str
            sample: name_example
        schema_type:
            description:
                - The type of the object.
            returned: on success
            type: str
            sample: schema_type_example
        owner:
            description:
                - The owner of the object.
            returned: on success
            type: str
            sample: owner_example
        grantor:
            description:
                - The name of the user who performed the grant
            returned: on success
            type: str
            sample: grantor_example
        hierarchy:
            description:
                - Indicates whether the privilege was granted with the HIERARCHY OPTION (YES) or not (NO)
            returned: on success
            type: str
            sample: YES
        object:
            description:
                - The name of the object. The object can be any object, including tables, packages, indexes, sequences, and so on.
            returned: on success
            type: str
            sample: object_example
        grant_option:
            description:
                - Indicates whether the privilege was granted with the GRANT OPTION (YES) or not (NO)
            returned: on success
            type: str
            sample: YES
        common:
            description:
                - "Indicates how the grant was made. Possible values:
                  YES if the role was granted commonly (CONTAINER=ALL was used)
                  NO if the role was granted locally (CONTAINER=ALL was not used)"
            returned: on success
            type: str
            sample: YES
        inherited:
            description:
                - Indicates whether the role grant was inherited from another container (YES) or not (NO)
            returned: on success
            type: str
            sample: YES
    sample: [{
        "name": "name_example",
        "schema_type": "schema_type_example",
        "owner": "owner_example",
        "grantor": "grantor_example",
        "hierarchy": "YES",
        "object": "object_example",
        "grant_option": "YES",
        "common": "YES",
        "inherited": "YES"
    }]
"""

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
    OCIResourceFactsHelperBase,
    get_custom_class,
)

try:
    from oci.database_management import DbManagementClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False


ObjectPrivilegeFactsHelperCustom = get_custom_class("ObjectPrivilegeFactsHelperCustom")


if __name__ == "__main__":
    main()
30.92887
133
0.626759
e6889a8d19aba99a640a29f5b573f28a57dbd412
1,727
py
Python
rbc/externals/stdio.py
guilhermeleobas/rbc
4b568b91c6ce3ef7727fee001169302c3803c4fd
[ "BSD-3-Clause" ]
null
null
null
rbc/externals/stdio.py
guilhermeleobas/rbc
4b568b91c6ce3ef7727fee001169302c3803c4fd
[ "BSD-3-Clause" ]
null
null
null
rbc/externals/stdio.py
guilhermeleobas/rbc
4b568b91c6ce3ef7727fee001169302c3803c4fd
[ "BSD-3-Clause" ]
null
null
null
"""https://en.cppreference.com/w/c/io """ from rbc import irutils from llvmlite import ir from rbc.targetinfo import TargetInfo from numba.core import cgutils, extending from numba.core import types as nb_types from rbc.errors import NumbaTypeError # some errors are available for Numba >= 0.55 int32_t = ir.IntType(32)
28.783333
92
0.682687
e689526fba8d369acce37c9eab4574f56f8a1f4b
991
py
Python
setup.py
clach04/discoverhue
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
[ "MIT" ]
10
2017-09-26T22:34:38.000Z
2021-11-19T22:37:59.000Z
setup.py
clach04/discoverhue
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
[ "MIT" ]
7
2018-02-04T19:38:03.000Z
2021-10-30T13:20:33.000Z
setup.py
clach04/discoverhue
8f35cbc8ff9b5aab80b8be0443427058c1da51ed
[ "MIT" ]
4
2019-06-28T15:26:45.000Z
2022-01-20T02:26:05.000Z
from setuptools import setup

try:
    import pypandoc
    long_description = pypandoc.convert_file('README.md', 'rst', extra_args=())
except ImportError:
    import codecs
    long_description = codecs.open('README.md', encoding='utf-8').read()

long_description = '\n'.join(long_description.splitlines())

setup(
    name='discoverhue',
    description='Auto discovery of Hue bridges',
    long_description=long_description,
    version='1.0.2',
    url='https://github.com/Overboard/discoverhue',
    author='Overboard',
    author_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='philips hue',
    packages=['discoverhue'],
    install_requires=['httpfind'],
)
26.078947
79
0.649849
e68a7efe5fb704c535ff7a5982b5a18ddc07817d
6,024
py
Python
utils/logmmse.py
dbonattoj/Real-Time-Voice-Cloning
7ce361b0e900cb0fad4289884f526578ba276481
[ "MIT" ]
3
2020-07-10T02:23:00.000Z
2021-08-17T12:35:09.000Z
utils/logmmse.py
amoliu/Real-Time-Voice-Cloning
7808d6f80aa9bbaffe367fde07b1c6f96cd3697e
[ "MIT" ]
1
2020-09-30T09:29:57.000Z
2020-10-31T15:38:50.000Z
utils/logmmse.py
amoliu/Real-Time-Voice-Cloning
7808d6f80aa9bbaffe367fde07b1c6f96cd3697e
[ "MIT" ]
5
2020-04-23T10:52:30.000Z
2021-08-17T12:35:19.000Z
# The MIT License (MIT)
#
# Copyright (c) 2015 braindead
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# This code was extracted from the logmmse package (https://pypi.org/project/logmmse/) and I
# simply modified the interface to meet my needs.

import numpy as np
import math
from scipy.special import expn
from collections import namedtuple

NoiseProfile = namedtuple("NoiseProfile", "sampling_rate window_size len1 len2 win n_fft noise_mu2")


def profile_noise(noise, sampling_rate, window_size=0):
    """
    Creates a profile of the noise in a given waveform.

    :param noise: a waveform containing noise ONLY, as a numpy array of floats or ints.
    :param sampling_rate: the sampling rate of the audio
    :param window_size: the size of the window the logmmse algorithm operates on. A default value
    will be picked if left as 0.
    :return: a NoiseProfile object
    """
    noise, dtype = to_float(noise)
    noise += np.finfo(np.float64).eps

    if window_size == 0:
        window_size = int(math.floor(0.02 * sampling_rate))

    if window_size % 2 == 1:
        window_size = window_size + 1

    perc = 50
    len1 = int(math.floor(window_size * perc / 100))
    len2 = int(window_size - len1)

    win = np.hanning(window_size)
    win = win * len2 / np.sum(win)

    n_fft = 2 * window_size
    noise_mean = np.zeros(n_fft)
    n_frames = len(noise) // window_size
    for j in range(0, window_size * n_frames, window_size):
        noise_mean += np.absolute(np.fft.fft(win * noise[j:j + window_size], n_fft, axis=0))
    noise_mu2 = (noise_mean / n_frames) ** 2

    return NoiseProfile(sampling_rate, window_size, len1, len2, win, n_fft, noise_mu2)


def denoise(wav, noise_profile: NoiseProfile, eta=0.15):
    """
    Cleans the noise from a speech waveform given a noise profile. The waveform must have the
    same sampling rate as the one used to create the noise profile.

    :param wav: a speech waveform as a numpy array of floats or ints.
    :param noise_profile: a NoiseProfile object that was created from a similar (or a segment of
    the same) waveform.
    :param eta: voice threshold for noise update. While the voice activation detection value is
    below this threshold, the noise profile will be continuously updated throughout the audio.
    Set to 0 to disable updating the noise profile.
    :return: the clean wav as a numpy array of floats or ints of the same length.
    """
    wav, dtype = to_float(wav)
    wav += np.finfo(np.float64).eps
    p = noise_profile

    nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2))
    x_final = np.zeros(nframes * p.len2)

    aa = 0.98
    mu = 0.98
    ksi_min = 10 ** (-25 / 10)

    x_old = np.zeros(p.len1)
    xk_prev = np.zeros(p.len1)
    noise_mu2 = p.noise_mu2
    for k in range(0, nframes * p.len2, p.len2):
        insign = p.win * wav[k:k + p.window_size]

        spec = np.fft.fft(insign, p.n_fft, axis=0)
        sig = np.absolute(spec)
        sig2 = sig ** 2

        gammak = np.minimum(sig2 / noise_mu2, 40)

        if xk_prev.all() == 0:
            ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0)
        else:
            ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0)
            ksi = np.maximum(ksi_min, ksi)

        log_sigma_k = gammak * ksi/(1 + ksi) - np.log(1 + ksi)
        vad_decision = np.sum(log_sigma_k) / p.window_size
        if vad_decision < eta:
            noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2

        a = ksi / (1 + ksi)
        vk = a * gammak
        ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8))
        hw = a * np.exp(ei_vk)
        sig = sig * hw
        xk_prev = sig ** 2

        xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0)
        xi_w = np.real(xi_w)

        x_final[k:k + p.len2] = x_old + xi_w[0:p.len1]
        x_old = xi_w[p.len1:p.window_size]

    output = from_float(x_final, dtype)
    output = np.pad(output, (0, len(wav) - len(output)), mode="constant")
    return output
36.957055
100
0.659529
e68aea4ed97106ccbd90e2eca6ee1a3772751cb0
3,780
py
Python
lib/core/session.py
6un9-h0-Dan/CIRTKit
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
[ "MIT" ]
97
2017-12-18T15:19:28.000Z
2022-03-25T07:10:00.000Z
lib/core/session.py
robertdigital/CIRTKit
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
[ "MIT" ]
1
2019-01-29T16:29:27.000Z
2019-01-29T16:29:27.000Z
lib/core/session.py
robertdigital/CIRTKit
58b8793ada69320ffdbdd4ecdc04a3bb2fa83c37
[ "MIT" ]
21
2018-04-04T18:12:13.000Z
2021-06-12T09:40:58.000Z
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.

import time
import datetime

from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__

__sessions__ = Sessions()
36
101
0.603439
e68c436db086a9f75f4ec9a1c59f8bdd8afa7f45
1,028
py
Python
src/simple_report/xls/document.py
glibin/simple-report
1e68b2fe568d6f7a7d9332d0e83b9a21661419e0
[ "Apache-2.0" ]
null
null
null
src/simple_report/xls/document.py
glibin/simple-report
1e68b2fe568d6f7a7d9332d0e83b9a21661419e0
[ "Apache-2.0" ]
null
null
null
src/simple_report/xls/document.py
glibin/simple-report
1e68b2fe568d6f7a7d9332d0e83b9a21661419e0
[ "Apache-2.0" ]
null
null
null
#coding: utf-8
import xlrd

from simple_report.core.document_wrap import BaseDocument, SpreadsheetDocument
from simple_report.xls.workbook import Workbook
from simple_report.xls.output_options import XSL_OUTPUT_SETTINGS
25.7
79
0.614786
e68c5bbc6721a5ef393bdd04f567f863f9c93e3b
3,810
py
Python
tests/ut/datavisual/common/test_error_handler.py
zengchen1024/mindinsight
228a448b46707e889efc1fb23502158e27ab56ca
[ "Apache-2.0" ]
null
null
null
tests/ut/datavisual/common/test_error_handler.py
zengchen1024/mindinsight
228a448b46707e889efc1fb23502158e27ab56ca
[ "Apache-2.0" ]
null
null
null
tests/ut/datavisual/common/test_error_handler.py
zengchen1024/mindinsight
228a448b46707e889efc1fb23502158e27ab56ca
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
    Test error handler.
Usage:
    pytest tests/ut/datavisual
"""
from unittest.mock import patch

from werkzeug.exceptions import MethodNotAllowed, NotFound

from ...backend.datavisual.conftest import TRAIN_ROUTES
from ..mock import MockLogger
from ....utils.tools import get_url
from mindinsight.datavisual.processors import scalars_processor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
36.990291
98
0.683727
e68c634de73f166e370b403383fc377943dc8b21
4,796
py
Python
pipeline_sdk/api/build/cancel_build_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
5
2019-07-31T04:11:05.000Z
2021-01-07T03:23:20.000Z
pipeline_sdk/api/build/cancel_build_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
null
null
null
pipeline_sdk/api/build/cancel_build_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: cancel_build.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='cancel_build.proto',
  package='build',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x12\x63\x61ncel_build.proto\x12\x05\x62uild\x1a\x1bgoogle/protobuf/empty.proto\"!\n\rCancelRequest\x12\x10\n\x08\x62uild_id\x18\x01 \x01(\t\"o\n\x15\x43\x61ncelResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])


_CANCELREQUEST = _descriptor.Descriptor(
  name='CancelRequest',
  full_name='build.CancelRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='build_id', full_name='build.CancelRequest.build_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=58,
  serialized_end=91,
)


_CANCELRESPONSEWRAPPER = _descriptor.Descriptor(
  name='CancelResponseWrapper',
  full_name='build.CancelResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='build.CancelResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='build.CancelResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='build.CancelResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='build.CancelResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=93,
  serialized_end=204,
)

_CANCELRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['CancelRequest'] = _CANCELREQUEST
DESCRIPTOR.message_types_by_name['CancelResponseWrapper'] = _CANCELRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

CancelRequest = _reflection.GeneratedProtocolMessageType('CancelRequest', (_message.Message,), {
  'DESCRIPTOR' : _CANCELREQUEST,
  '__module__' : 'cancel_build_pb2'
  # @@protoc_insertion_point(class_scope:build.CancelRequest)
  })
_sym_db.RegisterMessage(CancelRequest)

CancelResponseWrapper = _reflection.GeneratedProtocolMessageType('CancelResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _CANCELRESPONSEWRAPPER,
  '__module__' : 'cancel_build_pb2'
  # @@protoc_insertion_point(class_scope:build.CancelResponseWrapper)
  })
_sym_db.RegisterMessage(CancelResponseWrapper)


# @@protoc_insertion_point(module_scope)
35.791045
399
0.755004
e68dece75266882db686c493e81051a931627936
5,118
py
Python
src/.ipynb_checkpoints/headpose_model-checkpoint.py
geochri/Intel_Edge_AI-Computer_Pointer_controller
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
[ "MIT" ]
null
null
null
src/.ipynb_checkpoints/headpose_model-checkpoint.py
geochri/Intel_Edge_AI-Computer_Pointer_controller
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
[ "MIT" ]
3
2021-03-19T14:38:26.000Z
2022-03-12T00:43:27.000Z
src/.ipynb_checkpoints/headpose_model-checkpoint.py
geochri/Intel_Edge_AI-Computer_Pointer_controller
068947fa0cbe0c5d1b74e2c0eb69a85bbc439131
[ "MIT" ]
null
null
null
'''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''

from openvino.inference_engine import IENetwork, IECore
import numpy as np
import os
import cv2
import sys
41.609756
107
0.59789
e691c0247838523436befe1e1ccaf96b1e1135db
374
py
Python
src/minisaml/internal/constants.py
HENNGE/minisaml
d96aa5d294eee60521ad3c7084e8659b25935cee
[ "Apache-2.0" ]
2
2020-09-13T15:55:50.000Z
2021-01-07T07:40:24.000Z
src/minisaml/internal/constants.py
HENNGE/minisaml
d96aa5d294eee60521ad3c7084e8659b25935cee
[ "Apache-2.0" ]
11
2020-08-26T12:27:39.000Z
2021-11-17T16:10:00.000Z
src/minisaml/internal/constants.py
HENNGE/minisaml
d96aa5d294eee60521ad3c7084e8659b25935cee
[ "Apache-2.0" ]
1
2021-10-07T11:49:28.000Z
2021-10-07T11:49:28.000Z
NAMES_SAML2_PROTOCOL = "urn:oasis:names:tc:SAML:2.0:protocol"
NAMES_SAML2_ASSERTION = "urn:oasis:names:tc:SAML:2.0:assertion"
NAMEID_FORMAT_UNSPECIFIED = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
BINDINGS_HTTP_POST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
DATE_TIME_FORMAT_FRACTIONAL = "%Y-%m-%dT%H:%M:%S.%fZ"
53.428571
83
0.759358
e692205969e07efd17736b63f7c1d2bf34e22ac0
833
py
Python
Contests/Snackdown19_Qualifier/CHEFPRMS.py
PK-100/Competitive_Programming
d0863feaaa99462b2999e85dcf115f7a6c08bb8d
[ "MIT" ]
70
2018-06-25T21:20:15.000Z
2022-03-24T03:55:17.000Z
Contests/Snackdown19_Qualifier/CHEFPRMS.py
An3sha/Competitive_Programming
ee7eadf51939a360d0b004d787ebabda583e92f0
[ "MIT" ]
4
2018-09-04T13:12:20.000Z
2021-06-20T08:29:12.000Z
Contests/Snackdown19_Qualifier/CHEFPRMS.py
An3sha/Competitive_Programming
ee7eadf51939a360d0b004d787ebabda583e92f0
[ "MIT" ]
24
2018-12-26T05:15:32.000Z
2022-01-23T23:04:54.000Z
import math

for _ in range(int(input())):
    n=int(input())
    flag=0
    for i in range(2,n//2+1):
        if check(i)==True and check(n-i)==True:
            #print(i,n-i,square(i),square(n-i),"Yes")
            print("YES")
            flag=1
            break
    if flag==0:
        #print(i,n-i,square(i),square(n-i),"No")
        print("NO")
21.921053
53
0.457383
e692cff5589dc59f4785c76fbfa11c53ff5a1d4e
305
py
Python
setup.py
arokem/afq-deep-learning
61d7746f03914d63c56253d10d0f6a21e6c78e90
[ "BSD-3-Clause" ]
null
null
null
setup.py
arokem/afq-deep-learning
61d7746f03914d63c56253d10d0f6a21e6c78e90
[ "BSD-3-Clause" ]
null
null
null
setup.py
arokem/afq-deep-learning
61d7746f03914d63c56253d10d0f6a21e6c78e90
[ "BSD-3-Clause" ]
2
2021-12-01T17:04:39.000Z
2022-01-20T22:53:40.000Z
from setuptools import find_packages, setup

setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='This repository hosts some work-in-progress experiments applying deep learning to predict age using tractometry data.',
    author='Joanna Qiao',
    license='BSD-3',
)
27.727273
136
0.718033
e692fc94ab5c1ffa86ca1f2d1e72224d55aaebca
8,474
py
Python
make_base_container.py
thiagodasilva/runway
a5455e885302df534fcfff0470881fbd2ad8eed5
[ "Apache-2.0" ]
null
null
null
make_base_container.py
thiagodasilva/runway
a5455e885302df534fcfff0470881fbd2ad8eed5
[ "Apache-2.0" ]
null
null
null
make_base_container.py
thiagodasilva/runway
a5455e885302df534fcfff0470881fbd2ad8eed5
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
import argparse
import os
import random
import requests
import sys
import tempfile
import uuid

from libs import colorprint
from libs.cli import run_command

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# assume well-known lvm volume group on host
# ...later we'll figure out how to make this dynamic
VG_NAME = "swift-runway-vg01"
SWIFTSTACK_IMAGES_PREFIX = "ss-"
SWIFTSTACK_IMAGES_BASE_URL = \
    "https://tellus.swiftstack.com/v1/AUTH_runway/lxd-images"
IMAGE_MANIFEST_OBJECT_NAME = "manifest.json"
UNIFIED_TARBALL_TYPE = "unified"
SPLIT_TARBALL_TYPE = "split"
TARBALL_TYPES = [UNIFIED_TARBALL_TYPE, SPLIT_TARBALL_TYPE]


def import_image(manifest, alias):
    '''
    There are 2 possible image formats: unified and split. We support both.

    For unified format, the manifest will look like this:
        {
            "tarball_type": "unified",
            "fingerprint": "629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018",
            "tarball-object": "centos7.5/629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018.tar.gz"
        }

    For split format, the manifest will look like this:
        {
            "tarball_type": "split",
            "fingerprint": "22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de",
            "metadata-object": "centos7.5/meta-22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.tar.xz",
            "rootfs-object": "centos7.5/22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.squashfs"
        }
    '''
    if manifest["tarball_type"] not in TARBALL_TYPES:
        raise Exception("Invalid tarball type: {}".format(
            manifest["tarball_type"]))
    elif manifest["tarball_type"] == UNIFIED_TARBALL_TYPE:
        import_unified_image(manifest, alias)
    elif manifest["tarball_type"] == SPLIT_TARBALL_TYPE:
        import_split_image(manifest, alias)
    else:
        raise Exception("Tarball type '{}' is valid, but a method to import "
                        "it has not been implemented yet.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('distro', type=str, help='Container distro')
    parser.add_argument('cname', metavar='containername', help='Container '
                        'name')
    parser.add_argument('volsize', help='Volume size')
    parser.add_argument('volcount', type=int, help='Volume count')
    parser.add_argument('baseimage', nargs='?',
                        help='Base image. Defaults: \'images:centos/7/amd64\' '
                             'for RHEL distro, \'ubuntu:16.04\' otherwise')
    args = parser.parse_args()
    distro = args.distro
    container_name = args.cname
    base_image = args.baseimage
    volume_size = args.volsize
    volume_count = args.volcount

    if is_swiftstack_hosted_image(distro):
        import_image_if_needed(distro)
        default_image = distro
    else:
        default_image = get_default_image(distro)

    if base_image is None:
        base_image = default_image

    try:
        # make a container profile that maps 8 block devices to the guest
        rand_file_name = str(uuid.UUID(int=random.getrandbits(128)))
        run_command("./make_lxc_profile.py {} {} {} {} > "
                    "/tmp/{}".format(container_name, VG_NAME, volume_size,
                                     volume_count, rand_file_name),
                    cwd=SCRIPT_DIR, shell=True)
        run_command("lxc profile create {}-profile".format(container_name))
        run_command("cat /tmp/{} | lxc profile edit {}-profile".format(
            rand_file_name, container_name), cwd=SCRIPT_DIR, shell=True)

        # launch the new container
        print("Trying to launch container from base image "
              "{}".format(base_image))
        run_command("lxc launch {} {} -p {}-profile || "
                    "lxc launch {} {} -p {}-profile".format(base_image,
                                                            container_name,
                                                            container_name,
                                                            default_image,
                                                            container_name,
                                                            container_name),
                    shell=True)
    except Exception as e:
        exit_with_error(str(e))
36.683983
116
0.628039
e693812c79b01a653cf7ed97ebf4b0c9deae4584
1,687
py
Python
exercicios_antigos/ex_01.py
jfklima/prog_pratica
72c795e3372e46f04ce0c92c05187aec651777cf
[ "MIT" ]
null
null
null
exercicios_antigos/ex_01.py
jfklima/prog_pratica
72c795e3372e46f04ce0c92c05187aec651777cf
[ "MIT" ]
null
null
null
exercicios_antigos/ex_01.py
jfklima/prog_pratica
72c795e3372e46f04ce0c92c05187aec651777cf
[ "MIT" ]
null
null
null
"""Criar uma funo que retorne min e max de uma sequncia numrica aleatria. S pode usar if, comparaes, recurso e funes que sejam de sua autoria. Se quiser usar laos tambm pode. Deve informar via docstring qual a complexidade de tempo e espao da sua soluo """ from math import inf def minimo_e_maximo(sequencia_numerica): ''' Retorna o minimo e o maximo de uma sequncia numrica aleatria. Complexidade: execuo: O(n) espao: O(3) ''' maximo = -inf # 1 minimo = +inf # 1 for elem in sequencia_numerica: # 1 if elem > maximo: # 2 maximo = elem # 1 if elem < minimo: # 2 minimo = elem # 2 return minimo, maximo # 1 # print(minimo_e_maximo([1, 2, 3, 4])) # print(minimo_e_maximo([1, 3, 10, 12, 44, 2, 24, 25])) # print(minimo_e_maximo([88, 66, 10, 2, 8])) print(recursivo_minmax([1, 2, 3, 4]))
23.760563
72
0.627742
e693c2c06b451b4433b40c8836d35627ae32d7b5
806
py
Python
docs/demos/theme_explorer/util.py
harisbal/dash-bootstrap-components
d7c91c08e0821ccfd81330db912cde71ec57c171
[ "Apache-2.0" ]
1
2021-05-08T08:21:41.000Z
2021-05-08T08:21:41.000Z
docs/demos/theme_explorer/util.py
harisbal/dash-bootstrap-components
d7c91c08e0821ccfd81330db912cde71ec57c171
[ "Apache-2.0" ]
null
null
null
docs/demos/theme_explorer/util.py
harisbal/dash-bootstrap-components
d7c91c08e0821ccfd81330db912cde71ec57c171
[ "Apache-2.0" ]
null
null
null
import dash_bootstrap_components as dbc
import dash_html_components as html

DBC_DOCS = (
    "https://dash-bootstrap-components.opensource.faculty.ai/docs/components/"
)
23.028571
78
0.473945
e693c649026985a8de2994906ab2b8b27870d123
2,858
py
Python
pytorch_toolbox/visualization/visdom_logger.py
MathGaron/pytorch_toolbox
2afd13e50ba71dfce66467a4b070d9b922668502
[ "MIT" ]
10
2018-02-26T04:51:11.000Z
2021-10-01T02:30:37.000Z
pytorch_toolbox/visualization/visdom_logger.py
MathGaron/pytorch_toolbox
2afd13e50ba71dfce66467a4b070d9b922668502
[ "MIT" ]
9
2017-11-16T16:11:16.000Z
2020-02-13T13:10:55.000Z
pytorch_toolbox/visualization/visdom_logger.py
MathGaron/pytorch_toolbox
2afd13e50ba71dfce66467a4b070d9b922668502
[ "MIT" ]
7
2018-02-12T19:06:14.000Z
2021-03-25T19:13:51.000Z
'''
    The visualization class provides an easy access to some of the visdom functionalities
    Accepts as input a number that will be plotted over time or an image of type np.ndarray
'''
from visdom import Visdom
import numpy as np
import numbers
35.283951
120
0.569979
e6957e411e3b025a67a76d0f0a74f5d86329bb6f
2,683
py
Python
analytical/conditionnumber.py
gyyang/olfaction_evolution
434baa85b91f450e1ab63c6b9eafb8d370f1df96
[ "MIT" ]
9
2021-10-11T01:16:23.000Z
2022-01-13T14:07:08.000Z
analytical/conditionnumber.py
gyyang/olfaction_evolution
434baa85b91f450e1ab63c6b9eafb8d370f1df96
[ "MIT" ]
1
2021-10-30T09:49:08.000Z
2021-10-30T09:49:08.000Z
analytical/conditionnumber.py
gyyang/olfaction_evolution
434baa85b91f450e1ab63c6b9eafb8d370f1df96
[ "MIT" ]
null
null
null
"""Analyze condition number of the network.""" import numpy as np import matplotlib.pyplot as plt # import model def _get_sparse_mask(nx, ny, non, complex=False, nOR=50): """Generate a binary mask. The mask will be of size (nx, ny) For all the nx connections to each 1 of the ny units, only non connections are 1. Args: nx: int ny: int non: int, must not be larger than nx Return: mask: numpy array (nx, ny) """ mask = np.zeros((nx, ny)) if not complex: mask[:non] = 1 for i in range(ny): np.random.shuffle(mask[:, i]) # shuffling in-place return mask.astype(np.float32) n_kc_claws = np.arange(1, 50) conds = np.array([get_logcond(n_kc_claw=n) for n in n_kc_claws]) plt.figure() plt.plot(n_kc_claws, conds, 'o-') plt.xticks(n_kc_claws) plt.xlabel('N_KC_claw') plt.show()
27.10101
85
0.621319
e6960adb05d4b964e50fe6cceef1e01091d1811d
2,327
py
Python
FusionIIIT/applications/placement_cell/api/serializers.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
29
2019-02-20T15:35:33.000Z
2022-03-22T11:10:57.000Z
FusionIIIT/applications/placement_cell/api/serializers.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
409
2019-01-17T19:30:51.000Z
2022-03-31T16:28:45.000Z
FusionIIIT/applications/placement_cell/api/serializers.py
29rj/Fusion
bc2941a67532e183adeb0bc4042df0b182b9e3aa
[ "bzip2-1.0.6" ]
456
2019-01-12T11:01:13.000Z
2022-03-30T17:06:52.000Z
from rest_framework.authtoken.models import Token
from rest_framework import serializers

from applications.placement_cell.models import (Achievement, Course, Education,
                                                Experience, Has, Patent,
                                                Project, Publication, Skill,
                                                PlacementStatus, NotifyStudent)
27.376471
89
0.644607
e6973c5ea944d5bc8b7dc232052cd5073acf79bf
253
py
Python
concat_col_app/factories.py
thinkAmi-sandbox/django-datatables-view-sample
ac3df721089489e61c09ac75d320be3704c72105
[ "Unlicense" ]
null
null
null
concat_col_app/factories.py
thinkAmi-sandbox/django-datatables-view-sample
ac3df721089489e61c09ac75d320be3704c72105
[ "Unlicense" ]
null
null
null
concat_col_app/factories.py
thinkAmi-sandbox/django-datatables-view-sample
ac3df721089489e61c09ac75d320be3704c72105
[ "Unlicense" ]
null
null
null
import factory

from concat_col_app.models import Color, Apple
18.071429
54
0.73913
e698cce58860b9d7c8249a1734c7596543b84bc7
1,843
py
Python
defects4cpp/errors/argparser.py
HansolChoe/defects4cpp
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
[ "MIT" ]
10
2021-06-23T01:53:19.000Z
2022-03-31T03:14:01.000Z
defects4cpp/errors/argparser.py
HansolChoe/defects4cpp
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
[ "MIT" ]
34
2021-05-27T01:09:04.000Z
2022-03-28T07:53:35.000Z
defects4cpp/errors/argparser.py
HansolChoe/defects4cpp
cb9e3db239c50e6ec38127cec117865f0ee7a5cf
[ "MIT" ]
6
2021-09-03T07:16:56.000Z
2022-03-29T07:30:35.000Z
from pathlib import Path
from typing import Dict

from errors.common.exception import DppError
29.253968
90
0.683668
e6990f7310e89eaf51795fa05ea2ca52396ff9f9
161
py
Python
utils/__init__.py
wang97zh/EVS-Net-1
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
[ "MIT" ]
null
null
null
utils/__init__.py
wang97zh/EVS-Net-1
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
[ "MIT" ]
null
null
null
utils/__init__.py
wang97zh/EVS-Net-1
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
[ "MIT" ]
null
null
null
from .utility import *
from .tricks import *
from .tensorlog import *
from .self_op import *
from .resume import *
from .optims import *
from .metric import *
16.1
24
0.726708
e69960fc13118fa865fc6b90dfac61ac3e974383
1,290
py
Python
model-optimizer/extensions/front/mxnet/arange_ext.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
null
null
null
model-optimizer/extensions/front/mxnet/arange_ext.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
19
2021-03-26T08:11:00.000Z
2022-02-21T13:06:26.000Z
model-optimizer/extensions/front/mxnet/arange_ext.py
calvinfeng/openvino
11f591c16852637506b1b40d083b450e56d0c8ac
[ "Apache-2.0" ]
1
2021-07-28T17:30:46.000Z
2021-07-28T17:30:46.000Z
""" Copyright (C) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np from extensions.ops.range import Range from mo.front.extractor import FrontExtractorOp from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs from mo.graph.graph import Node
32.25
73
0.694574
e69993a645167fee1fbafcf116e0729c914350fa
15,381
py
Python
fold_cur_trans.py
lucasforever24/arcface_noonan
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
[ "MIT" ]
null
null
null
fold_cur_trans.py
lucasforever24/arcface_noonan
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
[ "MIT" ]
null
null
null
fold_cur_trans.py
lucasforever24/arcface_noonan
9d805a0d4d478e347a9084ad6ce24fe4c8dc5e65
[ "MIT" ]
null
null
null
import cv2 from PIL import Image import argparse from pathlib import Path from multiprocessing import Process, Pipe,Value,Array import torch from config import get_config from mtcnn import MTCNN from Learner_trans_tf import face_learner from utils import load_facebank, draw_box_name, prepare_facebank, save_label_score, label_binarize from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score from sklearn.model_selection import KFold import os import glob import shutil import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as mcolors import datetime if __name__ == '__main__': parser = argparse.ArgumentParser(description='for face verification') parser.add_argument("-ds", "--dataset_dir", help="where to get data", default="noonan", type=str) parser.add_argument('-sd','--stored_result_dir',help='where to store data as np arrays', default="results/trans/", type=str) parser.add_argument("-k", "--kfold", help="returns the number of splitting iterations in the cross-validator.", default=10, type=int) parser.add_argument("-e", "--epochs", help="training epochs", default=20, type=int) parser.add_argument("-n", "--names_considered", help="names for different types considered, separated by commas", default="normal,noonan,others", type=str) parser.add_argument("-g", "--gpu_id", help="gpu id to use", default="", type=str) parser.add_argument("-s", "--use_shuffled_kfold", help="whether to use shuffled kfold.", action="store_true") parser.add_argument("-rs", "--random_seed", help="random seed used for k-fold split.", default=6, type=int) parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true") parser.add_argument("-a", "--additional_data_dir", help="where to get the additional data", default="", type=str) parser.add_argument("-ta", "--additional_test_or_train", help="use additional data in only train, or test, or both", default="", type=str) parser.add_argument("-as", "--stylegan_data_dir", help="where to get the additional data", default="", type=str) parser.add_argument("-ts", "--stylegan_test_or_train", help="use stylegan data in only train, or test, or both", default="", type=str) parser.add_argument("-tf", "--transfer", help="how many layer(s) used for transfer learning, " "but 0 means retraining the whole network.", default=0, type=int) parser.add_argument("-ac", "--arch", help="types of model used for encoder", default="mobile", type=str) args = parser.parse_args() for arg in vars(args): print(arg+':', getattr(args, arg)) emore_dir = 'faces_emore' conf = get_config(True, args) conf.emore_folder = conf.data_path/emore_dir mtcnn = MTCNN() print('mtcnn loaded') names_considered = args.names_considered.strip().split(',') exp_name = args.dataset_dir[:4] if args.additional_data_dir: if 'LAG' in args.additional_data_dir: exp_name += '_lag' elif 'literature' in args.additional_data_dir: exp_name += '_ltr' if args.kfold != 10: exp_name += ('_k' + str(args.kfold)) if args.epochs != 20: exp_name += ('_e' + str(args.epochs)) if args.transfer != 0 and args.transfer != 1: exp_name += ('_td' + str(args.transfer)) if args.use_shuffled_kfold: exp_name += ('_s' + str(args.random_seed)) print(exp_name) # prepare folders raw_dir = 'raw_112' verify_type = 'trans' if args.use_shuffled_kfold: verify_type += '_shuffled' # train_dir = conf.facebank_path/args.dataset_dir/verify_type/'train' train_dir = conf.emore_folder/'imgs' test_dir = conf.emore_folder/'test' conf.facebank_path = train_dir if os.path.exists(train_dir): 
shutil.rmtree(train_dir) if os.path.exists(test_dir): shutil.rmtree(test_dir) os.mkdir(train_dir) os.mkdir(test_dir) for name in names_considered: os.makedirs(str(train_dir) + '/' + name, exist_ok=True) os.makedirs(str(test_dir) + '/' + name, exist_ok=True) if args.stylegan_data_dir: #e.g. smile_refine_mtcnn_112_divi full_stylegan_dir = str(conf.data_path/'facebank'/'stylegan'/args.stylegan_data_dir) stylegan_folders = os.listdir(full_stylegan_dir) if args.additional_data_dir: full_additional_dir = str(conf.data_path/'facebank'/args.additional_data_dir) # init kfold if args.use_shuffled_kfold: kf = KFold(n_splits=args.kfold, shuffle=True, random_state=args.random_seed) else: kf = KFold(n_splits=args.kfold, shuffle=False, random_state=None) # collect and split raw data data_dict = {} idx_gen = {} for name in names_considered: tmp_list = glob.glob(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir) + '/' + name + '*') if 'innm' in args.stylegan_data_dir: tmp_list = tmp_list + glob.glob(str(full_stylegan_dir) + '/' + name + '*') stylegan_folders = [] print(str(conf.data_path/'facebank'/args.dataset_dir/raw_dir)) data_dict[name] = np.array(tmp_list) idx_gen[name] = kf.split(data_dict[name]) if 'literature' in args.additional_data_dir: data_dict['ltr'] = np.array(glob.glob(str(full_additional_dir) + '/*')) idx_gen['ltr'] = kf.split(data_dict['ltr']) score_names = [] scores = [] wrong_names = [] args.stored_result_path = args.stored_result_dir + os.sep + str(datetime.datetime.now())[:19] if not os.path.exists(args.stored_result_path): os.mkdir(args.stored_result_path) # for fold_idx, (train_index, test_index) in enumerate(kf.split(data_dict[names_considered[0]])): for fold_idx in range(args.kfold): train_set = {} test_set = {} for name in names_considered: (train_index, test_index) = next(idx_gen[name]) train_set[name], test_set[name] = data_dict[name][train_index], data_dict[name][test_index] if 'ltr' in data_dict.keys(): (train_index, test_index) = next(idx_gen['ltr']) train_set['ltr'], test_set['ltr'] = data_dict['ltr'][train_index], data_dict['ltr'][test_index] if 'train' in args.additional_test_or_train: train_set['noonan'] = np.concatenate((train_set['noonan'], train_set['ltr'])) if 'test' in args.additional_test_or_train: test_set['noonan'] = np.concatenate((test_set['noonan'], test_set['ltr'])) # remove previous data prev = glob.glob(str(train_dir) + '/*/*') for p in prev: os.remove(p) prev = glob.glob(str(test_dir) + '/*/*') for p in prev: os.remove(p) # save trains to conf.facebank_path/args.dataset_dir/'train' and # tests to conf.data_path/'facebank'/args.dataset_dir/'test' # count unbalanced data train_count = {} test_count = {} for name in names_considered: train_count[name] = 0 for i in range(len(train_set[name])): img_folder = str(train_set[name][i]) for img in os.listdir(img_folder): shutil.copy(img_folder + os.sep + str(img), os.path.join(str(train_dir), name, str(img))) train_count[name] += 1 # addition data from stylegan if 'interp' not in data_dict.keys(): folder = os.path.basename(train_set[name][i]) if args.stylegan_data_dir and ('train' in args.stylegan_test_or_train) and (folder in stylegan_folders): for img in os.listdir(full_stylegan_dir + os.sep + folder): shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)), os.path.join(str(train_dir), name, str(img))) # ('/'.join(train_set[name][i].strip().split('/')[:-2]) + # '/' + verify_type + '/train/' + name + os.sep + img)) train_count[name] += 1 # test for i in range(len(test_set[name])): test_count[name] = 0 
img_folder = str(test_set[name][i]) for img in os.listdir(img_folder): shutil.copy(img_folder + os.sep + str(img), os.path.join(str(test_dir), name, str(img))) test_count[name] += 1 # addition data from stylegan if 'interp' not in data_dict.keys(): folder = os.path.basename(test_set[name][i]) if args.stylegan_data_dir and ('test' in args.stylegan_test_or_train) and (folder in stylegan_folders): # and # (folder not in ['noonan7','noonan19','noonan23','normal9','normal20','normal23'])): for img in os.listdir(full_stylegan_dir + os.sep + folder): shutil.copy(os.path.join(full_stylegan_dir, folder, str(img)), os.path.join(str(test_dir), name, str(img))) test_count[name] += 1 print(train_count, test_count) # deal with unbalanced data """ if train_count['normal'] // train_count['noonan'] > 1: aug_num = train_count['normal'] // train_count['noonan'] - 1 for img in os.listdir(os.path.join(str(train_dir), 'noonan')): for aug_idx in range(aug_num): aug_img = img[:img.rfind('.')] + '_' + str(aug_idx) + img[img.rfind('.'):] shutil.copy(os.path.join(str(train_dir), 'noonan', img), os.path.join(str(train_dir), 'noonan', aug_img)) """ if 'fake' in args.additional_data_dir: fake_dict = {'noonan':'normal', 'normal':'noonan'} full_additional_dir = conf.data_path/'facebank'/'noonan+normal'/args.additional_data_dir add_data = glob.glob(str(full_additional_dir) + os.sep + '*.png') print('additional:', args.additional_data_dir, len(add_data)) for name in names_considered: for img_f in add_data: if name in img_f.strip().split(os.sep)[-1]: # print('source:', img_f) # print('copy to:', img_f.replace(str(full_additional_dir), # str(train_dir) + os.sep + fake_dict[name])) # print('copy to:', img_f.replace(args.additional_data_dir, # verify_type + '/train/' + name)) shutil.copy(img_f, os.path.join(str(train_dir), fake_dict[name], os.path.basename(img_f))) print(fold_idx) print('datasets ready') conf_train = get_config(True, args) conf_train.emore_folder = conf.data_path/emore_dir conf_train.stored_result_dir = args.stored_result_path learner = face_learner(conf=conf_train, transfer=args.transfer, ext=exp_name+'_'+str(fold_idx)) # conf, inference=False, transfer=0 if args.transfer != 0: learner.load_state(conf.save_path, False, True) print('learner loaded') learner.train(conf_train, args.epochs) print('learner retrained.') learner.save_state() print('Model is saved') # prepare_facebank targets, names, names_idx = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta) print('names_classes:', names) noonan_idx = names_idx['noonan'] print('facebank updated') for path in test_dir.iterdir(): if path.is_file(): continue # print(path) for fil in path.iterdir(): # print(fil) orig_name = ''.join([i for i in fil.name.strip().split('.')[0].split('_')[0] if not i.isdigit()]) for name in names_idx.keys(): if name in orig_name: score_names.append(names_idx[name]) """ if orig_name not in names_considered: print("Un-considered name:", fil.name) continue """ frame = cv2.imread(str(fil)) image = Image.fromarray(frame) faces = [image,] distance = learner.binfer(conf, faces, targets, args.tta) label = score_names[-1] score = np.exp(distance.dot(-1)) pred = np.argmax(score, 1) if pred != label: wrong_names.append(orig_name) scores.append(score) score_names = np.array(score_names) wrong_names = np.array(wrong_names) score_np = np.squeeze(np.array(scores)) n_classes = score_np.shape[1] score_names = label_binarize(score_names, classes=range(n_classes)) score_sum = np.zeros([score_np.shape[0], 1]) for i in range(n_classes): score_sum 
+= score_np[:, i, None] # keep the dimension relative_scores = (score_np / score_sum) total_scores = relative_scores.ravel() total_names = score_names.ravel() name_path = os.path.join(args.stored_result_path, 'wrong_names.npy') save_label_score(name_path, wrong_names) label_path = os.path.join(args.stored_result_path, 'labels_trans.npy') save_label_score(label_path, score_names) score_path = os.path.join(args.stored_result_path, 'scores_trans.npy') save_label_score(score_path, relative_scores) print('saved!') # Compute ROC curve and ROC area for noonan fpr, tpr, _ = roc_curve(total_names, total_scores) #scores_np[:, noonan_idx] roc_auc = auc(fpr, tpr) # For PR curve precision, recall, _ = precision_recall_curve(total_names, total_scores) average_precision = average_precision_score(total_names, total_scores) # plots plt.figure() colors = list(mcolors.TABLEAU_COLORS) lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.4f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC_{}'.format(exp_name)) plt.legend(loc="lower right") plt.savefig(args.stored_result_path + os.sep + '/fp_tp_{}.png'.format(exp_name)) plt.close() # plt.show() plt.figure() plt.step(recall, precision, where='post') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.title('Average precision score ({}): AP={:0.4f}'.format(exp_name, average_precision)) plt.savefig(args.stored_result_path + os.sep + '/pr_{}.png'.format(exp_name)) plt.close()
44.453757
124
0.594565
e699c205aa18e90414c7e2eebb09f229e7cbf13e
2,603
py
Python
examples/tryclass.py
manahter/dirio
c33fcd6c114ffb275d7147156c7041389fab6cfc
[ "MIT" ]
null
null
null
examples/tryclass.py
manahter/dirio
c33fcd6c114ffb275d7147156c7041389fab6cfc
[ "MIT" ]
null
null
null
examples/tryclass.py
manahter/dirio
c33fcd6c114ffb275d7147156c7041389fab6cfc
[ "MIT" ]
null
null
null
import time


def event_call(other_arg, kwarg="-", result=None):
    """Call this method on the returned result."""
    print(f"Bind Result, {result}\n"*10)
    print("other_arg", other_arg)
    print("kwarg", kwarg)


if __name__ == "__main__":

    try:
        from dirio import Dirio
    except ImportError:
        from ..dirio import Dirio

    # TryClass (the worker class wrapped by Dirio) is defined elsewhere in
    # the examples package; its definition is not part of this snippet.
    dr_cls = Dirio(target=TryClass, args=(888,), kwargs={}, worker=False)

    print("Starting values :", dr_cls.value, dr_cls)
    print("\n"*2)

    print("Wait 1 sec for your reply. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=1))
    print("Wait until the reply comes. metod 1 :", dr_cls.metod1(5, val2="1", dr_wait=-1))

    code0 = dr_cls.metod1(5, val2="1", dr_code=True)
    print("Metod 1, call, via bind to func",
          dr_cls.dr_bind(code0, event_call, args=("OtHeR aRg",), kwargs={"kwarg": "KwArG"}))

    while True:
        # dr_cls.dr_binds_check()
        print("Run the method and give us the response reading code : dr_code=True")
        code1 = dr_cls.metod1(5, val2="1", dr_code=True)

        print("Is there data in the reading code? : dr_code=43534")
        while not dr_cls.metod1(dr_code=code1):
            print("We are waiting for the data with this code :", code1)
            time.sleep(.5)

        print("Returned metod 1 data :", dr_cls.metod1(dr_code=code1))

        print("Methods called this way give the last return value : nothing or dr_code=False")
        code2 = dr_cls.metod2(10, val2="2", dr_code=True)
        print("Search by code only :", dr_cls.dr_code(code2, wait=1))

        print("Trying metod 2, called and returned :", dr_cls.metod2(10, val2="2", dr_code=False))
        print("Trying metod 3, called and returned :", dr_cls.metod3(15, val2="3"))

        print("\n"*2)
        time.sleep(3)

    dr_cls.dr_terminate()
30.988095
128
0.594314
e69afae741859fe05b5f191d930aaa0cc0138694
3,204
py
Python
qiskit/providers/basebackend.py
ismaila-at-za-ibm/qiskit-terra
08303ec98ac7b33fde55266dc3a74466fbdcae95
[ "Apache-2.0" ]
2
2021-09-06T19:25:36.000Z
2021-11-17T10:46:12.000Z
qiskit/providers/basebackend.py
ismaila-at-za-ibm/qiskit-terra
08303ec98ac7b33fde55266dc3a74466fbdcae95
[ "Apache-2.0" ]
null
null
null
qiskit/providers/basebackend.py
ismaila-at-za-ibm/qiskit-terra
08303ec98ac7b33fde55266dc3a74466fbdcae95
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.

"""This module implements the abstract base class for backend modules.

To create add-on backend modules subclass the Backend class in this module.
Doing so requires that the required backend interface is implemented.
"""

from abc import ABC, abstractmethod

from qiskit.version import __version__
from .models import BackendStatus


# The enclosing class statement was lost from this snippet; `BaseBackend` is
# assumed from the module name so the methods below have their proper scope.
class BaseBackend(ABC):

    def properties(self):
        """Return backend properties.

        Returns:
            BackendProperties: the configuration for the backend. If the backend
            does not support properties, it returns ``None``.
        """
        return None

    def provider(self):
        """Return the backend Provider.

        Returns:
            BaseProvider: the Provider responsible for the backend.
        """
        return self._provider

    def status(self):
        """Return backend status.

        Returns:
            BackendStatus: the status of the backend.
        """
        return BackendStatus(backend_name=self.name(),
                             backend_version=__version__,
                             operational=True,
                             pending_jobs=0,
                             status_msg='')

    def name(self):
        """Return backend name.

        Returns:
            str: the name of the backend.
        """
        return self._configuration.backend_name

    def __repr__(self):
        """Official string representation of a Backend.

        Note that, by Qiskit convention, it is consciously *not* a fully valid
        Python expression. Subclasses should provide 'a string of the form
        <...some useful description...>'. [0]

        [0] https://docs.python.org/3/reference/datamodel.html#object.__repr__
        """
        return "<{}('{}') from {}()>".format(self.__class__.__name__,
                                             self.name(),
                                             self._provider)
30.226415
80
0.604869
e69c64799a3175f6ca7da109f5305d614b082638
487
py
Python
arrays/jump2/Solution.py
shahbagdadi/py-algo-n-ds
ff689534b771ddb4869b001b20a0e21b4896bb0a
[ "MIT" ]
null
null
null
arrays/jump2/Solution.py
shahbagdadi/py-algo-n-ds
ff689534b771ddb4869b001b20a0e21b4896bb0a
[ "MIT" ]
null
null
null
arrays/jump2/Solution.py
shahbagdadi/py-algo-n-ds
ff689534b771ddb4869b001b20a0e21b4896bb0a
[ "MIT" ]
null
null
null
from typing import List
import sys

# The Solution class (providing jump) is defined elsewhere; only the driver
# survives in this snippet.
s = Solution()
ans = s.jump([3,2,1,0,4])
print(ans)
27.055556
100
0.523614
e69c81543af0469c06adb5c970083f2d456e2ede
1,881
py
Python
share/tests.py
shared-tw/shared-tw
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
[ "MIT" ]
2
2021-12-09T10:39:37.000Z
2022-02-22T09:01:26.000Z
share/tests.py
shared-tw/backend
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
[ "MIT" ]
3
2021-07-03T12:56:38.000Z
2021-07-04T05:53:43.000Z
share/tests.py
shared-tw/shared-tw
90dcf92744b4e0ec9e9aa085026b5543c9c3922c
[ "MIT" ]
null
null
null
import unittest

from . import states
33
80
0.700159
e69ec2353a5fed95b6dce8a05f828517c6009931
2,137
py
Python
app/extensions.py
grow/airpress
b46e951b27b8216f51f0fade3695049455866825
[ "MIT" ]
1
2017-07-07T20:15:14.000Z
2017-07-07T20:15:14.000Z
app/extensions.py
grow/airpress
b46e951b27b8216f51f0fade3695049455866825
[ "MIT" ]
4
2020-03-24T15:24:51.000Z
2021-06-01T21:42:43.000Z
app/extensions.py
grow/airpress
b46e951b27b8216f51f0fade3695049455866825
[ "MIT" ]
1
2016-12-15T00:03:13.000Z
2016-12-15T00:03:13.000Z
from jinja2 import nodes
from jinja2.ext import Extension
37.491228
75
0.630323
e6a0c4454894632f570e8f7308cb8d060eed1f45
767
py
Python
modtox/Helpers/helpers.py
danielSoler93/modtox
757234140cc780f57d031b46d9293fc2bf95d18d
[ "Apache-2.0" ]
4
2019-09-22T22:57:30.000Z
2020-03-18T13:20:50.000Z
modtox/Helpers/helpers.py
danielSoler93/ModTox
757234140cc780f57d031b46d9293fc2bf95d18d
[ "Apache-2.0" ]
21
2019-09-16T11:07:13.000Z
2019-11-20T15:06:06.000Z
modtox/Helpers/helpers.py
danielSoler93/ModTox
757234140cc780f57d031b46d9293fc2bf95d18d
[ "Apache-2.0" ]
2
2019-09-07T17:07:55.000Z
2020-03-18T13:20:52.000Z
import os


def retrieve_molecule_number(pdb, resname):
    """
    IDENTIFICATION OF MOLECULE NUMBER BASED ON THE TER'S
    """
    count = 0
    with open(pdb, 'r') as x:
        lines = x.readlines()
    for i in lines:
        if i.split()[0] == 'TER':
            count += 1
        if i.split()[3] == resname:
            molecule_number = count + 1
            break
    return molecule_number
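A behavior sketch for retrieve_molecule_number (the two-molecule PDB below is hypothetical, written to a temp file just for the call): the helper counts TER records until it meets the requested residue name.

import tempfile

pdb_text = (
    "ATOM      1  N   ALA A   1\n"
    "TER       2      ALA A   1\n"
    "ATOM      3  C1  LIG B   1\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".pdb", delete=False) as fh:
    fh.write(pdb_text)

print(retrieve_molecule_number(fh.name, "LIG"))  # 2 -- one TER precedes LIG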
23.96875
68
0.573664
e6a0dd14d03a3e676bea433343d789bde96e6abd
666
py
Python
bbio/platform/beaglebone/api.py
efargas/PyBBIO
b0b15fc52befd56e817dbc5876f738e70ef05541
[ "MIT" ]
null
null
null
bbio/platform/beaglebone/api.py
efargas/PyBBIO
b0b15fc52befd56e817dbc5876f738e70ef05541
[ "MIT" ]
null
null
null
bbio/platform/beaglebone/api.py
efargas/PyBBIO
b0b15fc52befd56e817dbc5876f738e70ef05541
[ "MIT" ]
null
null
null
# api.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone platform API file.

from bbio.platform.platform import detect_platform
PLATFORM = detect_platform()

if "3.8" in PLATFORM:
    from bone_3_8.adc import analog_init, analog_cleanup
    from bone_3_8.pwm import pwm_init, pwm_cleanup
    from serial_port import serial_cleanup

elif "3.2" in PLATFORM:
    from bone_3_2.adc import analog_init, analog_cleanup
    from bone_3_2.pwm import pwm_init, pwm_cleanup
    from serial_port import serial_cleanup
21.483871
54
0.77027
e6a1e01053fb282362b9b417d81cb0cf76a2bbed
21,947
py
Python
tryhackme/http.py
GnarLito/tryhackme.py
20b4dd6a15c13c57e7a7be7f59913b937a992e4b
[ "MIT" ]
null
null
null
tryhackme/http.py
GnarLito/tryhackme.py
20b4dd6a15c13c57e7a7be7f59913b937a992e4b
[ "MIT" ]
16
2021-11-22T07:51:32.000Z
2021-12-14T00:07:48.000Z
tryhackme/http.py
GnarLito/tryhackme.py
20b4dd6a15c13c57e7a7be7f59913b937a992e4b
[ "MIT" ]
null
null
null
import re
import sys
from urllib.parse import quote as _uriquote

import requests

from . import __version__, errors, utils
from .converters import _county_types, _leaderboard_types, _vpn_types, _not_none
from . import checks
from .cog import request_cog

GET='get'
POST='post'


# NOTE: the enclosing HTTP-client class (which supplies `self.request` and the
# RouteList referenced below) is elided from this snippet.

def get_public_paths(self, **attrs):
    return self.request(RouteList.get_public_paths(), **attrs)

def get_path_summary(self, **attrs):
    return self.request(RouteList.get_path_summary(), **attrs)

# * modules

# * games

# * VPN

# * VM
def get_machine_running(self, **attrs):
    return self.request(RouteList.get_machine_running(), **attrs)

# * user -badge
def get_user_badges(self, username, **attrs):
    return self.request(RouteList.get_user_badges(username=username), **attrs)

def get_all_badges(self, **attrs):
    return self.request(RouteList.get_all_badges(), **attrs)

# * user -team

# * user -notifications

# * user -messages

# * user -room
def get_user_completed_rooms_count(self, username, **attrs):
    return self.request(RouteList.get_user_completed_rooms_count(username=username), **attrs)

def get_user_completed_rooms(self, username, limit:int=10, page:int=1, **attrs):
    return self.request(RouteList.get_user_completed_rooms(username=username, options={"limit": limit, "page": page}), **attrs)

def get_user_created_rooms(self, username, limit:int=10, page:int=1, **attrs):
    return self.request(RouteList.get_user_created_rooms(username=username, options={"limit": limit, "page": page}), **attrs)

# * user

# * room
def get_room_votes(self, room_code, **attrs):
    return self.request(RouteList.get_room_votes(room_code=room_code), **attrs)

def get_room_details(self, room_code, loadWriteUps: bool=True, loadCreators: bool=True, loadUser: bool=True, **attrs):
    return self.request(RouteList.get_room_details(room_code=room_code, options={"loadWriteUps": loadWriteUps, "loadCreators": loadCreators, "loadUser": loadUser}), **attrs).get(room_code, {})

def get_room_tasks(self, room_code, **attrs):
    return self.request(RouteList.get_room_tasks(room_code=room_code), **attrs)
48.879733
196
0.645008
e6a26bf564f5d9a437cee65264d1566e43a4893e
10,198
py
Python
flatlander/runner/experiment_runner.py
wullli/flatlander
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
[ "MIT" ]
3
2020-12-30T04:18:42.000Z
2022-03-17T13:15:30.000Z
flatlander/runner/experiment_runner.py
wullli/flatlander
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
[ "MIT" ]
null
null
null
flatlander/runner/experiment_runner.py
wullli/flatlander
2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad
[ "MIT" ]
null
null
null
import os
from argparse import ArgumentParser
from pathlib import Path

import gym
import ray
import ray.tune.result as ray_results
import yaml
from gym.spaces import Tuple
from ray.cluster_utils import Cluster
from ray.rllib.utils import try_import_tf, try_import_torch
from ray.tune import run_experiments, register_env
from ray.tune.logger import TBXLogger
from ray.tune.resources import resources_to_json
from ray.tune.tune import _make_scheduler
from ray.tune.utils import merge_dicts

from flatlander.envs import get_eval_config
from flatlander.envs.flatland_sparse import FlatlandSparse
from flatlander.envs.observations import make_obs
from flatlander.envs.utils.global_gym_env import GlobalFlatlandGymEnv
from flatlander.envs.utils.gym_env_fill_missing import FillingFlatlandGymEnv
from flatlander.logging.custom_metrics import on_episode_end
from flatlander.logging.wandb_logger import WandbLogger
from flatlander.utils.loader import load_envs, load_models

ray_results.DEFAULT_RESULTS_DIR = os.path.join(os.getcwd(), "..", "..", "..",
                                               "flatland-challenge-data/results")
44.72807
115
0.585507
e6a4e0e5dfdac6166da22e4d8c2409f996b05e0d
7,273
py
Python
syslib/utils_keywords.py
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
08540c0f083a25b5b4e7a4c839080fe54383038c
[ "Apache-2.0" ]
1
2019-01-19T09:32:18.000Z
2019-01-19T09:32:18.000Z
syslib/utils_keywords.py
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
08540c0f083a25b5b4e7a4c839080fe54383038c
[ "Apache-2.0" ]
null
null
null
syslib/utils_keywords.py
rahulmah/sample-cloud-native-toolchain-tutorial-20170720084529291
08540c0f083a25b5b4e7a4c839080fe54383038c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python r""" This module contains keyword functions to supplement robot's built in functions and use in test where generic robot keywords don't support. """ import time from robot.libraries.BuiltIn import BuiltIn from robot.libraries import DateTime import re ############################################################################### def run_until_keyword_fails(retry, retry_interval, name, *args): r""" Execute a robot keyword repeatedly until it either fails or the timeout value is exceeded. Note: Opposite of robot keyword "Wait Until Keyword Succeeds". Description of argument(s): retry Max timeout time in hour(s). retry_interval Time interval in minute(s) for looping. name Robot keyword to execute. args Robot keyword arguments. """ # Convert the retry time in seconds retry_seconds = DateTime.convert_time(retry) timeout = time.time() + int(retry_seconds) # Convert the interval time in seconds interval_seconds = DateTime.convert_time(retry_interval) interval = int(interval_seconds) BuiltIn().log(timeout) BuiltIn().log(interval) while True: status = BuiltIn().run_keyword_and_return_status(name, *args) # Return if keywords returns as failure. if status is False: BuiltIn().log("Failed as expected") return False # Return if retry timeout as success. elif time.time() > timeout > 0: BuiltIn().log("Max retry timeout") return True time.sleep(interval) BuiltIn().log(time.time()) return True ############################################################################### ############################################################################### def htx_error_log_to_list(htx_error_log_output): r""" Parse htx error log output string and return list of strings in the form "<field name>:<field value>". The output of this function may be passed to the build_error_dict function. Description of argument(s): htx_error_log_output Error entry string containing the stdout generated by "htxcmdline -geterrlog". Example of htx_error_log_output contents: ######################## Result Starts Here ############################### Currently running ECG/MDT : /usr/lpp/htx/mdt/mdt.whit =========================== --------------------------------------------------------------------- Device id:/dev/nvidia0 Timestamp:Mar 29 19:41:54 2017 err=00000027 sev=1 Exerciser Name:hxenvidia Serial No:Not Available Part No:Not Available Location:Not Available FRU Number:Not Available Device:Not Available Error Text:cudaEventSynchronize for stopEvent returned err = 0039 from file , line 430. --------------------------------------------------------------------- --------------------------------------------------------------------- Device id:/dev/nvidia0 Timestamp:Mar 29 19:41:54 2017 err=00000027 sev=1 Exerciser Name:hxenvidia Serial No:Not Available Part No:Not Available Location:Not Available FRU Number:Not Available Device:Not Available Error Text:Hardware Exerciser stopped on error --------------------------------------------------------------------- ######################### Result Ends Here ################################ Example output: Returns the lists of error string per entry ['Device id:/dev/nvidia0', 'Timestamp:Mar 29 19:41:54 2017', 'err=00000027', 'sev=1', 'Exerciser Name:hxenvidia', 'Serial No:Not Available', 'Part No:Not Available', 'Location:Not Available', 'FRU Number:Not Available', 'Device:Not Available', 'Error Text:cudaEventSynchronize for stopEvent returned err = 0039 from file , line 430.'] """ # List which will hold all the list of entries. 
error_list = [] temp_error_list = [] parse_walk = False for line in htx_error_log_output.splitlines(): # Skip lines starting with "#" if line.startswith("#"): continue # Mark line starting with "-" and set parse flag. if line.startswith("-") and parse_walk is False: parse_walk = True continue # Mark line starting with "-" and reset parse flag. # Set temp error list to EMPTY. elif line.startswith("-"): error_list.append(temp_error_list) parse_walk = False temp_error_list = [] # Add entry to list if line is not emtpy elif parse_walk: temp_error_list.append(str(line)) return error_list ############################################################################### ############################################################################### def build_error_dict(htx_error_log_output): r""" Builds error list into a list of dictionary entries. Description of argument(s): error_list Error list entries. Example output dictionary: { 0: { 'sev': '1', 'err': '00000027', 'Timestamp': 'Mar 29 19:41:54 2017', 'Part No': 'Not Available', 'Serial No': 'Not Available', 'Device': 'Not Available', 'FRU Number': 'Not Available', 'Location': 'Not Available', 'Device id': '/dev/nvidia0', 'Error Text': 'cudaEventSynchronize for stopEvent returned err = 0039 from file , line 430.', 'Exerciser Name': 'hxenvidia' }, 1: { 'sev': '1', 'err': '00000027', 'Timestamp': 'Mar 29 19:41:54 2017', 'Part No': 'Not Available', 'Serial No': 'Not Available', 'Device': 'Not Available', 'FRU Number': 'Not Available', 'Location': 'Not Available', 'Device id': '/dev/nvidia0', 'Error Text': 'Hardware Exerciser stopped on error', 'Exerciser Name': 'hxenvidia' } }, """ # List which will hold all the list of entries. error_list = [] error_list = htx_error_log_to_list(htx_error_log_output) # dictionary which holds the error dictionry entry. error_dict = {} temp_error_dict = {} error_index = 0 # Loop through the error list. for entry_list in error_list: # Loop through the first error list entry. for entry in entry_list: # Split string into list for key value update. # Example: 'Device id:/dev/nvidia0' # Example: 'err=00000027' parm_split = re.split("[:=]", entry) # Populate temp dictionary with key value pair data. temp_error_dict[str(parm_split[0])] = parm_split[1] # Update the master dictionary per entry index. error_dict[error_index] = temp_error_dict # Reset temp dict to EMPTY and increment index count. temp_error_dict = {} error_index += 1 return error_dict ###############################################################################
32.61435
79
0.54063
e6a5916da8516ca978c7505bb56075d47bacaa77
826
py
Python
tools/webcam/webcam_apis/nodes/__init__.py
ivmtorres/mmpose
662cb50c639653ae2fc19d3421ce10bd02246b85
[ "Apache-2.0" ]
1
2022-02-13T12:27:40.000Z
2022-02-13T12:27:40.000Z
tools/webcam/webcam_apis/nodes/__init__.py
ivmtorres/mmpose
662cb50c639653ae2fc19d3421ce10bd02246b85
[ "Apache-2.0" ]
null
null
null
tools/webcam/webcam_apis/nodes/__init__.py
ivmtorres/mmpose
662cb50c639653ae2fc19d3421ce10bd02246b85
[ "Apache-2.0" ]
null
null
null
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import NODES
from .faceswap_nodes import FaceSwapNode
from .frame_effect_nodes import (BackgroundNode, BugEyeNode, MoustacheNode,
                                 NoticeBoardNode, PoseVisualizerNode,
                                 SaiyanNode, SunglassesNode)
from .helper_nodes import ModelResultBindingNode, MonitorNode, RecorderNode
from .mmdet_nodes import DetectorNode
from .mmpose_nodes import TopDownPoseEstimatorNode
from .xdwendwen_nodes import XDwenDwenNode

__all__ = [
    'NODES', 'PoseVisualizerNode', 'DetectorNode', 'TopDownPoseEstimatorNode',
    'MonitorNode', 'BugEyeNode', 'SunglassesNode', 'ModelResultBindingNode',
    'NoticeBoardNode', 'RecorderNode', 'FaceSwapNode', 'MoustacheNode',
    'SaiyanNode', 'BackgroundNode', 'XDwenDwenNode'
]
45.888889
78
0.74092
e6a5f147ff440a3daeccaecdee477658d01cb25a
4,044
py
Python
DBParser/DBMove.py
lelle1234/Db2Utils
55570a1afbe6d4abe61c31952bc178c2443f4e5b
[ "Apache-2.0" ]
4
2020-02-27T13:56:37.000Z
2022-02-07T23:07:24.000Z
DBParser/DBMove.py
lelle1234/Db2Utils
55570a1afbe6d4abe61c31952bc178c2443f4e5b
[ "Apache-2.0" ]
null
null
null
DBParser/DBMove.py
lelle1234/Db2Utils
55570a1afbe6d4abe61c31952bc178c2443f4e5b
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3

import ibm_db
import getopt
import sys
import os
from toposort import toposort_flatten

db = None
host = "localhost"
port = "50000"
user = None
pwd = None
outfile = None
targetdb = None

try:
    opts, args = getopt.getopt(sys.argv[1:], "h:d:P:u:p:o:t:")
except getopt.GetoptError:
    sys.exit(-1)

for o, a in opts:
    if o == "-d":
        db = a
    if o == "-h":
        host = a
    if o == "-P":
        port = a
    if o == "-u":
        user = a
    if o == "-p":
        pwd = a
    if o == "-t":
        targetdb = a

if db is None or user is None or pwd is None or targetdb is None:
    print("Usage: DBMove.py [-h <host> -P <port>] -d <db> -u <user> -p <pwd> -t <target>")
    sys.exit(1)

db = db.upper()
targetdb = targetdb.upper()

cfg = (db, host, port, user, pwd)
conn = ibm_db.connect("DATABASE=%s; HOSTNAME=%s; PORT=%s; PROTOCOL=TCPIP; UID=%s; PWD=%s" % cfg, "", "")

get_db_type = "values nya.get_db_type()"

find_edges = """
SELECT rtrim(t.tabschema) || '.' || rtrim(t.tabname)
     , coalesce(rtrim(r.reftabschema) || '.' || rtrim(r.reftabname), 'dummy')
FROM syscat.tables t
LEFT JOIN syscat.references r
    ON (t.tabschema, t.tabname) = (r.tabschema, r.tabname)
WHERE t.tabschema not like 'SYS%'
  AND t.type = 'T'
  AND rtrim(t.tabschema) not like 'NYA_%'
  AND t.tabschema <> 'TMP'
ORDER BY 1
"""

identity_skip = """
select rtrim(tabschema) || '.' || rtrim(tabname)
from syscat.columns
where identity = 'Y' and generated = 'D'
"""

stmt = ibm_db.prepare(conn, get_db_type)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
db_type = tpl[0]

edges = dict()
stmt = ibm_db.prepare(conn, find_edges)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
    n1, n2 = tpl
    try:
        edges[n1].add(n2)
    except KeyError:
        edges[n1] = set()
        edges[n1].add(n2)
    tpl = ibm_db.fetch_tuple(stmt)

sorted_nodes = list(toposort_flatten(edges))
# print(sorted_nodes)

identity_skip_arr = []
edges = dict()
stmt = ibm_db.prepare(conn, identity_skip)
ibm_db.execute(stmt, ())
tpl = ibm_db.fetch_tuple(stmt)
while tpl:
    identity_skip_arr.append(tpl[0])
    tpl = ibm_db.fetch_tuple(stmt)
# print(identity_skip)

os.makedirs(db, exist_ok=True)
export_file = open("%s/export.sql" % db, "w")
load_file = open("%s/load.sql" % db, "w")

export_file.write("connect to %s;\n" % db)
load_file.write("connect to %s;\n" % targetdb)

if db_type == "N":
    load_file.write("""set integrity for nya.person off;\n""")
    load_file.write("""alter table nya.person
        alter column EMAIL_UC drop generated
        alter column NORMALIZED_FIRSTNAME drop generated
        alter column NORMALIZED_LASTNAME drop generated;\n""")
    load_file.write("""set integrity for nya.person immediate checked;\n""")

for t in sorted_nodes:
    if t == "dummy":
        continue
    export_file.write("export to %s.ixf of ixf lobs to . modified by codepage=819 messages export_%s.msg select * from %s;\n" % (t, t, t))
    identityskip = "identityoverride"
    if t in identity_skip_arr:
        identityskip = " "
    load_file.write("load from %s.ixf of ixf lobs from . modified by generatedoverride %s messages load_%s.msg replace into %s;\n" % (t, identityskip, t, t))

if db_type == "N":
    load_file.write("""set integrity for nya.person off;\n""")
    load_file.write("""alter table nya.person
        alter column EMAIL_UC set generated always as ( upper(email))
        alter column NORMALIZED_FIRSTNAME set generated always as ( NYA.REMOVE_DIACRITICS( FIRSTNAME ) )
        alter column NORMALIZED_LASTNAME set generated always as ( NYA.REMOVE_DIACRITICS( LASTNAME ) );\n""")
    load_file.write("""set integrity for nya.person immediate checked force generated;\n""")

load_file.write("""echo set integrity for all tables;\n""")

export_file.write("connect reset;\n")
load_file.write("connect reset;\n")

export_file.close()
load_file.close()
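The export/load ordering above leans on toposort_flatten over the foreign-key edge dict; a toy illustration of that dependency resolution (table names here are made up):

from toposort import toposort_flatten

edges = {"CHILD": {"PARENT"}, "PARENT": {"dummy"}}
print(toposort_flatten(edges))  # ['dummy', 'PARENT', 'CHILD'] -- referenced tables load first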
29.304348
157
0.633778
e6a6b8f37ebe80036ee8d9a83872d377cb863d68
732
py
Python
utils/glove.py
MirunaPislar/Word2vec
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
[ "MIT" ]
13
2018-05-19T22:29:27.000Z
2022-03-25T13:28:17.000Z
utils/glove.py
MirunaPislar/Word2vec
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
[ "MIT" ]
1
2019-01-14T09:55:50.000Z
2019-01-25T22:17:03.000Z
utils/glove.py
MirunaPislar/Word2vec
e9dd01488f081a7b8d7c00a0b21efe0d401d4927
[ "MIT" ]
6
2018-05-19T22:29:29.000Z
2022-03-11T12:00:37.000Z
import numpy as np

DEFAULT_FILE_PATH = "utils/datasets/glove.6B.50d.txt"

def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
    """Read pretrained GloVe vectors"""
    wordVectors = np.zeros((len(tokens), dimensions))
    with open(filepath) as ifs:
        for line in ifs:
            line = line.strip()
            if not line:
                continue
            row = line.split()
            token = row[0]
            if token not in tokens:
                continue
            data = [float(x) for x in row[1:]]
            if len(data) != dimensions:
                raise RuntimeError("wrong number of dimensions")
            wordVectors[tokens[token]] = np.asarray(data)
    return wordVectors
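A usage sketch (assumes the default GloVe file is present on disk; the two-token vocabulary is illustrative):

tokens = {"the": 0, "cat": 1}   # token -> row index, as the function expects
vecs = loadWordVectors(tokens)
print(vecs.shape)               # (2, 50)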
33.272727
71
0.577869
e6aa6635d278553660a8a5b50b4098367fae31a5
2,446
py
Python
composer/profiler/__init__.py
stanford-crfm/composer
4996fbd818971afd6439961df58b531d9b47a37b
[ "Apache-2.0" ]
null
null
null
composer/profiler/__init__.py
stanford-crfm/composer
4996fbd818971afd6439961df58b531d9b47a37b
[ "Apache-2.0" ]
null
null
null
composer/profiler/__init__.py
stanford-crfm/composer
4996fbd818971afd6439961df58b531d9b47a37b
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 MosaicML. All Rights Reserved.

"""Performance profiling tools.

The profiler gathers performance metrics during a training run that can be used to diagnose bottlenecks and
facilitate model development.

The metrics gathered include:

* Duration of each :class:`.Event` during training
* Time taken by the data loader to return a batch
* Host metrics such as CPU, system memory, disk and network utilization over time
* Execution order, latency and attributes of PyTorch operators and GPU kernels (see :doc:`profiler`)

The following example demonstrates how to setup and perform profiling on a simple training run.

.. literalinclude:: ../../../examples/profiler_demo.py
    :language: python
    :linenos:
    :emphasize-lines: 6, 27-49

It is required to specify an output ``profiler_trace_file`` during :class:`.Trainer` initialization to enable
profiling. The ``profiler_trace_file`` will contain the profiling trace data once the profiling run completes.
By default, the :class:`.Profiler`, :class:`.DataloaderProfiler` and :class:`.SystemProfiler` will be active.
The :class:`.TorchProfiler` is **disabled** by default.

To activate the :class:`.TorchProfiler`, the ``torch_profiler_trace_dir`` must be specified *in addition* to the
``profiler_trace_file`` argument. The ``torch_profiler_trace_dir`` will contain the Torch Profiler traces once the
profiling run completes. The :class:`.Profiler` will automatically merge the Torch traces in the
``torch_profiler_trace_dir`` into the ``profiler_trace_file``, allowing users to view a unified trace.

The complete traces can be viewed in a Google Chrome browser by navigating to ``chrome://tracing`` and loading
the ``profiler_trace_file``. Here is an example trace file:

.. image:: https://storage.googleapis.com/docs.mosaicml.com/images/profiler/profiler_trace_example.png
    :alt: Example Profiler Trace File
    :align: center

Additional details can be found in the Profiler Guide.
"""
from composer.profiler._event_handler import ProfilerEventHandler
from composer.profiler._profiler import Marker, Profiler
from composer.profiler._profiler_action import ProfilerAction

# All needs to be defined properly for sphinx autosummary
__all__ = [
    "Marker",
    "Profiler",
    "ProfilerAction",
    "ProfilerEventHandler",
]

Marker.__module__ = __name__
Profiler.__module__ = __name__
ProfilerAction.__module__ = __name__
ProfilerEventHandler.__module__ = __name__
44.472727
146
0.780867
e6ab4939fc5a6bc71ee2ae80221a8f7dd6549b7a
2,753
py
Python
gremlin-python/src/main/jython/setup.py
EvKissle/tinkerpop
84195e38fc22a1a089c345fade9c75711e6cfdfe
[ "Apache-2.0" ]
null
null
null
gremlin-python/src/main/jython/setup.py
EvKissle/tinkerpop
84195e38fc22a1a089c345fade9c75711e6cfdfe
[ "Apache-2.0" ]
null
null
null
gremlin-python/src/main/jython/setup.py
EvKissle/tinkerpop
84195e38fc22a1a089c345fade9c75711e6cfdfe
[ "Apache-2.0" ]
null
null
null
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
'''
import codecs
import os
import sys
import time

from setuptools import setup

# Folder containing the setup.py
root = os.path.dirname(os.path.abspath(__file__))

# Path to __version__ module
version_file = os.path.join(root, 'gremlin_python', '__version__.py')

# Check if this is a source distribution.
# If not create the __version__ module containing the version
if not os.path.exists(os.path.join(root, 'PKG-INFO')):
    timestamp = int(os.getenv('TIMESTAMP', time.time() * 1000)) / 1000
    fd = codecs.open(version_file, 'w', 'utf-8')
    fd.write("'''")
    fd.write(__doc__)
    fd.write("'''\n")
    fd.write('version = %r\n' % os.getenv('VERSION', '?').replace('-SNAPSHOT', '.dev-%d' % timestamp))
    fd.write('timestamp = %d\n' % timestamp)
    fd.close()

# Load version
from gremlin_python import __version__
version = __version__.version

install_requires = [
    'aenum==1.4.5',
    'tornado==4.4.1',
    'six==1.10.0'
]

if sys.version_info < (3, 2):
    install_requires += ['futures==3.0.5']

setup(
    name='gremlinpython',
    version=version,
    packages=['gremlin_python', 'gremlin_python.driver',
              'gremlin_python.driver.tornado', 'gremlin_python.process',
              'gremlin_python.structure', 'gremlin_python.structure.io'],
    license='Apache 2',
    url='http://tinkerpop.apache.org',
    description='Gremlin-Python for Apache TinkerPop',
    long_description=codecs.open("README", "r", "UTF-8").read(),
    test_suite="tests",
    data_files=[("", ["LICENSE", "NOTICE"])],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest',
        'mock'
    ],
    install_requires=install_requires,
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ]
)
32.388235
104
0.682528
e6ab63dd0a627fd5e3fd6b78f7716ef38a63c388
1,112
py
Python
src/_bar.py
yoshihikosuzuki/plotly_light
cef2465486e9147e27feae1193a1b4487e4fc543
[ "MIT" ]
null
null
null
src/_bar.py
yoshihikosuzuki/plotly_light
cef2465486e9147e27feae1193a1b4487e4fc543
[ "MIT" ]
null
null
null
src/_bar.py
yoshihikosuzuki/plotly_light
cef2465486e9147e27feae1193a1b4487e4fc543
[ "MIT" ]
null
null
null
from typing import Optional, Sequence
import plotly.graph_objects as go


def bar(x: Sequence,
        y: Sequence,
        text: Optional[Sequence] = None,
        width: Optional[int] = None,
        col: Optional[str] = None,
        opacity: float = 1,
        name: Optional[str] = None,
        show_legend: bool = False,
        show_init: bool = True) -> go.Bar:
    """Create a simple Trace object of a histogram.

    positional arguments:
      @ x : Coordinates of data on x-axis.
      @ y : Coordinates of data on y-axis.

    optional arguments:
      @ col         : Color of bars.
      @ opacity     : Opacity of bars.
      @ name        : Display name of the trace in legend.
      @ show_legend : Show this trace in legend.
      @ show_init   : Show this trace initially.
    """
    return go.Bar(x=x,
                  y=y,
                  text=text,
                  width=width,
                  marker_color=col,
                  opacity=opacity,
                  name=name,
                  showlegend=show_legend,
                  visible=None if show_init else "legendonly")
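As a usage sketch, the returned trace drops straight into a Plotly figure (the data values here are illustrative):

import plotly.graph_objects as go

trace = bar(x=["a", "b", "c"], y=[3, 1, 2], col="steelblue", name="counts")
go.Figure(trace).show()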
31.771429
62
0.539568
e6acb4fde9c00fed8d158a1a19ae4c34b7d7d64e
4,029
py
Python
pennylane/templates/subroutines/arbitrary_unitary.py
doomhammerhell/pennylane
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
[ "Apache-2.0" ]
3
2021-02-22T18:30:55.000Z
2021-02-23T10:54:58.000Z
pennylane/templates/subroutines/arbitrary_unitary.py
doomhammerhell/pennylane
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
[ "Apache-2.0" ]
null
null
null
pennylane/templates/subroutines/arbitrary_unitary.py
doomhammerhell/pennylane
f147f22d8d99ba5891edd45a6a1f7dd679c8a23c
[ "Apache-2.0" ]
1
2021-03-27T09:03:15.000Z
2021-03-27T09:03:15.000Z
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the ArbitraryUnitary template.
"""
import pennylane as qml
from pennylane.operation import Operation, AnyWires
from pennylane.ops import PauliRot

_PAULIS = ["I", "X", "Y", "Z"]


def _tuple_to_word(index_tuple):
    """Convert an integer tuple to the corresponding Pauli word.

    The Pauli operators are converted as ``0 -> I``, ``1 -> X``,
    ``2 -> Y``, ``3 -> Z``.

    Args:
        index_tuple (Tuple[int]): An integer tuple describing the Pauli word

    Returns:
        str: The corresponding Pauli word
    """
    return "".join([_PAULIS[i] for i in index_tuple])
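A concrete check of the conversion the docstring describes:

print(_tuple_to_word((0, 1, 2, 3)))  # IXYZ
print(_tuple_to_word((1, 1)))        # XX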
31.476563
107
0.647803
e6aec9eead70cf9709e4908f8e9466e087fc8de3
5,271
py
Python
vae_celeba.py
aidiary/generative-models-pytorch
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
[ "MIT" ]
null
null
null
vae_celeba.py
aidiary/generative-models-pytorch
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
[ "MIT" ]
null
null
null
vae_celeba.py
aidiary/generative-models-pytorch
c9ae23a4ecbe4bf8f82dbaf9e4e3e1e61530e6b0
[ "MIT" ]
null
null
null
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CelebA


if __name__ == '__main__':
    # data
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.CenterCrop(148),
        transforms.Resize(128),
        transforms.ToTensor()
    ])

    train_dataset = CelebA(root='data', split='train', transform=transform, download=False)
    val_dataset = CelebA(root='data', split='test', transform=transform, download=False)

    train_loader = DataLoader(train_dataset,
                              batch_size=32,
                              num_workers=8,
                              shuffle=True,
                              drop_last=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=32,
                            num_workers=8,
                            shuffle=False,
                            drop_last=True)

    # model (VanillaVAE is defined elsewhere in the repository; its
    # definition is not part of this snippet)
    model = VanillaVAE()

    # training
    tb_logger = TensorBoardLogger('lightning_logs', name='vanilla_vae_celeba', default_hp_metric=False)
    trainer = pl.Trainer(gpus=[0], max_epochs=200, logger=tb_logger)
    trainer.fit(model, train_loader, val_loader)
31.189349
103
0.592677
e6afcad02c1d49dbed0f7930d88f9219376906a4
2,686
py
Python
data/process_data.py
julat/DisasterResponse
140489e521a96dc2ff9c9a95f0ce4e99403f03af
[ "MIT" ]
null
null
null
data/process_data.py
julat/DisasterResponse
140489e521a96dc2ff9c9a95f0ce4e99403f03af
[ "MIT" ]
null
null
null
data/process_data.py
julat/DisasterResponse
140489e521a96dc2ff9c9a95f0ce4e99403f03af
[ "MIT" ]
null
null
null
# Import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine


def load_data(messages_filepath, categories_filepath):
    """
    Load the data from the disaster response csvs

    Parameters:
        messages_filepath (str): Path to messages csv
        categories_filepath (str): Path to categories csv

    Returns:
        DataFrame: Merged data
    """
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    df = pd.merge(messages, categories, on='id')
    return df


def clean_data(df):
    """
    Cleans the categories

    Parameters:
        df (DataFrame): Messy DataFrame

    Returns:
        DataFrame: Cleaned dataframe
    """
    categories = df['categories'].str.split(pat=';', expand=True)
    row = categories.iloc[[1]]
    category_colnames = row.apply(lambda x: x.values[0].split("-")[0])
    categories.columns = category_colnames

    for column in categories:
        categories[column] = categories[column].astype(str).str[-1:]
        categories[column] = categories[column].astype(int)
        categories[column] = categories[column].map(lambda x: 1 if x > 1 else x)

    df.drop(['categories'], axis=1, inplace=True)
    df = pd.concat([df, categories], axis=1)
    df.drop_duplicates(inplace=True)
    return df


def save_data(df, database_filename):
    """
    Saves the DataFrame

    Parameters:
        df (DataFrame): Cleaned DataFrame
        database_filename (str): Path to the SQLite Database
    """
    engine = create_engine('sqlite:///' + database_filename + '.db')
    df.to_sql(database_filename, engine, index=False, if_exists='replace')


if __name__ == '__main__':
    main()  # main() is defined in the full script; it is elided from this snippet
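The elided main() presumably chains the three helpers; a minimal sketch of that flow (the CSV names are assumptions, not taken from the repository):

df = load_data("disaster_messages.csv", "disaster_categories.csv")
df = clean_data(df)
save_data(df, "DisasterResponse")  # writes sqlite:///DisasterResponse.db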
28.574468
80
0.652271
e6b027e44688ca01138133b153494c3bc7370758
3,658
py
Python
contrail-controller/files/plugins/check_contrail_status_controller.py
atsgen/tf-charms
81110aef700b2f227654d52709614ddb3d62ba17
[ "Apache-2.0" ]
null
null
null
contrail-controller/files/plugins/check_contrail_status_controller.py
atsgen/tf-charms
81110aef700b2f227654d52709614ddb3d62ba17
[ "Apache-2.0" ]
null
null
null
contrail-controller/files/plugins/check_contrail_status_controller.py
atsgen/tf-charms
81110aef700b2f227654d52709614ddb3d62ba17
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3

import subprocess
import sys
import json

SERVICES = {
    'control': [
        'control',
        'nodemgr',
        'named',
        'dns',
    ],
    'config-database': [
        'nodemgr',
        'zookeeper',
        'rabbitmq',
        'cassandra',
    ],
    'webui': [
        'web',
        'job',
    ],
    'config': [
        'svc-monitor',
        'nodemgr',
        'device-manager',
        'api',
        'schema',
    ],
}

WARNING = 1
CRITICAL = 2


if __name__ == '__main__':
    cver = sys.argv[1]
    if '.' in str(cver):
        if cver == '5.0':
            version = 500
        elif cver == '5.1':
            version = 510
        else:
            print("CRITICAL: invalid version: {}".format(cver))
            sys.exit(CRITICAL)
    elif not cver.isdigit():
        print("CRITICAL: invalid version: {}".format(cver))
        sys.exit(CRITICAL)
    else:
        version = int(cver)

    check_contrail_status(SERVICES, version=version)
29.739837
193
0.54538
e6b042b87a1d5f3672a72f7fa6b5679e20f39682
2,693
py
Python
leaderboard-server/leaderboard-server.py
harnitsignalfx/skogaming
c860219c89149d686106dfb7a93d27df39830842
[ "MIT" ]
1
2021-03-01T20:56:24.000Z
2021-03-01T20:56:24.000Z
leaderboard-server/leaderboard-server.py
harnitsignalfx/skogaming
c860219c89149d686106dfb7a93d27df39830842
[ "MIT" ]
null
null
null
leaderboard-server/leaderboard-server.py
harnitsignalfx/skogaming
c860219c89149d686106dfb7a93d27df39830842
[ "MIT" ]
1
2021-02-20T17:36:47.000Z
2021-02-20T17:36:47.000Z
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import simplejson as json
from leaderboard.leaderboard import Leaderboard
import uwsgidecorators
import signalfx

app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
cors = CORS(app)

highscore_lb_starship = Leaderboard('highscores-starship', host='redis-instance')

sfx = signalfx.SignalFx(ingest_endpoint='http://otelcol:9943').ingest('token-at-collector')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=6001)
26.663366
95
0.649833
e6b183e72d2aff2b604bbf82d32e69244b409f59
1,591
py
Python
meshio/_cli/_info.py
jorgensd/meshio
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
[ "MIT" ]
1
2020-09-01T11:26:15.000Z
2020-09-01T11:26:15.000Z
meshio/_cli/_info.py
jorgensd/meshio
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
[ "MIT" ]
null
null
null
meshio/_cli/_info.py
jorgensd/meshio
0600ac9e9e8d1e1a27d5f3f2f4235414f4482cac
[ "MIT" ]
null
null
null
import argparse

import numpy as np

from .._helpers import read, reader_map
from ._helpers import _get_version_text
26.966102
87
0.637335
e6b2c4874559385c0807dca69b9f07a62e9a1d08
1,324
py
Python
ccslink/Zip.py
Data-Linkage/ccslink
ee1105888d43c6a2b307deb96ddede34d03a965f
[ "MIT" ]
null
null
null
ccslink/Zip.py
Data-Linkage/ccslink
ee1105888d43c6a2b307deb96ddede34d03a965f
[ "MIT" ]
null
null
null
ccslink/Zip.py
Data-Linkage/ccslink
ee1105888d43c6a2b307deb96ddede34d03a965f
[ "MIT" ]
null
null
null
import os, shutil
from CCSLink import Spark_Session as SS


def add_zipped_dependency(zip_from, zip_target):
    """
    This method creates a zip of the code to be sent to the executors.
    It essentially zips the Python packages installed by PIP and
    submits them via addPyFile in the current PySpark context

    E.g. if we want to submit the "metaphone" package so that we can
    `import metaphone` and use its methods inside a UDF, we run this
    method with:
    - zip_from = /home/cdsw/.local/lib/python3.6/site-packages/
    - zip_target = metaphone
    """

    # change this to a path in your project
    zipped_fpath = f'/home/cdsw/zipped_packages/{zip_target}'
    if os.path.exists(zipped_fpath + '.zip'):
        os.remove(zipped_fpath + '.zip')

    shutil.make_archive(
        # path to the resulting zipped file (without the suffix)
        base_name=zipped_fpath,  # resulting filename

        # specifies the format --> implies .zip suffix
        format='zip',

        # the root dir from where we want to zip
        root_dir=zip_from,

        # the dir (relative to root dir) which we want to zip
        # (all files in the final zip will have this prefix)
        base_dir=zip_target,
    )

    # add the files to the executors
    SS.SPARK().sparkContext.addPyFile(f'{zipped_fpath}.zip')
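Spelling out the docstring's own example as a call:

add_zipped_dependency(
    zip_from="/home/cdsw/.local/lib/python3.6/site-packages/",
    zip_target="metaphone",
)
# executors can now `import metaphone` inside UDFs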
33.1
70
0.676737
e6b2fbff1fb4792ec87b5e0830c85e32ea769936
2,484
py
Python
moltemplate/nbody_Angles.py
Mopolino8/moltemplate
363df364fcb012e8e4beb7bc616a77d696b8b707
[ "BSD-3-Clause" ]
null
null
null
moltemplate/nbody_Angles.py
Mopolino8/moltemplate
363df364fcb012e8e4beb7bc616a77d696b8b707
[ "BSD-3-Clause" ]
null
null
null
moltemplate/nbody_Angles.py
Mopolino8/moltemplate
363df364fcb012e8e4beb7bc616a77d696b8b707
[ "BSD-3-Clause" ]
1
2019-11-24T17:32:28.000Z
2019-11-24T17:32:28.000Z
try:
    from .nbody_graph_search import Ugraph
except (SystemError, ValueError):
    # not installed as a package
    from nbody_graph_search import Ugraph

# This file defines how 3-body angle interactions are generated by moltemplate
# by default.  It can be overridden by supplying your own custom file.

# To find 3-body "angle" interactions, we would use this subgraph:
#
#       *---*---*           =>  1st bond connects atoms 0 and 1
#       0   1   2               2nd bond connects atoms 1 and 2
#

bond_pattern = Ugraph([(0, 1), (1, 2)])
# (Ugraph atom indices begin at 0, not 1)


# The next function eliminates the redundancy between 0-1-2 and 2-1-0:
def canonical_order(match):
    """
    Before defining a new interaction, we must check to see if an
    interaction between these same 3 atoms has already been created
    (perhaps listed in a different, but equivalent order).
    If we don't check for this, we will create many unnecessary redundant
    interactions (which can slow down the simulation).
    To avoid this, I define a "canonical_order" function which sorts the atoms
    and bonds in a way which is consistent with the symmetry of the interaction
    being generated...  Later the re-ordered list of atom and bond ids will be
    tested against the list of atom/bond ids in the matches-found-so-far,
    before it is added to the list of interactions found so far.  Note that
    the energy of an angle interaction is a function of the angle between
    three consecutively bonded atoms (referred to here as: 0,1,2).
    This angle does not change when swapping the atoms at either end (0 and 2).
    So it does not make sense to define a separate 3-body angle interaction
    between atoms 0,1,2 AS WELL AS an interaction between 2,1,0.
    So we sort the atoms and bonds so that the first atom always has
    a lower atomID than the third atom.  (Later we will check to see if we
    have already defined an interaction between these 3 atoms.  If not then
    we create a new one.)
    """
    # match[0][0:2] contains the ID numbers for the 3 atoms in the match
    atom0 = match[0][0]
    atom1 = match[0][1]
    atom2 = match[0][2]
    # match[1][0:1] contains the ID numbers for the 2 bonds
    bond0 = match[1][0]
    bond1 = match[1][1]
    if atom0 < atom2:
        # return ((atom0, atom1, atom2), (bond0, bond1))  same thing as:
        return match
    else:
        return ((atom2, atom1, atom0), (bond1, bond0))
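A small worked example of the canonicalization (the IDs are hypothetical): the atom triple is flipped whenever the first atom ID exceeds the third, and the bond pair flips with it.

match = ((5, 2, 1), (10, 11))
print(canonical_order(match))                  # ((1, 2, 5), (11, 10))
print(canonical_order(((1, 2, 5), (10, 11))))  # unchanged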
42.827586
79
0.68599
e6b3c1a04d6b23957a4328b1a4d335f1079479f3
8,099
py
Python
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
3,680
2016-07-26T18:28:11.000Z
2022-03-31T09:55:05.000Z
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
1,759
2016-07-26T19:19:59.000Z
2022-03-31T21:24:00.000Z
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py
DougRogers-DigitalFish/USD
d8a405a1344480f859f025c4f97085143efacb53
[ "BSD-2-Clause" ]
904
2016-07-26T18:33:40.000Z
2022-03-31T09:55:16.000Z
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
#    names, trademarks, service marks, or product names of the Licensor
#    and its affiliates, except as required to comply with Section 4(c) of
#    the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.

The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.

The names of the created variations will be taken directly from the
basename of their corresponding input file.
'''

from __future__ import print_function

from pxr import Tf, Kind, Sdf, Usd

# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with
#   Pixar convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()

if __name__ == "__main__":
    import argparse, os, sys

    descr = __doc__.strip()
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                     description=descr)
    parser.add_argument('assetName')
    parser.add_argument('variantFiles', nargs='+')
    parser.add_argument(
        '-k', '--kind', default='component', action='store', metavar='kind',
        help="Model kind, one of: component, group, or assembly")
    parser.add_argument(
        '-v', '--variantSet', default='', action='store', metavar='variantSet',
        help="Variantset to create to modulate variantFiles. Can be elided "
             "if only one file is supplied")
    parser.add_argument(
        '-i', '--identifier', default='', action='store', metavar='identifier',
        help="The identifier you would expect your Ar asset-resolver plugin "
             "to resolve to the (installed) assetName.usd file this script "
             "creates.  If unspecified, defaults to assetName.usd")
    parser.add_argument(
        '-d', '--defaultVariantSelection', default='', action='store',
        metavar='defaultVariantSelection',
        help="This variant will be selected by default when the asset is "
             "added to a composition.  If unspecified, will be the variant "
             "for 'variantFile1'")

    args = parser.parse_args()

    if not args.assetName or args.assetName == '':
        parser.error("No assetName specified")

    stage = CreateModelStage(args.assetName,
                             assetIdentifier=args.identifier,
                             kind=args.kind,
                             filesToReference=args.variantFiles,
                             variantSetName=args.variantSet,
                             defaultVariantSelection=args.defaultVariantSelection)

    if stage:
        stage.GetRootLayer().Save()
        exit(0)
    else:
        exit(1)
44.256831
85
0.684159
e6b3c20df06992b958887a2ed1583c032b8b6295
7,079
py
Python
src/main.py
fbdp1202/pyukf_kinect_body_tracking
c44477149cfc22abfe9121c2604dc284c93fbd42
[ "MIT" ]
7
2020-04-23T06:03:10.000Z
2022-01-16T21:16:23.000Z
src/main.py
fbdp1202/pyukf_kinect_body_tracking
c44477149cfc22abfe9121c2604dc284c93fbd42
[ "MIT" ]
null
null
null
src/main.py
fbdp1202/pyukf_kinect_body_tracking
c44477149cfc22abfe9121c2604dc284c93fbd42
[ "MIT" ]
3
2020-07-12T15:07:52.000Z
2021-12-05T09:27:18.000Z
import sys
import os

sys.path.append('./code/')
from skeleton import Skeleton
from read_data import *
from calibration import Calibration
from ukf_filter import ukf_Filter_Controler
from canvas import Canvas
from regression import *

import time
from functools import wraps
import os


def make_folder(folder_name):
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    return folder_name


def get_save_skeleton_data_folder_name(person_name, pos_mode, model):
    folder_name = make_folder('result')
    folder_name = make_folder(folder_name + '/' + person_name)
    folder_name = make_folder(folder_name + '/' + pos_mode)
    folder_name = make_folder(folder_name + '/' + model)
    return folder_name + '/'


def save_sk_data_to_csv(folder_name, filename, data):
    filename = folder_name + filename
    f = open(filename, "w", encoding="UTF-8")
    for i in range(len(data)):
        for j in range(len(data[i])):
            for k in range(3):
                f.write(str(data[i][j][k]))
                if j == (len(data[i]) - 1) and k == 2:
                    f.write('\n')
                else:
                    f.write(',')
34.198068
368
0.759288
e6b3d6bc9a4bc463c1dd688594551748653895d4
2,683
py
Python
cfgov/scripts/initial_data.py
Mario-Kart-Felix/cfgov-refresh
7978fedeb7aaf4d96a87720e6545567085e056a9
[ "CC0-1.0" ]
1
2019-12-29T17:50:07.000Z
2019-12-29T17:50:07.000Z
cfgov/scripts/initial_data.py
ascott1/cfgov-refresh
9c916aaed3a48110a199eb4675474290a51f815d
[ "CC0-1.0" ]
1
2021-04-22T01:09:52.000Z
2021-04-22T01:09:52.000Z
cfgov/scripts/initial_data.py
ascott1/cfgov-refresh
9c916aaed3a48110a199eb4675474290a51f815d
[ "CC0-1.0" ]
1
2021-02-02T08:59:38.000Z
2021-02-02T08:59:38.000Z
from __future__ import print_function

import json
import os

from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from wagtail.wagtailcore.models import Page, Site

from v1.models import HomePage, BrowseFilterablePage
34.844156
97
0.666045
e6b40095f02ec8f60d6c2306673d054478953aba
1,456
py
Python
Scripts/compareOutputs.py
harmim/vut-avs-project1
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
[ "MIT" ]
null
null
null
Scripts/compareOutputs.py
harmim/vut-avs-project1
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
[ "MIT" ]
null
null
null
Scripts/compareOutputs.py
harmim/vut-avs-project1
d36e6b5cdebce748d2bdf2afc43950968ecf0a91
[ "MIT" ]
null
null
null
# Simple python3 script to compare output with a reference output.
# Usage: python3 compareOutputs.py testOutput.h5 testRefOutput.h5
import sys

import h5py
import numpy as np

if len(sys.argv) != 3:
    print("Expected two arguments. Output and reference output file.")
    sys.exit(1)

filename = sys.argv[1]
ref_filename = sys.argv[2]

f = h5py.File(filename, 'r')
ref_f = h5py.File(ref_filename, 'r')

out = np.array(f['output_data'])
out_ref = np.array(ref_f['output_data'])

if out.shape != out_ref.shape:
    print("The files do not contain the same number of outputs.")
    print("The output size: {0}.".format(out.shape[0]))
    print("The reference size: {0}.".format(out_ref.shape[0]))
    sys.exit(1)

ref_value = np.copy(out_ref)
ref_value[ref_value == 0.0] = 1.0
error = (out_ref - out) / ref_value
maximal_error = np.amax(error)
print("Maximal error between the output and the reference is {0}.".format(maximal_error))

if maximal_error < 10**(-6):
    print("OK:Output seems to match the reference.")
    sys.exit(0)

print("Failure:Output does not match the reference.")

maximal_error = np.amax(error, axis=1)
print(maximal_error.shape)
for i in range(0, 5):
    print("Image", i)
    print("Expected:", end="")
    for j in range(0, 10):
        print(out_ref[i, j], end=" ")
    print("\nGot:", end="")
    for j in range(0, 10):
        print(out[i, j], end=" ")
    print("\nMaximal error:", maximal_error[i], "\n")

sys.exit(1)
26.472727
89
0.666896
e6b45faace1959ed1daf554b861e2a396b78702b
222
py
Python
sanctuary/tag/serializers.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
1
2017-05-29T11:53:06.000Z
2017-05-29T11:53:06.000Z
sanctuary/tag/serializers.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
null
null
null
sanctuary/tag/serializers.py
20CM/Sanctuary
14694d9bd6376bdc05248741a91df778400e9f66
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from rest_framework import serializers

from .models import Tag
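
# --- Hedged sketch (assumption): the dump truncates the file after the
# imports. A minimal serializer consistent with those imports could look
# like this; the field selection is an illustrative assumption, not the
# original code.
class TagSerializer(serializers.ModelSerializer):
    """Serialize Tag instances for the REST API."""

    class Meta:
        model = Tag
        fields = '__all__'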
18.5
49
0.689189
e6b462f3efa3e6e931f1a4ef9f1a10fd45f8f99c
571
py
Python
examples/management_api/aliveness_test.py
cloudamqp/amqpstorm
35eb8edc5f0c2ea3839e93940bf9d0e5f8f4242e
[ "MIT" ]
null
null
null
examples/management_api/aliveness_test.py
cloudamqp/amqpstorm
35eb8edc5f0c2ea3839e93940bf9d0e5f8f4242e
[ "MIT" ]
null
null
null
examples/management_api/aliveness_test.py
cloudamqp/amqpstorm
35eb8edc5f0c2ea3839e93940bf9d0e5f8f4242e
[ "MIT" ]
null
null
null
from amqpstorm.management import ApiConnectionError
from amqpstorm.management import ApiError
from amqpstorm.management import ManagementApi

if __name__ == '__main__':
    API = ManagementApi('http://127.0.0.1:15672', 'guest', 'guest')
    try:
        result = API.aliveness_test('/')
        if result['status'] == 'ok':
            print("RabbitMQ is alive!")
        else:
            print("RabbitMQ is not alive! :(")
    except ApiConnectionError as why:
        print('Connection Error: %s' % why)
    except ApiError as why:
        print('ApiError: %s' % why)
33.588235
67
0.635727
e6b741334252c43868c1ae3bb0661b811481f368
1,048
py
Python
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
vishalbelsare/zvt
d55051147274c0a4157f08ec60908c781a323c8f
[ "MIT" ]
2,032
2019-04-16T14:10:32.000Z
2022-03-31T12:40:13.000Z
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
vishalbelsare/zvt
d55051147274c0a4157f08ec60908c781a323c8f
[ "MIT" ]
162
2019-05-07T09:57:46.000Z
2022-03-25T16:23:08.000Z
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py
vishalbelsare/zvt
d55051147274c0a4157f08ec60908c781a323c8f
[ "MIT" ]
755
2019-04-30T10:25:16.000Z
2022-03-29T17:50:49.000Z
# -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api

if __name__ == "__main__":
    recorder = EMStockhkRecorder()
    recorder.run()

# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
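
# --- Hedged sketch (assumption): the EMStockhkRecorder class used above is
# elided by the dump. Based on the imports (Recorder, Stockhk, em_api,
# df_to_db), a plausible shape is sketched below in comments; the em_api
# call and attribute names are assumptions, not the original code.
#
# class EMStockhkRecorder(Recorder):
#     provider = "em"
#     data_schema = Stockhk
#
#     def run(self):
#         df = em_api.get_tradable_list(entity_type="stockhk")  # assumed API
#         df_to_db(df=df, data_schema=self.data_schema, provider=self.provider,
#                  force_update=True)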
33.806452
115
0.711832
e6b7cb0bb44951e0d2ab9c8433c064285f85c4f7
6,362
py
Python
src/main.py
yanwunhao/auto-mshts
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
[ "MIT" ]
null
null
null
src/main.py
yanwunhao/auto-mshts
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
[ "MIT" ]
null
null
null
src/main.py
yanwunhao/auto-mshts
7a4b690bbb6ae55e2f6fad77d176c2c0822db7a0
[ "MIT" ]
null
null
null
from util.io import read_setting_json, read_0h_data, read_24h_data, draw_single_curve
from util.convert import split_array_into_samples, calculate_avg_of_sample, convert_to_percentage
from util.calculus import calculate_summary_of_sample, fit_sigmoid_curve

import matplotlib.pyplot as plt
import numpy as np
import csv

setting = read_setting_json()
setting = setting["rule"]

# load experiment parameter
# experiment parameter is stored in file of ./data/setting.json
initial_filename = setting["0h_datafile"]
final_filename = setting["24h_datafile"]

# sample width and height are the size of each sample area
sample_width = setting["sample_width"]
sample_height = setting["sample_height"]
dilution_protocol = setting["dilution_protocol"]

# width of each dilution
basic_width = setting["basic_width"]

# number of each control group
control_number_list = setting["control_number"]

# output directory
output_directory = setting["output_directory"]

# import initial concentration and calculate x_data
initial_concentration = setting["initial_concentration"]
repeat_times = int(sample_width / basic_width)
x_data = []
current_concentration = initial_concentration
for i in range(repeat_times):
    x_data.append(current_concentration)
    current_concentration /= dilution_protocol

# load raw data
initial_sd_data = read_0h_data()
final_sd_data = read_24h_data()

# reshape data into the size of board
rebuild_0h_data = initial_sd_data.reshape((32, -1))
rebuild_24h_data = final_sd_data.reshape((32, -1))

# reshape data into a 2-dimensional array containing each group's data
sample_divided_list_0h = split_array_into_samples(rebuild_0h_data, sample_width, sample_height)
sample_divided_list_24h = split_array_into_samples(rebuild_24h_data, sample_width, sample_height)

# handle data of control groups
control_0h_summary = 0
for number in control_number_list:
    number = number - 1
    sample = sample_divided_list_0h[number]
    control_0h_summary = control_0h_summary + calculate_summary_of_sample(sample)
control_0h_average = control_0h_summary / (sample_width * sample_height * len(control_number_list))

control_24h_summary = 0
for number in control_number_list:
    number = number - 1
    sample = sample_divided_list_24h[number]
    control_24h_summary = control_24h_summary + calculate_summary_of_sample(sample)
control_24h_average = control_24h_summary / (sample_width * sample_height * len(control_number_list))

# calculate standard deviation of each grid
sd_matrix = []
for line in rebuild_24h_data:
    new_line = []
    for element in line:
        sd_data = (float(element) - control_0h_average.item()) \
                  / (control_24h_average.item() - control_0h_average.item())
        new_line.append(sd_data)
    sd_matrix.append(new_line)

sd_matrix = np.array(sd_matrix)

# split array into different samples
sd_groups = split_array_into_samples(sd_matrix, sample_width, sample_height)
sd_groups = np.array(sd_groups, dtype=float)

RESULT_LIST = []
for sample in sd_groups:
    result = calculate_avg_of_sample(sample, sample_width, basic_width)
    RESULT_LIST.append(result)
RESULT_LIST = np.array(RESULT_LIST)

FULL_RESULT_LIST = []
for group in sd_groups:
    x_index = 0
    y_index = 0
    sample_buffer = []
    data_buffer = []
    while y_index < sample_height:
        while x_index < basic_width:
            x = x_index
            while x < sample_width:
                data_buffer.append(group[y_index][x])
                x += basic_width
            sample_buffer.append(data_buffer)
            data_buffer = []
            x_index += 1
        y_index += 1
        x_index = 0
    FULL_RESULT_LIST.append(sample_buffer)
FULL_RESULT_LIST = np.array(FULL_RESULT_LIST, dtype=float)

optional_color = ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']

EC50_LIST = []
EC50_AVG_LIST = []
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
    sample_num += 1
    fig, ax = plt.subplots()
    index = 0
    ax.set_title('Sample ' + str(sample_num))
    x_buffer = []
    x_sampling_buffer = []
    y_sampling_buffer = []
    for repeat in SAMPLE:
        x, y, x_sampling, y_sampling = fit_sigmoid_curve(x_data, repeat)
        x_buffer.append(x)
        x_sampling_buffer.append(x_sampling)
        y_sampling_buffer.append(y_sampling)
        draw_single_curve(ax, x, y, x_sampling, y_sampling, optional_color[index])
        index += 1
    EC50_LIST.append(x_buffer)

    # draw the average result
    avg = np.mean(x_buffer)
    EC50_AVG_LIST.append(avg)

    # draw the average curve
    x_sampling_buffer = np.array(x_sampling_buffer).T
    y_sampling_buffer = np.array(y_sampling_buffer).T
    x_sampling_avg = []
    y_sampling_avg = []
    for line in x_sampling_buffer:
        x_sampling_avg.append(np.mean(line))
    for line in y_sampling_buffer:
        y_sampling_avg.append(np.mean(line))
    ax.plot(avg, 0.5, 'o', color='black')
    ax.plot(x_sampling_avg, y_sampling_avg, color='black')
    plt.savefig("./output/" + output_directory + "/figs" + "/Sample " + str(sample_num))
    plt.cla()
    plt.close(fig)

# output grouped result
output_f_grouped = open("./output/" + output_directory + "/result_grouped.csv", "w")
csv_writer_grouped = csv.writer(output_f_grouped)
csv_writer_grouped.writerow(["initial concentration: " + str(initial_concentration),
                             "dilution protocol: " + str(dilution_protocol)])
csv_writer_grouped.writerow("")
sample_num = 0
for SAMPLE in FULL_RESULT_LIST:
    SAMPLE = SAMPLE.T
    sample_num += 1
    csv_writer_grouped.writerow(["Sample " + str(sample_num)])
    for repeat in SAMPLE:
        csv_writer_grouped.writerow(repeat)
    csv_writer_grouped.writerow("")
    ec50_result_list = []
    for ec50_index in EC50_LIST[sample_num - 1]:
        ec50_result_list.append(10 ** ec50_index)
    csv_writer_grouped.writerow(ec50_result_list)
    average_ec50 = np.power(10, EC50_AVG_LIST[sample_num - 1])
    csv_writer_grouped.writerow([])
    csv_writer_grouped.writerow(["Average EC50", "Std"])
    csv_writer_grouped.writerow([average_ec50, np.std(ec50_result_list)])
    csv_writer_grouped.writerow("")
output_f_grouped.close()

output_f_full = open("./output/" + output_directory + "/result_full.csv", "w")
csv_writer_full = csv.writer(output_f_full)
for line in sd_matrix:
    csv_writer_full.writerow(line)
output_f_full.close()

print("Finished")
31.651741
133
0.735618
e6b8a82e6b0282dee965fc93d3c31abaae481d21
6,492
py
Python
twisted/names/root.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
1
2021-01-27T19:11:21.000Z
2021-01-27T19:11:21.000Z
twisted/names/root.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
null
null
null
twisted/names/root.py
twonds/twisted
d6e270a465d371c3bed01bf369af497b77eb9f1e
[ "Unlicense", "MIT" ]
3
2017-01-04T01:24:15.000Z
2020-06-18T16:14:56.000Z
# -*- test-case-name: twisted.names.test.test_rootresolve -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Resolver implementation for querying successive authoritative servers to
lookup a record, starting from the root nameservers.

@author: Jp Calderone

todo::
    robustify it
    break discoverAuthority into several smaller functions
    documentation
"""

from twisted.internet import defer
from twisted.names import dns
from twisted.names import common


def lookupNameservers(host, atServer, p=None):
    # print 'Nameserver lookup for', host, 'at', atServer, 'with', p
    if p is None:
        p = dns.DNSDatagramProtocol(_DummyController())
        p.noisy = False
    return retry(
        (1, 3, 11, 45),                    # Timeouts
        p,                                 # Protocol instance
        (atServer, dns.PORT),              # Server to query
        [dns.Query(host, dns.NS, dns.IN)]  # Question to ask
    )


def lookupAddress(host, atServer, p=None):
    # print 'Address lookup for', host, 'at', atServer, 'with', p
    if p is None:
        p = dns.DNSDatagramProtocol(_DummyController())
        p.noisy = False
    return retry(
        (1, 3, 11, 45),                    # Timeouts
        p,                                 # Protocol instance
        (atServer, dns.PORT),              # Server to query
        [dns.Query(host, dns.A, dns.IN)]   # Question to ask
    )


def extractAuthority(msg, cache):
    records = msg.answers + msg.authority + msg.additional
    nameservers = [r for r in records if r.type == dns.NS]

    # print 'Records for', soFar, ':', records
    # print 'NS for', soFar, ':', nameservers

    if not nameservers:
        return None, nameservers
    if not records:
        raise IOError("No records")

    for r in records:
        if r.type == dns.A:
            cache[str(r.name)] = r.payload.dottedQuad()

    for r in records:
        if r.type == dns.NS:
            if str(r.payload.name) in cache:
                return cache[str(r.payload.name)], nameservers
            for addr in records:
                if addr.type == dns.A and addr.name == r.name:
                    return addr.payload.dottedQuad(), nameservers
    return None, nameservers


def discoverAuthority(host, roots, cache=None, p=None):
    if cache is None:
        cache = {}

    rootAuths = list(roots)

    parts = host.rstrip('.').split('.')
    parts.reverse()

    authority = rootAuths.pop()

    soFar = ''
    for part in parts:
        soFar = part + '.' + soFar
        # print '///////',  soFar, authority, p
        msg = defer.waitForDeferred(lookupNameservers(soFar, authority, p))
        yield msg
        msg = msg.getResult()

        newAuth, nameservers = extractAuthority(msg, cache)

        if newAuth is not None:
            # print "newAuth is not None"
            authority = newAuth
        else:
            if nameservers:
                r = str(nameservers[0].payload.name)
                # print 'Recursively discovering authority for', r
                authority = defer.waitForDeferred(
                    discoverAuthority(r, roots, cache, p))
                yield authority
                authority = authority.getResult()
                # print 'Discovered to be', authority, 'for', r
##            else:
##                # print 'Doing address lookup for', soFar, 'at', authority
##                msg = defer.waitForDeferred(lookupAddress(soFar, authority, p))
##                yield msg
##                msg = msg.getResult()
##                records = msg.answers + msg.authority + msg.additional
##                addresses = [r for r in records if r.type == dns.A]
##                if addresses:
##                    authority = addresses[0].payload.dottedQuad()
##                else:
##                    raise IOError("Resolution error")
    # print "Yielding authority", authority
    yield authority

discoverAuthority = defer.deferredGenerator(discoverAuthority)


def bootstrap(resolver):
    """Lookup the root nameserver addresses using the given resolver

    Return a Resolver which will eventually become a C{root.Resolver}
    instance that has references to all the root servers that we were able
    to look up.
    """
    domains = [chr(ord('a') + i) for i in range(13)]
    # f = lambda r: (log.msg('Root server address: ' + str(r)), r)[1]
    f = lambda r: r
    L = [resolver.getHostByName('%s.root-servers.net' % d).addCallback(f)
         for d in domains]
    d = defer.DeferredList(L)
    d.addCallback(lambda r: Resolver([e[1] for e in r if e[0]]))
    return DeferredResolver(d)
33.989529
91
0.59658
e6b8dc6f73954e378a1c4ed802de05ace9457d1e
2,056
py
Python
tools/apply_colormap_dir.py
edwardyehuang/iDS
36bde3a9e887eb7e1a8d88956cf041909ee84da4
[ "MIT" ]
null
null
null
tools/apply_colormap_dir.py
edwardyehuang/iDS
36bde3a9e887eb7e1a8d88956cf041909ee84da4
[ "MIT" ]
null
null
null
tools/apply_colormap_dir.py
edwardyehuang/iDS
36bde3a9e887eb7e1a8d88956cf041909ee84da4
[ "MIT" ]
null
null
null
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================

import os, sys

rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(1, rootpath)

import tensorflow as tf
import numpy as np

from PIL import Image
from absl import app
from absl import flags
from common_flags import FLAGS

from ids.voc2012 import get_colormap as get_voc2012_colormap
from ids.cityscapes_fine import get_colormap as get_cityscapes_colormap

flags.DEFINE_string("input_dir", None, "input dir path")
flags.DEFINE_string("output_dir", None, "output dir path")
flags.DEFINE_string("colormap", "voc2012", "colormap name")
flags.DEFINE_integer("ignore_label", 255, "ignore label")

if __name__ == "__main__":
    app.run(main)
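
# --- Hedged sketch (assumption): the main() passed to app.run() above is
# elided by the dump. Given the flags, a plausible reconstruction reads each
# label image in input_dir, indexes the chosen colormap with it, and writes
# the colorized result to output_dir; the body below is illustrative only.
#
# def main(argv):
#     colormap = (get_cityscapes_colormap() if FLAGS.colormap == "cityscapes"
#                 else get_voc2012_colormap())
#     os.makedirs(FLAGS.output_dir, exist_ok=True)
#     for name in os.listdir(FLAGS.input_dir):
#         label = np.array(Image.open(os.path.join(FLAGS.input_dir, name)))
#         label[label == FLAGS.ignore_label] = 0  # map ignore label to background
#         Image.fromarray(colormap[label].astype(np.uint8)).save(
#             os.path.join(FLAGS.output_dir, name))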
25.073171
89
0.651751
e6b94f55392b1866e86cdeb5f1344d92e8c4dea3
6,007
py
Python
EDScoutCore/JournalInterface.py
bal6765/ed-scout
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
[ "MIT" ]
null
null
null
EDScoutCore/JournalInterface.py
bal6765/ed-scout
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
[ "MIT" ]
null
null
null
EDScoutCore/JournalInterface.py
bal6765/ed-scout
0c2ee6141a5cd86a660c2319d7c4be61614b13fb
[ "MIT" ]
null
null
null
from inspect import signature
import json
import time
import os
import glob
import logging
from pathlib import Path

from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from watchdog.events import PatternMatchingEventHandler

from EDScoutCore.FileSystemUpdatePrompter import FileSystemUpdatePrompter

default_journal_path = os.path.join(str(Path.home()), "Saved Games\\Frontier Developments\\Elite Dangerous")
journal_file_pattern = "journal.*.log"

logger = logging.getLogger('JournalInterface')

if __name__ == '__main__':
    journalWatcher = JournalWatcher()
    journalWatcher.set_callback(ReportJournalChange)

    print('running')
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print('done')

    journalWatcher.stop()
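
# --- Hedged sketch (assumption): JournalWatcher and ReportJournalChange are
# defined elsewhere in this module but elided by the dump. A callback
# consistent with the __main__ block above would simply log each new journal
# entry; the signature is an assumption, not the original code.
#
# def ReportJournalChange(new_entry):
#     logger.info('New journal entry: %s', new_entry)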
34.522989
164
0.632096
e6ba0dc97e3a9015e73a33e1fbadd9852c0606ea
1,355
py
Python
labs-python/lab9/add_files.py
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
3
2018-12-11T03:03:15.000Z
2020-02-11T19:38:07.000Z
labs-python/lab9/add_files.py
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
6
2017-05-31T20:58:32.000Z
2021-02-16T23:13:15.000Z
labs-python/lab9/add_files.py
xR86/ml-stuff
2a1b79408897171b78032ff2531ab6f8b18be6c4
[ "MIT" ]
null
null
null
import sqlite3

conn = sqlite3.connect('example.db')
c = conn.cursor()

import os
import hashlib
import time

get_dir_data('./')

# Save (commit) the changes
conn.commit()
conn.close()
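
# --- Hedged sketch (assumption): get_dir_data() is elided by the dump.
# Given the sqlite3/os/hashlib/time imports, a plausible version walks the
# directory tree and stores one row per file; the table name and schema are
# illustrative assumptions, not the original code.
#
# def get_dir_data(path):
#     c.execute("CREATE TABLE IF NOT EXISTS files (path TEXT, md5 TEXT, mtime REAL)")
#     for root, dirs, files in os.walk(path):
#         for name in files:
#             fpath = os.path.join(root, name)
#             with open(fpath, 'rb') as fh:
#                 digest = hashlib.md5(fh.read()).hexdigest()
#             c.execute("INSERT INTO files VALUES (?, ?, ?)",
#                       (fpath, digest, os.path.getmtime(fpath)))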
22.966102
83
0.710701
e6ba0ea03b3d3e18b20568efd5fed882e88148ea
1,834
py
Python
lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
null
null
null
lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
1
2015-02-21T18:48:19.000Z
2015-02-27T15:50:32.000Z
lib/galaxy/model/migrate/versions/0073_add_ldda_to_implicit_conversion_table.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
3
2015-02-22T13:34:16.000Z
2020-10-01T01:28:04.000Z
""" Migration script to add 'ldda_parent_id' column to the implicitly_converted_dataset_association table. """ from sqlalchemy import * from sqlalchemy.orm import * from migrate import * from migrate.changeset import * import logging log = logging.getLogger( __name__ ) metadata = MetaData()
44.731707
134
0.741003
e6bacf59de7852cf3a5c740a8171a4aa7144b26c
4,083
py
Python
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
a59a5c36dd5d4ac04205627827e792322742462d
[ "MIT" ]
3
2020-09-25T07:11:46.000Z
2022-02-08T05:07:34.000Z
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
a59a5c36dd5d4ac04205627827e792322742462d
[ "MIT" ]
null
null
null
Replication Python and R Codes/Figure_6/cMCA_ESS2018_LABCON_org.py
tzuliu/Contrastive-Multiple-Correspondence-Analysis-cMCA
a59a5c36dd5d4ac04205627827e792322742462d
[ "MIT" ]
1
2021-02-06T16:44:44.000Z
2021-02-06T16:44:44.000Z
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import prince
from sklearn import utils
from sklearn.cluster import DBSCAN
import itertools
from cmca import CMCA
from ccmca import CCMCA
from matplotlib import rc

plt.style.use('ggplot')

df = pd.read_csv("./uk2018.csv")

df["prtclcgb"].replace({5: 8, 9: 8, 10: 8, 11: 8, 12: 8, 13: 8, 15: 8, 19: 8}, inplace=True)
df["prtclcgb"].replace({6: 5}, inplace=True)
df["prtclcgb"].replace({7: 6}, inplace=True)
df["prtclcgb"].replace({8: 7}, inplace=True)

alpha = r'$ \alpha $'

tableau10 = {
    'teal': '#78B7B2',
    'blue': '#507AA6',
    'orange': '#F08E39',
    'red': '#DF585C',
    'green': '#5BA053',
    'purple': '#AF7BA1',
    'yellow': '#ECC854',
    'brown': '#9A7460',
    'pink': '#FD9EA9',
    'gray': '#BAB0AC',
    7: '#9A7460',
    1: '#507AA6',
    2: '#F08E39',
    3: '#DF585C',
    4: '#5BA053',
    0: '#78B7B2',
    6: '#ECC854',
    5: '#AF7BA1',
    8: '#FD9EA9',
    9: '#BAB0AC',
    -1: '#BAB0AC',
    99: '#BAB0AC',
    'LDP': '#507AA6',
    'DPJ': '#F08E39'
}

X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth = df_to_mat(df)
X = pd.concat([X_con, X_lab, X_ldp, X_snp, X_gre, X_uip, X_oth])
print(X_con.shape, X_lab.shape, X_ldp.shape, X_snp.shape, X_gre.shape, X_uip.shape, X_oth.shape, X.shape)

## Dictionary for Level and Party
party = {1: "Con", 2: "Lab", 3: "LD", 4: "SNP", 5: "Green", 6: "UKIP", 7: "Other"}

## Fitting cMCA and export plots
cmca = CMCA(n_components=2, copy=True, check_input=True)
cmca = cmca.fit(fg=X_lab.iloc[:, 0:(X_lab.shape[1] - 3)],
                bg=X_con.iloc[:, 0:(X_con.shape[1] - 3)], alpha=1.5)

Y_fg = np.array(cmca.transform(X_lab.iloc[:, 0:(X.shape[1] - 3)]))
Y_bg = np.array(cmca.transform(X_con.iloc[:, 0:(X.shape[1] - 3)]))
Y_fg_col = np.array(cmca.transform(X_lab.iloc[:, 0:(X.shape[1] - 3)], axis='col'))
prefix_to_info = cmca.gen_prefix_to_info()

f_6 = plt.figure()
plt.xlim([-2.5, 2.5])
plt.ylim([-2.5, 2.5])
plt.scatter(Y_fg[:, 0], Y_fg[:, 1], c=tableau10[X_lab["prtclcgb"].iloc[0]],
            label=party[X_lab["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
plt.scatter(Y_bg[:, 0], Y_bg[:, 1], c=tableau10[X_con["prtclcgb"].iloc[0]],
            label=party[X_con["prtclcgb"].iloc[0]], alpha=0.3, linewidths=0)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [handles[1], handles[0]]
labels = ["Con", "Lab"]
plt.legend(handles, labels, loc="lower right", shadow=False, scatterpoints=1, fontsize=8)
plt.xlabel('cPC1')
plt.ylabel('cPC2')
plt.title("cMCA (tg: LAB, bg: CON, " + str(alpha) + ": 1.5)")
plt.show()
f_6.savefig("cMCA_ESS2018_labcon_org.pdf", bbox_inches='tight')
35.198276
140
0.624051
e6bb99021b44144da731911de204a7afc66e8789
1,196
py
Python
Solutions/077.py
ruppysuppy/Daily-Coding-Problem-Solutions
37d061215a9af2ce39c51f8816c83039914c0d0b
[ "MIT" ]
70
2021-03-18T05:22:40.000Z
2022-03-30T05:36:50.000Z
Solutions/077.py
ungaro/Daily-Coding-Problem-Solutions
37d061215a9af2ce39c51f8816c83039914c0d0b
[ "MIT" ]
null
null
null
Solutions/077.py
ungaro/Daily-Coding-Problem-Solutions
37d061215a9af2ce39c51f8816c83039914c0d0b
[ "MIT" ]
30
2021-03-18T05:22:43.000Z
2022-03-17T10:25:18.000Z
""" Problem: Given a list of possibly overlapping intervals, return a new list of intervals where all overlapping intervals have been merged. The input list is not necessarily ordered in any way. For example, given [(1, 3), (5, 8), (4, 10), (20, 25)], you should return [(1, 3), (4, 10), (20, 25)]. """ from typing import List, Tuple if __name__ == "__main__": print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25)])) print(merge_intervals([(1, 3), (5, 8), (4, 10), (20, 25), (6, 12)])) """ SPECS: TIME COMPLEXITY: O(n) SPACE COMPLEXITY: O(n) """
26
84
0.622074
e6bbb3606fdfbd374577782a243b3f2af19f5e8d
3,163
py
Python
slackbot_wems/chris/slacklib.py
wray/wems
69caedfb8906f04175196d610a1ca516db01f72a
[ "MIT" ]
4
2016-11-10T21:43:01.000Z
2017-02-24T21:36:45.000Z
slackbot_wems/chris/slacklib.py
wray/wems
69caedfb8906f04175196d610a1ca516db01f72a
[ "MIT" ]
1
2019-04-26T10:48:34.000Z
2019-05-18T15:59:35.000Z
slackbot_wems/chris/slacklib.py
wray/wems
69caedfb8906f04175196d610a1ca516db01f72a
[ "MIT" ]
8
2016-11-09T22:25:14.000Z
2019-04-26T19:53:37.000Z
import time

import emoji

# Put your commands here
COMMAND1 = "testing testing"
COMMAND2 = "roger roger"
BLUEON = str("blue on")
BLUEOFF = str("blue off")
REDON = str("red on")
REDOFF = str("red off")
GREENON = str("green on")
GREENOFF = str("green off")
YELLOWON = str("yellow on")
YELLOWOFF = str("yellow off")
CLOCK = str("update clock")
SCRAMBLE = str('scramble the 7')
HACKER = str('hack the 7')
SINGLEREADING = str('light')

setup = False


# Your handling code goes in this function
def handle_command(command):
    """
    Determine if the command is valid. If so, take action and return
    a response, if necessary.
    """
    # `global` is required here: without it the assignment below would make
    # `setup` a local variable and the `if not setup` test would raise
    # UnboundLocalError.
    global setup
    if not setup:
        setup_gpio()
        setup = True

    response = ""
    if command.find(COMMAND1) >= 0:
        response = str("Surprise!")
    elif command.find(COMMAND2) >= 0:
        response = (emoji.emojize('Python\n is\n :thumbs_up: :thumbs_up: :thumbs_up:'))

    # Blue LED Commands
    elif command.find(BLUEON) >= 0:
        GPIO.output(17, True)
        response = emoji.emojize("" + "Turning :radio_button: ON...")
    elif command.find(BLUEOFF) >= 0:
        GPIO.output(17, False)
        response = emoji.emojize("" + "Turning :radio_button: OFF...")

    # Red LED Commands
    elif command.find(REDON) >= 0:
        GPIO.output(27, True)
        response = emoji.emojize("" + "Turning :red_circle: ON...")
    elif command.find(REDOFF) >= 0:
        GPIO.output(27, False)
        response = emoji.emojize("" + "Turning :red_circle: OFF...")

    # Green LED Commands
    elif command.find(GREENON) >= 0:
        GPIO.output(5, True)
        response = emoji.emojize("" + "Turning :green_apple: ON...")
    elif command.find(GREENOFF) >= 0:
        GPIO.output(5, False)
        response = emoji.emojize("" + "Turning :green_apple: OFF...")

    # Yellow LED Commands
    elif command.find(YELLOWON) >= 0:
        GPIO.output(22, True)
        response = emoji.emojize("" + "Turning :sunny: ON...")
    elif command.find(YELLOWOFF) >= 0:
        GPIO.output(22, False)
        response = emoji.emojize("" + "Turning :sunny: OFF...")

    # 7 Segment Commands
    elif command.find(CLOCK) >= 0:
        print('Updating the clock!')
        response = segment.updateClock()
    elif command.find(SCRAMBLE) >= 0:
        print(emoji.emojize(":egg: There is nothing better than scrambled eggs! :egg:"))
        response = segment.scramble()
    elif command.find(HACKER) >= 0:
        print('Message')
        response = segment.hacker()
    elif command.find(SINGLEREADING) >= 0:
        a = lite.printReading()
        a = int(a)
        time.sleep(1)
        print(a)
        response = ('Here is what the LDR Sensor said to me: ' + str(a))

    return response
26.140496
88
0.607651
e6bc053f9c92b2bf8e29c294b8627f9ea57a47fd
29
py
Python
rses/__init__.py
iScrE4m/RSES
88299f105ded8838243eab8b25ab1626c97d1179
[ "MIT" ]
1
2022-02-16T15:06:22.000Z
2022-02-16T15:06:22.000Z
rses/__init__.py
djetelina/RSES
88299f105ded8838243eab8b25ab1626c97d1179
[ "MIT" ]
null
null
null
rses/__init__.py
djetelina/RSES
88299f105ded8838243eab8b25ab1626c97d1179
[ "MIT" ]
null
null
null
# coding=utf-8
"""RSES :)"""
9.666667
14
0.482759
e6bdcee8c086f35e2a59b7fc819faaf2312d18c6
89,316
py
Python
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_gremlin_resources_operations.py
adewaleo/azure-sdk-for-python
169457edbea5e3c5557246cfcf8bd635d528bae4
[ "MIT" ]
2
2019-08-23T21:14:00.000Z
2021-09-07T18:32:34.000Z
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_gremlin_resources_operations.py
adewaleo/azure-sdk-for-python
169457edbea5e3c5557246cfcf8bd635d528bae4
[ "MIT" ]
2
2021-11-03T06:10:36.000Z
2021-12-01T06:29:39.000Z
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/operations/_gremlin_resources_operations.py
adewaleo/azure-sdk-for-python
169457edbea5e3c5557246cfcf8bd635d528bae4
[ "MIT" ]
1
2021-05-19T02:55:10.000Z
2021-05-19T02:55:10.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling

from .. import models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
52.538824
326
0.672242
e6bde93bee8b10728e74b15763f724d08484c86a
4,640
py
Python
homeassistant/components/tasmota/discovery.py
yura505/core
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
[ "Apache-2.0" ]
null
null
null
homeassistant/components/tasmota/discovery.py
yura505/core
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
[ "Apache-2.0" ]
null
null
null
homeassistant/components/tasmota/discovery.py
yura505/core
0fc5f4b0421c6c5204d3ccb562153ac3836441a9
[ "Apache-2.0" ]
null
null
null
"""Support for MQTT discovery.""" import asyncio import logging from hatasmota.discovery import ( TasmotaDiscovery, get_device_config as tasmota_get_device_config, get_entities_for_platform as tasmota_get_entities_for_platform, get_entity as tasmota_get_entity, has_entities_with_platform as tasmota_has_entities_with_platform, unique_id_from_hash, ) from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.typing import HomeAssistantType from .const import DOMAIN _LOGGER = logging.getLogger(__name__) SUPPORTED_PLATFORMS = [ "switch", ] ALREADY_DISCOVERED = "tasmota_discovered_components" CONFIG_ENTRY_IS_SETUP = "tasmota_config_entry_is_setup" DATA_CONFIG_ENTRY_LOCK = "tasmota_config_entry_lock" TASMOTA_DISCOVERY_DEVICE = "tasmota_discovery_device" TASMOTA_DISCOVERY_ENTITY_NEW = "tasmota_discovery_entity_new_{}" TASMOTA_DISCOVERY_ENTITY_UPDATED = "tasmota_discovery_entity_updated_{}_{}_{}_{}" def clear_discovery_hash(hass, discovery_hash): """Clear entry in ALREADY_DISCOVERED list.""" del hass.data[ALREADY_DISCOVERED][discovery_hash] def set_discovery_hash(hass, discovery_hash): """Set entry in ALREADY_DISCOVERED list.""" hass.data[ALREADY_DISCOVERED][discovery_hash] = {}
37.419355
88
0.694612
e6be7a1b7add8b9481d98005ea50f939d83dd351
15,696
py
Python
tfx/components/infra_validator/executor.py
TimoKerr/tfx
10d13d57eeac21514fed73118cb43464dada67f1
[ "Apache-2.0" ]
1
2021-05-10T10:41:06.000Z
2021-05-10T10:41:06.000Z
tfx/components/infra_validator/executor.py
TimoKerr/tfx
10d13d57eeac21514fed73118cb43464dada67f1
[ "Apache-2.0" ]
null
null
null
tfx/components/infra_validator/executor.py
TimoKerr/tfx
10d13d57eeac21514fed73118cb43464dada67f1
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX InfraValidator executor definition."""

import contextlib
import functools
import os
import signal
import threading
import time
from typing import Any, Dict, List, Optional

from absl import logging

from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import request_builder
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator import types as iv_types
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.components.infra_validator.model_server_runners import local_docker_runner
from tfx.dsl.components.base import base_executor
from tfx.proto import infra_validator_pb2
from tfx.types import artifact_utils
from tfx.types.standard_component_specs import BLESSING_KEY
from tfx.types.standard_component_specs import EXAMPLES_KEY
from tfx.types.standard_component_specs import MODEL_KEY
from tfx.types.standard_component_specs import REQUEST_SPEC_KEY
from tfx.types.standard_component_specs import SERVING_SPEC_KEY
from tfx.types.standard_component_specs import VALIDATION_SPEC_KEY
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx.utils import proto_utils
from tfx.utils.model_paths import tf_serving_flavor

from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
from tensorflow_serving.apis import regression_pb2

_DEFAULT_NUM_TRIES = 5
_DEFAULT_POLLING_INTERVAL_SEC = 1
_DEFAULT_MAX_LOADING_TIME_SEC = 300
_DEFAULT_MODEL_NAME = 'infra-validation-model'

# Proto message keys for oneof block.
_TENSORFLOW_SERVING = 'tensorflow_serving'
_LOCAL_DOCKER = 'local_docker'
_KUBERNETES = 'kubernetes'

# Artifact property keys
_BLESSED_KEY = 'blessed'
_MODEL_FLAG_KEY = 'has_model'
# Filename of infra blessing artifact on succeed.
_BLESSED_FILENAME = 'INFRA_BLESSED'
# Filename of infra blessing artifact on fail.
_NOT_BLESSED_FILENAME = 'INFRA_NOT_BLESSED'


def _create_model_server_runner(
    model_path: str,
    serving_binary: serving_bins.ServingBinary,
    serving_spec: infra_validator_pb2.ServingSpec):
  """Create a ModelServerRunner from a model, a ServingBinary and a ServingSpec.

  Args:
    model_path: An IV-flavored model path. (See model_path_utils.py)
    serving_binary: One of ServingBinary instances parsed from the
        `serving_spec`.
    serving_spec: A ServingSpec instance of this infra validation.

  Returns:
    A ModelServerRunner.
  """
  platform = serving_spec.WhichOneof('serving_platform')
  if platform == 'local_docker':
    return local_docker_runner.LocalDockerRunner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec
    )
  elif platform == 'kubernetes':
    return kubernetes_runner.KubernetesRunner(
        model_path=model_path,
        serving_binary=serving_binary,
        serving_spec=serving_spec
    )
  else:
    raise NotImplementedError('Invalid serving_platform {}'.format(platform))


def _convert_to_prediction_log(request: iv_types.Request):
  """Try convert infra validation request to TF-Serving PredictionLog."""
  if isinstance(request, classification_pb2.ClassificationRequest):
    return prediction_log_pb2.PredictionLog(
        classify_log=prediction_log_pb2.ClassifyLog(request=request))
  elif isinstance(request, regression_pb2.RegressionRequest):
    return prediction_log_pb2.PredictionLog(
        regress_log=prediction_log_pb2.RegressLog(request=request))
  elif isinstance(request, predict_pb2.PredictRequest):
    return prediction_log_pb2.PredictionLog(
        predict_log=prediction_log_pb2.PredictLog(request=request))
  else:
    raise NotImplementedError(
        f'Cannot convert {type(request)} to PredictionLog')
83
0.73248
e6bf66183d1220ed94fa05bc46a4ec69c5cf4ba5
130
py
Python
learning_python/org/allnix/util.py
ykyang/org.allnix.python
f9d74db2db026b20e925ac40dbca7d21b3ac0b0f
[ "Apache-2.0" ]
null
null
null
learning_python/org/allnix/util.py
ykyang/org.allnix.python
f9d74db2db026b20e925ac40dbca7d21b3ac0b0f
[ "Apache-2.0" ]
null
null
null
learning_python/org/allnix/util.py
ykyang/org.allnix.python
f9d74db2db026b20e925ac40dbca7d21b3ac0b0f
[ "Apache-2.0" ]
null
null
null
def read() -> str:
    """Returns a string"""
    return "org.allnix"
16.25
32
0.6
e6bfbff8f4c4eb14d73dd394e1c8390a8c552bf9
18,474
py
Python
metr-la/model/Double_C_STTN.py
happys2333/DL-2021-fall
e110d737d1a70c8238f2de3278e6aebce07c7a66
[ "Apache-2.0" ]
1
2022-02-11T12:24:08.000Z
2022-02-11T12:24:08.000Z
metr-la/model/Double_C_STTN.py
happys2333/DL-2021-fall
e110d737d1a70c8238f2de3278e6aebce07c7a66
[ "Apache-2.0" ]
null
null
null
metr-la/model/Double_C_STTN.py
happys2333/DL-2021-fall
e110d737d1a70c8238f2de3278e6aebce07c7a66
[ "Apache-2.0" ]
null
null
null
# from folder workMETRLA
# MODEL CODE
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 10:28:06 2020

@author: wb
"""
import torch
import torch.nn as nn
import math
# from GCN_models import GCN
# from One_hot_encoder import One_hot_encoder
import torch.nn.functional as F
import numpy as np
from scipy.sparse.linalg import eigs
from Param import *
from torchsummary import summary

DEVICE = 'cuda:1'

'''
Attention: ScaledDotProductAttention over (B, N, T, C) inputs.
[Non-ASCII comments were lost in extraction.] Recoverable details:
C ---> embedded size 32 or 64; here dk = 32 with 8 heads (32*8 = 256),
whereas the NIPS'17 transformer uses dk = 64, heads = 8, and a total
embedded size of 512.
'''

'''
S: spatial MultiHeadAttention
'''

'''
T: Temporal MultiHeadAttention
'''

### STBlock

### Encoder

### ST Transformer: Total Model

def print_params(model_name, model):
    param_count = 0
    for name, param in model.named_parameters():
        if param.requires_grad:
            param_count += param.numel()
    print(f'{model_name}, {param_count} trainable parameters in total.')
    return

import sys
import pandas as pd

if __name__ == '__main__':
    main()

'''
[Non-ASCII notes were lost in extraction.] Recoverable experiment list:
1. only Spatial Transformer, PEMSBAY, 12 in 12 out
2. only Temporal Transformer, PEMSBAY, 12 in 12 out
3. Temporal-Spatial Transformer, PEMSBAY, 12 in 12 out
4. channel (C) variants: C=1 vs. (B, N, T, C) with C=2, 12 in 12 out, PEMSBAY
'''
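
# --- Hedged sketch (assumption): the attention modules named in the
# docstrings above are elided by the dump. A standard scaled dot-product
# attention consistent with the described shapes is shown for illustration;
# it is not the original implementation.
class ScaledDotProductAttention(nn.Module):
    def forward(self, Q, K, V):
        # scores: (..., T_q, T_k), scaled by sqrt(d_k) as in Vaswani et al.
        d_k = Q.size(-1)
        scores = torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(d_k)
        return torch.matmul(F.softmax(scores, dim=-1), V)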
36.654762
228
0.566742
e6c28ea190ebaccb28d1869f9e2a7ef2b94d001d
2,079
py
Python
tetrisanim3.py
daniel-chuang/tetris
518bd7b1fd80babc34a1da323b2f50d88c31ed4a
[ "MIT" ]
null
null
null
tetrisanim3.py
daniel-chuang/tetris
518bd7b1fd80babc34a1da323b2f50d88c31ed4a
[ "MIT" ]
null
null
null
tetrisanim3.py
daniel-chuang/tetris
518bd7b1fd80babc34a1da323b2f50d88c31ed4a
[ "MIT" ]
null
null
null
# animation for medium article
from termcolor import colored
import time
import imageio
import pyautogui

pyautogui.FAILSAFE = True

matrix = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 2, 2, 2, 0, 0, 0],
          [0, 0, 0, 0, 0, 2, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [1, 0, 1, 1, 1, 1, 1, 1, 1, 0],
          [1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
          [0, 1, 0, 1, 0, 1, 0, 0, 1, 1],
          [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]

lst = set()

for i in range(21):
    for z in range(10):
        for row in range(len(matrix)):
            if 0 not in matrix[row]:
                lst.add(row)
            if (i == 20 or i > row) and row in lst:
                print(colored("1 " * 10, "green"))
            else:
                for element in range(len(matrix[row])):
                    if i == row and z == element:
                        print(colored(matrix[row][element], "green"), end=" ", flush=False)
                    elif matrix[row][element] == 1:
                        print(colored(matrix[row][element], "red"), end=" ", flush=False)
                    elif matrix[row][element] == 2:
                        print(colored(matrix[row][element], "blue"), end=" ", flush=False)
                    else:
                        print(matrix[row][element], end=" ", flush=False)
                print("")
        print("")
        # takes a screenshot
        pyautogui.moveTo(338, 580, duration=0)
        pyautogui.hotkey('command', 'shift', '4')
        pyautogui.dragTo(547, 1000, duration=0, button='left')
37.8
91
0.398268
e6c3d9a702952ceabdb0472e2fb0bedbc90655bc
782
py
Python
inventories/models.py
destodasoftware/kately_api
89e4e80a93ebf8e5d2f2981d108ce5efde75d0dd
[ "MIT" ]
null
null
null
inventories/models.py
destodasoftware/kately_api
89e4e80a93ebf8e5d2f2981d108ce5efde75d0dd
[ "MIT" ]
10
2019-12-04T23:52:31.000Z
2022-02-10T08:34:15.000Z
inventories/models.py
destodasoftware/kately_api
89e4e80a93ebf8e5d2f2981d108ce5efde75d0dd
[ "MIT" ]
null
null
null
from django.db import models

from products.models import Product
from utils.models import Utility
27.928571
91
0.751918
e6c4f83ee3a07eb68063b52c122a3a5c692004c3
276
py
Python
hierarchical_app/views.py
stephken/Hierarchical_assessment
537219903357d97d1354a8f262badba9729fb5e0
[ "MIT" ]
null
null
null
hierarchical_app/views.py
stephken/Hierarchical_assessment
537219903357d97d1354a8f262badba9729fb5e0
[ "MIT" ]
null
null
null
hierarchical_app/views.py
stephken/Hierarchical_assessment
537219903357d97d1354a8f262badba9729fb5e0
[ "MIT" ]
null
null
null
from django.shortcuts import render
from hierarchical_app.models import Folder

# Create your views here.
30.666667
142
0.764493
e6c52e70a50ff76dae5fa9533aa70b45708e60ab
19,221
py
Python
bin/train_vit.py
ramizdundar/Chexpert
6a5f005f1df421538182ad8497725b78e6de29be
[ "Apache-2.0" ]
null
null
null
bin/train_vit.py
ramizdundar/Chexpert
6a5f005f1df421538182ad8497725b78e6de29be
[ "Apache-2.0" ]
null
null
null
bin/train_vit.py
ramizdundar/Chexpert
6a5f005f1df421538182ad8497725b78e6de29be
[ "Apache-2.0" ]
null
null
null
import sys
import os
import argparse
import logging
import json
import time
import subprocess
from shutil import copyfile

import numpy as np
from sklearn import metrics
from easydict import EasyDict as edict

import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.nn import DataParallel
from vit_pytorch import ViT

from tensorboardX import SummaryWriter

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)

from data.dataset import ImageDataset  # noqa
from model.classifier import Classifier  # noqa
from utils.misc import lr_schedule  # noqa
from model.utils import get_optimizer  # noqa

parser = argparse.ArgumentParser(description='Train model')
parser.add_argument('cfg_path', default=None, metavar='CFG_PATH', type=str,
                    help="Path to the config file in yaml format")
parser.add_argument('save_path', default=None, metavar='SAVE_PATH', type=str,
                    help="Path to the saved models")
parser.add_argument('--num_workers', default=8, type=int,
                    help="Number of workers for each data loader")
parser.add_argument('--device_ids', default='0,1,2,3', type=str,
                    help="GPU indices comma separated, e.g. '0,1' ")
parser.add_argument('--pre_train', default=None, type=str,
                    help="If get parameters from pretrained model")
parser.add_argument('--resume', default=0, type=int,
                    help="If resume from previous run")
parser.add_argument('--logtofile', default=False, type=bool,
                    help="Save log in save_path/log.txt if set True")
parser.add_argument('--verbose', default=False, type=bool,
                    help="Detail info")

if __name__ == '__main__':
    main()
38.908907
78
0.542115
e6c580f84de62db4b9d20acb6cce98ce88761586
262
py
Python
Sets/the capaint s room.py
AndreasGeiger/hackerrank-python
a436c207e62b32f70a6b4279bb641a3c4d90e112
[ "MIT" ]
null
null
null
Sets/the capaint s room.py
AndreasGeiger/hackerrank-python
a436c207e62b32f70a6b4279bb641a3c4d90e112
[ "MIT" ]
null
null
null
Sets/the capaint s room.py
AndreasGeiger/hackerrank-python
a436c207e62b32f70a6b4279bb641a3c4d90e112
[ "MIT" ]
null
null
null
groupSize = input()
groups = list(map(int, input().split(' ')))

tmpArray1 = set()
tmpArray2 = set()

for i in groups:
    if i in tmpArray1:
        tmpArray2.discard(i)
    else:
        tmpArray1.add(i)
        tmpArray2.add(i)

for i in tmpArray2:
    print(i)
18.714286
42
0.603053
e6c60d4fe212527f51e4cf099e6d8185c934aa4e
164
py
Python
tests/testsoma.py
gtmadureira/Python
38de6c56fec1d22662f30c1ff4d4f4f411678484
[ "MIT" ]
4
2020-04-10T05:48:46.000Z
2021-07-14T10:56:19.000Z
tests/testsoma.py
gtmadureira/Python
38de6c56fec1d22662f30c1ff4d4f4f411678484
[ "MIT" ]
1
2020-05-09T21:00:52.000Z
2020-05-09T21:00:52.000Z
tests/testsoma.py
gtmadureira/Python
38de6c56fec1d22662f30c1ff4d4f4f411678484
[ "MIT" ]
null
null
null
import unittest

from hf_src.main import soma
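
# --- Hedged sketch (assumption): the dump truncates the file after the
# imports. A minimal test case consistent with them might look like this;
# the expected sums are illustrative assumptions about soma()'s contract.
class TestSoma(unittest.TestCase):
    def test_soma(self):
        self.assertEqual(soma(1, 2), 3)
        self.assertEqual(soma(-1, 1), 0)


if __name__ == '__main__':
    unittest.main()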
20.5
42
0.743902
e6c6e8aaf6429afdb1edbeda8513d241f632fc14
6,867
py
Python
src/oictest/setup.py
rohe/oictest
f6f0800220befd5983b8cb34a5c984f98855d089
[ "Apache-2.0" ]
32
2015-01-02T20:15:17.000Z
2020-02-15T20:46:25.000Z
src/oictest/setup.py
rohe/oictest
f6f0800220befd5983b8cb34a5c984f98855d089
[ "Apache-2.0" ]
8
2015-02-23T19:48:53.000Z
2016-01-20T08:24:05.000Z
src/oictest/setup.py
rohe/oictest
f6f0800220befd5983b8cb34a5c984f98855d089
[ "Apache-2.0" ]
17
2015-01-02T20:15:22.000Z
2022-03-22T22:58:28.000Z
import copy
import json

from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyJar
from oic.utils.keyio import KeyBundle

__author__ = 'roland'

import logging

logger = logging.getLogger(__name__)


def request_and_return(conv, url, response=None, method="GET", body=None,
                       body_type="json", state="", http_args=None, **kwargs):
    """
    :param url: The URL to which the request should be sent
    :param response: Response type
    :param method: Which HTTP method to use
    :param body: A message body if any
    :param body_type: The format of the body of the return message
    :param http_args: Arguments for the HTTP _client
    :return: A cls or ErrorResponse instance or the HTTP response
        instance if no response body was expected.
    """
    if http_args is None:
        http_args = {}

    _cli = conv._client
    try:
        _resp = _cli.http_request(url, method, data=body, **http_args)
    except Exception:
        raise

    conv.position = url
    conv.last_response = _resp
    conv.last_content = _resp.content

    if not "keyjar" in kwargs:
        kwargs["keyjar"] = conv.keyjar

    _response = _cli.parse_request_response(_resp, response, body_type, state,
                                            **kwargs)

    conv.protocol_response.append((_response, _resp.content))

    return _response
28.6125
80
0.553371
e6c8040bae19150daa4afa3909164f31bd76f5c3
2,696
py
Python
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
PKUfudawei/cmssw
8fbb5ce74398269c8a32956d7c7943766770c093
[ "Apache-2.0" ]
1
2021-11-30T16:24:46.000Z
2021-11-30T16:24:46.000Z
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
PKUfudawei/cmssw
8fbb5ce74398269c8a32956d7c7943766770c093
[ "Apache-2.0" ]
4
2021-11-29T13:57:56.000Z
2022-03-29T06:28:36.000Z
HLTrigger/Configuration/python/HLT_75e33/modules/hltPFPuppiNoLep_cfi.py
PKUfudawei/cmssw
8fbb5ce74398269c8a32956d7c7943766770c093
[ "Apache-2.0" ]
1
2021-11-30T16:16:05.000Z
2021-11-30T16:16:05.000Z
import FWCore.ParameterSet.Config as cms

hltPFPuppiNoLep = cms.EDProducer("PuppiProducer",
    DeltaZCut = cms.double(0.1),
    DeltaZCutForChargedFromPUVtxs = cms.double(0.2),
    EtaMaxCharged = cms.double(99999.0),
    EtaMaxPhotons = cms.double(2.5),
    EtaMinUseDeltaZ = cms.double(-1.0),
    MinPuppiWeight = cms.double(0.01),
    NumOfPUVtxsForCharged = cms.uint32(0),
    PUProxyValue = cms.InputTag("hltPixelClustersMultiplicity"),
    PtMaxCharged = cms.double(-1.0),
    PtMaxNeutrals = cms.double(200.0),
    PtMaxNeutralsStartSlope = cms.double(0.0),
    PtMaxPhotons = cms.double(20.0),
    UseDeltaZCut = cms.bool(True),
    UseFromPVLooseTight = cms.bool(False),
    algos = cms.VPSet(
        cms.PSet(
            EtaMaxExtrap = cms.double(2.0),
            MedEtaSF = cms.vdouble(1.0, 1.0),
            MinNeutralPt = cms.vdouble(0.5105, 0.821),
            MinNeutralPtSlope = cms.vdouble(9.51e-06, 1.902e-05),
            RMSEtaSF = cms.vdouble(1.0, 1.0),
            etaMax = cms.vdouble(2.5, 3.5),
            etaMin = cms.vdouble(0.0, 2.5),
            ptMin = cms.vdouble(0.0, 0.0),
            puppiAlgos = cms.VPSet(cms.PSet(
                algoId = cms.int32(5),
                applyLowPUCorr = cms.bool(True),
                combOpt = cms.int32(0),
                cone = cms.double(0.4),
                rmsPtMin = cms.double(0.1),
                rmsScaleFactor = cms.double(1.0),
                useCharged = cms.bool(True)
            ))
        ),
        cms.PSet(
            EtaMaxExtrap = cms.double(2.0),
            MedEtaSF = cms.vdouble(0.75),
            MinNeutralPt = cms.vdouble(3.656),
            MinNeutralPtSlope = cms.vdouble(5.072e-05),
            RMSEtaSF = cms.vdouble(1.0),
            etaMax = cms.vdouble(10.0),
            etaMin = cms.vdouble(3.5),
            ptMin = cms.vdouble(0.0),
            puppiAlgos = cms.VPSet(cms.PSet(
                algoId = cms.int32(5),
                applyLowPUCorr = cms.bool(True),
                combOpt = cms.int32(0),
                cone = cms.double(0.4),
                rmsPtMin = cms.double(0.5),
                rmsScaleFactor = cms.double(1.0),
                useCharged = cms.bool(False)
            ))
        )
    ),
    applyCHS = cms.bool(True),
    candName = cms.InputTag("particleFlowTmp"),
    clonePackedCands = cms.bool(False),
    invertPuppi = cms.bool(False),
    puppiDiagnostics = cms.bool(False),
    puppiNoLep = cms.bool(True),
    useExistingWeights = cms.bool(False),
    useExp = cms.bool(False),
    usePUProxyValue = cms.bool(True),
    vertexName = cms.InputTag("goodOfflinePrimaryVertices"),
    vtxNdofCut = cms.int32(4),
    vtxZCut = cms.double(24)
)
37.971831
65
0.563427
e6c80a99d05f2b6649c49c64c56164c81a82517f
29,212
py
Python
wizbin/build.py
RogueScholar/debreate
0abc168c51336b31ff87c61f84bc7bb6000e88f4
[ "MIT" ]
97
2016-09-16T08:44:04.000Z
2022-01-29T22:30:18.000Z
wizbin/build.py
RogueScholar/debreate
0abc168c51336b31ff87c61f84bc7bb6000e88f4
[ "MIT" ]
34
2016-09-20T00:42:45.000Z
2021-04-16T07:21:44.000Z
wizbin/build.py
RogueScholar/debreate
0abc168c51336b31ff87c61f84bc7bb6000e88f4
[ "MIT" ]
24
2016-09-16T08:44:56.000Z
2021-07-29T11:32:47.000Z
# -*- coding: utf-8 -*-

## \package wizbin.build

# MIT licensing
# See: docs/LICENSE.txt


import commands, os, shutil, subprocess, traceback, wx

from dbr.functions import FileUnstripped
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from dbr.md5 import WriteMD5
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.bitmaps import ICON_EXCLAMATION
from globals.bitmaps import ICON_INFORMATION
from globals.errorcodes import dbrerrno
from globals.execute import ExecuteCommand
from globals.execute import GetExecutable
from globals.execute import GetSystemInstaller
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import pgid
from globals.paths import ConcatPaths
from globals.paths import PATH_app
from globals.strings import GS
from globals.strings import RemoveEmptyLines
from globals.strings import TextIsEmpty
from globals.system import PY_VER_MAJ
from globals.tooltips import SetPageToolTips
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from startup.tests import UsingTest
from ui.button import CreateButton
from ui.checklist import CheckListDialog
from ui.dialog import DetailedMessageDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.output import OutputLog
from ui.panel import BorderedPanel
from ui.progress import PD_DEFAULT_STYLE
from ui.progress import ProgressDialog
from ui.progress import TimedProgressDialog
from ui.style import layout as lyt
from wiz.helper import FieldEnabled
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.helper import GetPage
from wiz.wizard import WizardPage


## Build page
28.251451
151
0.691736
e6c8ce8afe1fef7a0e2e19b44facdada82817d59
311
py
Python
__main__.py
maelstromdat/YOSHI
67e5176f24ff12e598025d4250b408da564f53d1
[ "Apache-2.0" ]
6
2017-05-07T09:39:18.000Z
2021-10-07T01:46:08.000Z
__main__.py
maelstromdat/YOSHI
67e5176f24ff12e598025d4250b408da564f53d1
[ "Apache-2.0" ]
1
2018-01-15T15:31:03.000Z
2018-01-15T15:31:03.000Z
__main__.py
maelstromdat/YOSHI
67e5176f24ff12e598025d4250b408da564f53d1
[ "Apache-2.0" ]
5
2020-02-28T04:16:16.000Z
2021-04-30T09:35:19.000Z
from YoshiViz import Gui

if __name__ == '__main__':
    # File director: launch the visualization GUI.
    gui = Gui.Gui()
    """
    report_generator.\
        generate_pdf_report(fileDirectory, repositoryName, tempCommunityType)
    """
    # repositoryName and tempCommunityType are only produced by the
    # commented-out report-generation flow above, so the summary print is
    # disabled to keep the script runnable.
    # print('the type of', repositoryName, 'is', tempCommunityType,
    #       '\n"check .\\YoshiViz\\output"')
25.916667
96
0.662379
e6c97a9ee684956ae509733d7e8dff568dd9da66
623
py
Python
hpotter/src/lazy_init.py
LarsenClose/dr.hpotter
ef6199ab563a92f3e4916277dbde9217126f36a9
[ "MIT" ]
1
2021-08-15T09:24:20.000Z
2021-08-15T09:24:20.000Z
hpotter/src/lazy_init.py
LarsenClose/dr.hpotter
ef6199ab563a92f3e4916277dbde9217126f36a9
[ "MIT" ]
18
2021-02-01T21:58:20.000Z
2021-05-24T17:10:25.000Z
hpotter/src/lazy_init.py
LarsenClose/dr.hpotter
ef6199ab563a92f3e4916277dbde9217126f36a9
[ "MIT" ]
1
2021-06-19T12:49:54.000Z
2021-06-19T12:49:54.000Z
''' Wrap an __init__ function so that I don't have to assign all the
    parameters to a self. variable. '''

# https://stackoverflow.com/questions/5048329/python-decorator-for-automatic-binding-init-arguments

import inspect
from functools import wraps


def lazy_init(init):
    ''' Create an annotation to assign all the parameters to a self.
        variable. '''

    arg_names = inspect.getfullargspec(init)[0]  # pylint: disable=E1101

    @wraps(init)
    def new_init(self, *args):
        # Bind each positional argument to a self attribute of the same
        # name; arg_names[0] is 'self', so it is skipped.
        for name, value in zip(arg_names[1:], args):
            setattr(self, name, value)
        init(self, *args)

    return new_init
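A minimal usage sketch of the decorator — the class and parameter names here are hypothetical, not from the original module:

class Connection:
    @lazy_init
    def __init__(self, host, port, timeout):
        pass  # host, port and timeout are bound to self by the decorator

conn = Connection('localhost', 8080, 30)
print(conn.host, conn.port, conn.timeout)  # -> localhost 8080 30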
28.318182
99
0.686998
e6cb19760623f02a584f4187adb3490f5de6005b
781
py
Python
main.py
technojam/MLian
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
[ "MIT" ]
1
2021-12-18T19:54:45.000Z
2021-12-18T19:54:45.000Z
main.py
technojam/MLian
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
[ "MIT" ]
2
2021-12-18T19:50:08.000Z
2021-12-18T19:52:20.000Z
main.py
technojam/MLian
7632c5c7d4c44b1d87de9ab23c1ed7293962ca49
[ "MIT" ]
1
2022-03-01T14:13:27.000Z
2022-03-01T14:13:27.000Z
# def register_feed():
import cv2

cam = cv2.VideoCapture(0)
name = input("Name: ")
cv2.namedWindow("test")
img_counter = 0

while True:
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    cv2.imshow("test", frame)

    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed: save the current frame under the entered name.
        # img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(name + ".jpg", frame)
        # print("{} written!".format(img_name))
        print("Image Captured! Proceed...")
        img_counter += 1

cam.release()
cv2.destroyAllWindows()
22.314286
66
0.541613
e6cb563badebdde1d425f141d7f04f5b497ea2ae
2,643
py
Python
models/train.py
Hiwyl/keras_cnn_finetune
f424302a72c8d05056a9af6f9b293003acb8398d
[ "MIT" ]
1
2019-09-30T01:07:03.000Z
2019-09-30T01:07:03.000Z
models/train.py
Hiwyl/keras_cnn_finetune
f424302a72c8d05056a9af6f9b293003acb8398d
[ "MIT" ]
null
null
null
models/train.py
Hiwyl/keras_cnn_finetune
f424302a72c8d05056a9af6f9b293003acb8398d
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*-
'''
@Author : lance
@Email  : [email protected]
'''
import time

from model_cx.inceptionresnet import inceptionresnet
from model_cx.vgg19two import vgg19_all_lr
from model_cx.inceptionv3 import inceptionv3
from model_cx.densenet import densenet
from model_cx.nasnet import nasnet
from model_cx.merge import merge
from model_cx.bcnn import bilinearnet
from model_cx.resnet import ResNet50
from model_cx.mobilenetv2 import mobilenetv2
from model_cx.senet import senet

if __name__ == "__main__":
    classes = 1
    epochs = 100
    steps_per_epoch = 113
    validation_steps = 48
    shape = (224, 224)

    print("...")
    start = time.time()

    # try:
    #     print("densenet")
    #     densenet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("bcnn")
    #     bilinearnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("resnet")
    #     ResNet50(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    try:
        print("merge")
        merge(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    # try:
    #     print("ince_res")
    #     inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionresnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("mobilenetv2")
    #     mobilenetv2(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    # try:
    #     print("inceptionv3")
    #     inceptionv3(classes, epochs, steps_per_epoch, validation_steps, (299, 299))
    #     # inceptionv3(classes, epochs, steps_per_epoch, validation_steps, shape)
    # except Exception as e:
    #     print(e)
    try:
        print("nasnet")
        nasnet(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    try:
        print("vgg19two")
        vgg19_all_lr(classes, epochs, steps_per_epoch, validation_steps, shape)
    except Exception as e:
        print(e)
    try:
        print("senet")
        senet(classes, epochs, steps_per_epoch, validation_steps, (100, 100))
    except Exception as e:
        print(e)

    end = time.time()
    print("Elapsed time (hours):", (end - start) / 3600)
31.094118
90
0.623156
e6cb633a5c540a02c577994bd8b8eebe64755249
3,275
py
Python
src/probnum/randprocs/markov/integrator/_preconditioner.py
alpiges/probnum
2e4153cb0df559984e09ec74487ef6c9d3f6d464
[ "MIT" ]
null
null
null
src/probnum/randprocs/markov/integrator/_preconditioner.py
alpiges/probnum
2e4153cb0df559984e09ec74487ef6c9d3f6d464
[ "MIT" ]
40
2021-04-12T07:56:29.000Z
2022-03-28T00:18:18.000Z
src/probnum/randprocs/markov/integrator/_preconditioner.py
alpiges/probnum
2e4153cb0df559984e09ec74487ef6c9d3f6d464
[ "MIT" ]
null
null
null
"""Coordinate changes in state space models.""" import abc try: # cached_property is only available in Python >=3.8 from functools import cached_property except ImportError: from cached_property import cached_property import numpy as np import scipy.special # for vectorised factorial from probnum import config, linops, randvars
34.114583
101
0.685802
e6cc468eac9d6881bb54cbc2d585ee21f2641f3f
2,345
py
Python
allauth/socialaccount/providers/linkedin/provider.py
mina-gaid/scp
38e1cd303d4728a987df117f666ce194e241ed1a
[ "MIT" ]
1
2018-04-06T21:36:59.000Z
2018-04-06T21:36:59.000Z
allauth/socialaccount/providers/linkedin/provider.py
mina-gaid/scp
38e1cd303d4728a987df117f666ce194e241ed1a
[ "MIT" ]
6
2020-06-05T18:44:19.000Z
2022-01-13T00:48:56.000Z
allauth/socialaccount/providers/linkedin/provider.py
mina-gaid/scp
38e1cd303d4728a987df117f666ce194e241ed1a
[ "MIT" ]
1
2022-02-01T17:19:28.000Z
2022-02-01T17:19:28.000Z
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
from allauth.socialaccount import app_settings


class LinkedInAccount(ProviderAccount):
    # Minimal subclass restored so the module imports; the body of the
    # original class is truncated in this record.
    pass


class LinkedInProvider(OAuthProvider):
    # Minimal stub for the name referenced by register() below; the
    # original class body (field and scope handling) is truncated.
    id = 'linkedin'
    name = 'LinkedIn'
    account_class = LinkedInAccount


providers.registry.register(LinkedInProvider)
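A sketch of how a provider registered this way is typically enabled in a Django project — abbreviated, and the exact settings depend on the allauth version in use:

# settings.py
INSTALLED_APPS = [
    # ...
    'allauth',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.linkedin',
]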
34.485294
78
0.594456
e6ccbdf212404d1bb840cdf710923204e7c1baa5
4,744
py
Python
game2048/myNew.py
CCTQL/2048-api
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
[ "Apache-2.0" ]
null
null
null
game2048/myNew.py
CCTQL/2048-api
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
[ "Apache-2.0" ]
null
null
null
game2048/myNew.py
CCTQL/2048-api
a75316a90e9a7c8c9171e39e1d1fc24cbac3ba1a
[ "Apache-2.0" ]
null
null
null
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import time
import pandas as pd
import numpy as np
import csv


batch_size = 128
NUM_EPOCHS = 30
LR = 0.001
TIME_STEP = 4

#-----------------------------------------------
csv_data = pd.read_csv('./drive/My Drive/DATA.csv')
csv_data = csv_data.values
A = csv_data.shape[0]
board_data = csv_data[:, 0:16]
# X = np.log2(X)
X = np.int64(board_data)
# Reshape each flattened board to 4x4 so the transpose below is valid.
X = np.reshape(X, (-1, 4, 4))
XT = X.transpose(0, 2, 1)
# Stack each board with its transpose along the row axis: shape (N, 8, 4).
X = np.concatenate((X, XT), axis=1)
print(X.shape)
direction_data = csv_data[:, 16]
Y = np.int64(direction_data)

#-------------------------------------------------------
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, shuffle=False)
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
Y_train = torch.LongTensor(Y_train)
Y_test = torch.LongTensor(Y_test)

train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
# test_dataset = torch.utils.data.TensorDataset(X_test, Y_test)

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True
                                           )
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
#                                           batch_size=batch_size,
#                                           shuffle=False
#                                           )

# CCRNN and train() are defined in the portion of this file that is
# truncated from this record.
model = CCRNN()
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=LR)

if __name__ == '__main__':
    for epoch in range(0, NUM_EPOCHS):
        train(epoch)
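The CCRNN class and train() function the script calls sit in the truncated part of the file; a minimal hypothetical stand-in, assuming an LSTM over the (8, 4) board encoding, so the record above can run end to end — the architecture and names are illustrative only:

class CCRNN(nn.Module):
    # Hypothetical architecture: LSTM over the 8 rows of the (8, 4)
    # encoding, with a linear head over the 4 move directions.
    def __init__(self, hidden_size=128):
        super(CCRNN, self).__init__()
        self.rnn = nn.LSTM(input_size=4, hidden_size=hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, 4)

    def forward(self, x):
        out, _ = self.rnn(x)           # (batch, 8, hidden_size)
        return self.fc(out[:, -1, :])  # classify from the final step


def train(epoch):
    model.train()
    for data, target in train_loader:
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
    print('epoch', epoch, 'last batch loss', loss.item())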
27.421965
87
0.572513