Column schema (name: dtype, value range):

hexsha: stringlengths, 40 .. 40
size: int64, 5 .. 2.06M
ext: stringclasses, 11 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 .. 251
max_stars_repo_name: stringlengths, 4 .. 130
max_stars_repo_head_hexsha: stringlengths, 40 .. 78
max_stars_repo_licenses: sequencelengths, 1 .. 10
max_stars_count: int64, 1 .. 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 .. 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 .. 24
max_issues_repo_path: stringlengths, 3 .. 251
max_issues_repo_name: stringlengths, 4 .. 130
max_issues_repo_head_hexsha: stringlengths, 40 .. 78
max_issues_repo_licenses: sequencelengths, 1 .. 10
max_issues_count: int64, 1 .. 116k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 .. 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 .. 24
max_forks_repo_path: stringlengths, 3 .. 251
max_forks_repo_name: stringlengths, 4 .. 130
max_forks_repo_head_hexsha: stringlengths, 40 .. 78
max_forks_repo_licenses: sequencelengths, 1 .. 10
max_forks_count: int64, 1 .. 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 .. 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 .. 24
content: stringlengths, 1 .. 1.05M
avg_line_length: float64, 1 .. 1.02M
max_line_length: int64, 3 .. 1.04M
alphanum_fraction: float64, 0 .. 1
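Each record below lists one value per column, in the schema order above. A minimal sketch of how such a dump could be loaded and its derived columns re-checked, assuming the rows have been exported to a local Parquet file (the file name is hypothetical, and the exact formulas the dataset uses for avg_line_length and alphanum_fraction are assumptions):

import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("stack_python_sample.parquet")

row = df.iloc[0]
print(row["hexsha"], row["ext"], row["size"])
print(row["max_stars_repo_name"], row["max_stars_count"])

# Re-derive the per-file statistics from the raw source text.
content = row["content"]
lines = content.splitlines()
avg_line_length = sum(len(l) for l in lines) / max(len(lines), 1)
max_line_length = max((len(l) for l in lines), default=0)
alphanum_fraction = sum(c.isalnum() for c in content) / max(len(content), 1)
print(avg_line_length, max_line_length, alphanum_fraction)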
867bbaa4b747400e8e0dce95ef2502b3a1d6e3df
188
py
Python
app/helpers/geocode.py
Soumya117/finnazureflaskapp
794f82596a329ff1a2e4dc23d49903a0ef474f95
[ "MIT" ]
null
null
null
app/helpers/geocode.py
Soumya117/finnazureflaskapp
794f82596a329ff1a2e4dc23d49903a0ef474f95
[ "MIT" ]
2
2021-03-31T20:43:02.000Z
2021-12-13T20:13:40.000Z
app/helpers/geocode.py
Soumya117/finnparser
e89ff6e1a0c08b57a1b2f971d5f7bb888c2f4a05
[ "MIT" ]
null
null
null
import googlemaps

gmaps = googlemaps.Client(key='google_key')
20.888889
52
0.75
868018c92dba01d6288623e8f84851ac57ade115
3,427
py
Python
tf/estimators/keras_estimator.py
aspratyush/dl_utils
c067831f3c72aba88223c231c7fbc249d997e222
[ "Apache-2.0" ]
null
null
null
tf/estimators/keras_estimator.py
aspratyush/dl_utils
c067831f3c72aba88223c231c7fbc249d997e222
[ "Apache-2.0" ]
null
null
null
tf/estimators/keras_estimator.py
aspratyush/dl_utils
c067831f3c72aba88223c231c7fbc249d997e222
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import os
import numpy as np
import tensorflow as tf


def run(model, X, Y, optimizer=None, nb_epochs=30, nb_batches=128):
    """
    Run the estimator
    """
    if optimizer is None:
        optimizer = tf.keras.optimizers.SGD(
            lr=0.0009, decay=1e-5, momentum=0.9, nesterov=True)

    # 1. Compile the model
    model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    # 2. Create an estimator
    model_est = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir='./lenet')

    # Training
    # 3a. Create the training function
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={model.input_names[0]: X['train'].astype(np.float32)},
        y=Y['train'].astype(np.float32),
        batch_size=nb_batches,
        num_epochs=nb_epochs,
        shuffle=True
    )

    # 3b. Train the model
    model_est.train(input_fn=train_input_fn, steps=nb_epochs * nb_batches)

    # Evaluate
    # 4a. Evaluate the model
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={model.input_names[0]: X['test'].astype(np.float32)},
        y=Y['test'].astype(np.float32),
        batch_size=nb_batches,
        num_epochs=nb_epochs,
        shuffle=True
    )

    # 4b. Evaluate the model
    model_eval = model_est.evaluate(input_fn=eval_input_fn)
    print(model_eval)

    return model_est, model_eval


def run_from_generator(
        model, input_func=None, input_func_dict=None, eval_func_dict=None,
        nb_epochs=10, optimizer=None, model_dir=None):
    """
    Overloaded function to create an estimator using tf.data.Dataset
    :param model : uncompiled keras model
    :param input_fn : input function providing tf.data.Dataset to the estimator
    :param input_fn_dict : dictionary containing input params for input_fn
    :param eval_fn_dict : dictionary containing params for eval input_fn
    :param model_dir : directory to store the trained model
    """
    # 1. Create optimizer and compile model if optimizer is None
    if optimizer is None:
        optimizer = tf.keras.optimizers.SGD(
            lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)

    # 2. compile the model
    model.compile(
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    # 3. create estimator
    dir_path = os.path.join(os.getcwd(), model_dir)
    print("Model path chosen : ", dir_path)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)

    print("Creating estimator...")
    est = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=dir_path)

    # 4. Train and Evaluate the model
    print("Training...")
    # training spec
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_func(input_func_dict), max_steps=500)
    # evaluation spec
    eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_func(eval_func_dict))
    # Run the training
    model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)

    # est.train(input_fn=lambda: input_func(input_func_dict),
    #           steps=None)
    #
    # est.evalute(input_fn=lambda: input_func(eval_func_dict))

    return est
31.731481
85
0.66443
8681c3c33618ecd1ae623aef1502da24ff44d7f8
15,279
py
Python
isign/archive.py
l0ui3/isign
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
[ "Apache-2.0" ]
1
2020-03-24T14:22:17.000Z
2020-03-24T14:22:17.000Z
isign/archive.py
l0ui3/isign
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
[ "Apache-2.0" ]
null
null
null
isign/archive.py
l0ui3/isign
c0730ac1ce1b32defe8c6016e19b9701184b0f5a
[ "Apache-2.0" ]
1
2021-08-16T04:03:25.000Z
2021-08-16T04:03:25.000Z
""" Represents an app archive. This is an app at rest, whether it's a naked app bundle in a directory, or a zipped app bundle, or an IPA. We have a common interface to extract these apps to a temp file, then resign them, and create an archive of the same type """ import abc import biplist from bundle import App, Bundle, is_info_plist_native from exceptions import MissingHelpers, NotSignable, NotMatched from distutils import spawn import logging import os from os.path import abspath, dirname, exists, isdir, isfile, join, normpath import tempfile import re from subprocess import call from signer import Signer import shutil import zipfile REMOVE_WATCHKIT = True helper_paths = {} log = logging.getLogger(__name__) def get_helper(helper_name): """ find paths to executables. Cached in helper_paths """ if helper_name not in helper_paths or helper_paths[helper_name] is None: # note, find_executable returns None is not found # in other words, we keep retrying until found helper_paths[helper_name] = spawn.find_executable(helper_name) log.debug("got executable {} for {}".format(helper_paths[helper_name], helper_name)) return helper_paths[helper_name] def get_watchkit_paths(root_bundle_path): """ collect sub-bundles of this bundle that have watchkit """ # typical structure: # # app_bundle # ... # some_directory # watchkit_extension <-- this is the watchkit bundle # Info.plist # watchkit_bundle <-- this is the part that runs on the Watch # Info.plist <-- WKWatchKitApp=True # watchkit_paths = [] for path, _, _ in os.walk(root_bundle_path): if path == root_bundle_path: continue try: bundle = Bundle(path) except NotMatched: # this directory is not a bundle continue if bundle.info.get('WKWatchKitApp') is True: # get the *containing* bundle watchkit_paths.append(dirname(path)) return watchkit_paths def process_watchkit(root_bundle_path, should_remove=False): """ Unfortunately, we currently can't sign WatchKit. If you don't care about watchkit functionality, it is generally harmless to remove it, so that's the default. Remove when https://github.com/saucelabs/isign/issues/20 is fixed """ watchkit_paths = get_watchkit_paths(root_bundle_path) if len(watchkit_paths) > 0: if should_remove: for path in watchkit_paths: log.warning("Removing WatchKit bundle {}".format(path)) shutil.rmtree(path) else: raise NotSignable("Cannot yet sign WatchKit bundles") def unarchive_to_temp(self): containing_dir = make_temp_dir() log.debug("unarchiving to temp... %s -> %s", self.path, containing_dir) shutil.rmtree(containing_dir) # quirk of copytree, top dir can't exist already shutil.copytree(self.path, containing_dir) process_watchkit(containing_dir, REMOVE_WATCHKIT) return UncompressedArchive(containing_dir, '.', self.__class__) class AppZipArchive(Archive): """ Just like an app, except it's zipped up, and when repackaged, should be re-zipped. 
""" app_dir_pattern = r'^([^/]+\.app/).*$' extensions = ['.zip'] helpers = ['zip', 'unzip'] def __init__(self, path): self.path = path zipfile_obj = zipfile.ZipFile(path) self.relative_bundle_dir = self.find_bundle_dir(zipfile_obj) self.bundle_info = self.get_info(self.relative_bundle_dir, zipfile_obj) def unarchive_to_temp(self): containing_dir = make_temp_dir() call([get_helper('unzip'), "-qu", self.path, "-d", containing_dir]) app_dir = abspath(join(containing_dir, self.relative_bundle_dir)) process_watchkit(app_dir, REMOVE_WATCHKIT) return UncompressedArchive(containing_dir, self.relative_bundle_dir, self.__class__) def archive_factory(path): """ Guess what kind of archive we are dealing with, return an archive object. Returns None if path did not match any archive type """ archive = None for cls in [IpaArchive, AppZipArchive, AppArchive]: if cls.precheck(path): archive = cls(path) log.debug("File %s matched as %s", path, cls.__name__) break return archive def view(input_path): if not exists(input_path): raise IOError("{0} not found".format(input_path)) ua = None bundle_info = None try: archive = archive_factory(input_path) if archive is None: raise NotMatched('No matching archive type found') ua = archive.unarchive_to_temp() bundle_info = ua.bundle.info finally: if ua is not None: ua.remove() return bundle_info def resign(input_path, certificate, key, apple_cert, provisioning_profile, output_path, info_props=None, alternate_entitlements_path=None): """ Unified interface to extract any kind of archive from a temporary file, resign it with these credentials, and create a similar archive for that resigned app """ if not exists(input_path): raise IOError("{0} not found".format(input_path)) log.debug('Signing with apple_cert: {}'.format(apple_cert)) log.debug('Signing with key: {}'.format(key)) log.debug('Signing with certificate: {}'.format(certificate)) log.debug('Signing with provisioning_profile: {}'.format(provisioning_profile)) signer = Signer(signer_cert_file=certificate, signer_key_file=key, apple_cert_file=apple_cert) ua = None bundle_info = None try: archive = archive_factory(input_path) if archive is None: raise NotSignable('No matching archive type found') ua = archive.unarchive_to_temp() if info_props: # Override info.plist props of the parent bundle ua.bundle.update_info_props(info_props) ua.bundle.resign(signer, provisioning_profile, alternate_entitlements_path) bundle_info = ua.bundle.info ua.archive(output_path) except NotSignable as e: msg = "Not signable: <{0}>: {1}\n".format(input_path, e) log.info(msg) raise finally: if ua is not None: ua.remove() return bundle_info
37.540541
97
0.638458
8681dc9beb7ce1fcfe008337221dc6feb16aedb5
1,888
py
Python
conan/tools/env/virtualrunenv.py
dscole/conan
ff7b8e6703e8407773968517d68424b9ec59aa30
[ "MIT" ]
null
null
null
conan/tools/env/virtualrunenv.py
dscole/conan
ff7b8e6703e8407773968517d68424b9ec59aa30
[ "MIT" ]
1
2019-06-07T03:02:02.000Z
2019-06-07T03:02:02.000Z
conan/tools/env/virtualrunenv.py
dscole/conan
ff7b8e6703e8407773968517d68424b9ec59aa30
[ "MIT" ]
1
2021-08-20T19:47:51.000Z
2021-08-20T19:47:51.000Z
from conan.tools.env import Environment


def runenv_from_cpp_info(conanfile, cpp_info):
    """ return an Environment deducing the runtime information from a cpp_info
    """
    dyn_runenv = Environment(conanfile)
    if cpp_info is None:  # This happens when the dependency is a private one = BINARY_SKIP
        return dyn_runenv
    if cpp_info.bin_paths:  # cpp_info.exes is not defined yet
        dyn_runenv.prepend_path("PATH", cpp_info.bin_paths)
    # If it is a build_require this will be the build-os, otherwise it will be the host-os
    if cpp_info.lib_paths:
        dyn_runenv.prepend_path("LD_LIBRARY_PATH", cpp_info.lib_paths)
        dyn_runenv.prepend_path("DYLD_LIBRARY_PATH", cpp_info.lib_paths)
    if cpp_info.framework_paths:
        dyn_runenv.prepend_path("DYLD_FRAMEWORK_PATH", cpp_info.framework_paths)
    return dyn_runenv
37.76
94
0.697034
868508f96e8daab66111c1b9c301708733421f4d
652
py
Python
src/api/api_lists/models/list.py
rrickgauer/lists
371de6af332789ef386392fd24857702794d05a6
[ "Apache-2.0" ]
null
null
null
src/api/api_lists/models/list.py
rrickgauer/lists
371de6af332789ef386392fd24857702794d05a6
[ "Apache-2.0" ]
67
2021-12-14T22:30:56.000Z
2022-03-15T18:28:27.000Z
src/api/api_lists/models/list.py
rrickgauer/lists
371de6af332789ef386392fd24857702794d05a6
[ "Apache-2.0" ]
null
null
null
""" ********************************************************************************** List model ********************************************************************************** """ from enum import Enum from dataclasses import dataclass from uuid import UUID from datetime import datetime
23.285714
82
0.460123
86852eaa353d7f71b07181e8c40327dffa81fc7f
12,985
py
Python
config/appdaemon/apps/power_alarm.py
azogue/hassio_config
591f158794c173d6391179ab2f52348d58c49aad
[ "MIT" ]
18
2018-07-22T15:19:20.000Z
2022-01-09T20:57:43.000Z
config/appdaemon/apps/power_alarm.py
azogue/hassio_config
591f158794c173d6391179ab2f52348d58c49aad
[ "MIT" ]
1
2020-04-30T08:33:36.000Z
2020-05-03T08:25:00.000Z
config/appdaemon/apps/power_alarm.py
azogue/hassio_config
591f158794c173d6391179ab2f52348d58c49aad
[ "MIT" ]
8
2018-07-21T09:29:53.000Z
2021-11-10T19:06:32.000Z
# -*- coding: utf-8 -*-
"""
Automation task as a AppDaemon App for Home Assistant - current meter PEAK POWER notifications
"""
import datetime as dt
from enum import IntEnum

import appdaemon.plugins.hass.hassapi as hass

LOG_LEVEL = "INFO"
LOG_LEVEL_ALERT = "WARNING"
LOGGER = "special_event_log"

COEF_CRITICAL_LIMIT = 1.1  # 10% over limit
MIN_TIME_TURN_OFF_AC = 60  # secs

# Big power consumers
BIG_CONSUMER_1_CLIMATE = "switch.ac_dry_contact"
BIG_CONSUMER_1_LABEL = "aire acondicionado"
BIG_CONSUMER_2 = "switch.calentador"
BIG_CONSUMER_2_LABEL = "calentador"

_IOS_SOUND_POWER_PEAK = "US-EN-Morgan-Freeman-Vacate-The-Premises.wav"


# noinspection PyClassHasNoInit
35.285326
79
0.5196
86857d42e426b63b37d2aa71caa37b9b57dd862e
13,391
py
Python
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/build/lib.linux-x86_64-2.6/twisted/internet/gtk2reactor.py
SPIN-UMass/SWEET
1b0f39222e7064f70812e3293ca023619295741d
[ "MIT" ]
3
2020-04-02T06:23:44.000Z
2020-08-13T20:32:31.000Z
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/twisted/internet/gtk2reactor.py
SPIN-UMass/SWEET
1b0f39222e7064f70812e3293ca023619295741d
[ "MIT" ]
null
null
null
mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/twisted/internet/gtk2reactor.py
SPIN-UMass/SWEET
1b0f39222e7064f70812e3293ca023619295741d
[ "MIT" ]
1
2020-04-02T06:26:10.000Z
2020-04-02T06:26:10.000Z
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
This module provides support for Twisted to interact with the glib/gtk2
mainloop.

In order to use this support, simply do the following::

    |  from twisted.internet import gtk2reactor
    |  gtk2reactor.install()

Then use twisted.internet APIs as usual.  The other methods here are not
intended to be called directly.

When installing the reactor, you can choose whether to use the glib
event loop or the GTK+ event loop which is based on it but adds GUI
integration.
"""

# System Imports
import sys, signal

from zope.interface import implements

try:
    if not hasattr(sys, 'frozen'):
        # Don't want to check this for py2exe
        import pygtk
        pygtk.require('2.0')
except (ImportError, AttributeError):
    pass  # maybe we're using pygtk before this hack existed.

import gobject
if hasattr(gobject, "threads_init"):
    # recent versions of python-gtk expose this. python-gtk=2.4.1
    # (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
    # glib-2.2.3) does not.
    gobject.threads_init()

# Twisted Imports
from twisted.python import log, runtime, failure
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import main, base, posixbase, error, selectreactor

POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL

# glib's iochannel sources won't tell us about any events that we haven't
# asked for, even if those events aren't sensible inputs to the poll()
# call.
INFLAGS = gobject.IO_IN | POLL_DISCONNECTED
OUTFLAGS = gobject.IO_OUT | POLL_DISCONNECTED


def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: should glib rather than GTK+ event loop be
        used (this will be slightly faster but does not support GUI).
    """
    reactor = Gtk2Reactor(useGtk)
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.
    """
    reactor = PortableGtkReactor()
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


if runtime.platform.getType() != 'posix':
    install = portableInstall

__all__ = ['install']
33.394015
92
0.633933
86864787ee128fda8f0e696df8fc12952938543c
4,356
py
Python
run_mod.py
fpl-analytics/gr_crypto
2b0ab451c9c205a9f572c4bca23fffbb68ca188f
[ "MIT" ]
null
null
null
run_mod.py
fpl-analytics/gr_crypto
2b0ab451c9c205a9f572c4bca23fffbb68ca188f
[ "MIT" ]
null
null
null
run_mod.py
fpl-analytics/gr_crypto
2b0ab451c9c205a9f572c4bca23fffbb68ca188f
[ "MIT" ]
null
null
null
""" Setup: - Import Libraries - Setup tf on multiple cores - Import Data """ import pandas as pd import numpy as np import tensorflow as tf import seaborn as sns from time import time import multiprocessing import random import os from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM, ConvLSTM2D, Flatten from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from joblib import dump, load from mod.prep import log_return, log_return_np, preprocess from mod.model import return_pred from mod.eval import evaluate_regression, evaluate_up_down cores = multiprocessing.cpu_count() tf.config.threading.set_inter_op_parallelism_threads(cores-1) root_folder = "data" wide_close = pd.read_csv(root_folder + "/working/wide_close.csv") wide_target = pd.read_csv(root_folder + "/working/wide_target.csv") asset_details = pd.read_csv(root_folder + "/asset_details.csv") assets = [str(i) for i in asset_details["Asset_ID"]] """ Preprocess """ close_returns = wide_close[assets].apply(log_return) close_returns["time"] = wide_close["time"] close_returns[assets] = close_returns[assets].replace([np.inf,-np.inf],np.nan) """ Linear Regression """ x_steps, y_steps = 60, [1, 15] col_in, col_out = "1", "1" train_x, test_x, train_y, test_y, time_d = preprocess(data_in = wide_close, col_in, col_out, time_col="time", x_steps, y_steps) # 1 step lr_1 = LinearRegression() lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,0,:], lr_1) evaluate_regression(true, pred) evaluate_up_down(true, pred) # 15 step lr_15 = LinearRegression() lr_15.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,1,:], lr_1) evaluate_regression(true, pred) evaluate_up_down(true, pred) """ calculate and store components seperately process: - first, get rolling values for each timestamp - then, predict 1 and 15 gaps and store in array """ # Production """ Steps: - Get train, val test and test indices. Importantly, this needs to cover all assets (even though not all assets exist) for the whole time period. 
- Build models """ assets = list(asset_details["Asset_ID"].astype(str)) # Get indexes i = np.select( [ (wide_close.index >= 0) & (wide_close.index <= (len(wide_close)*0.7)), (wide_close.index > (len(wide_close)*0.7)) & (wide_close.index <= (len(wide_close)*0.8)) ], ["train", "val"], default = "test") indexes = pd.DataFrame({"time":wide_close["time"], "set":i}) for a in assets: print("asset", a) filt = indexes["set"][~pd.isna(wide_close[a])] counts = filt.value_counts() df = pd.DataFrame({"counts":counts, "pct":counts/np.sum(counts)}) print(df, "\n\n") indexes_d = {} for s in indexes["set"].unique(): indexes_d[s] = indexes["time"][indexes["set"] == s] mkdir "model_files" mkdir "model_files/linear_regression" for a in assets: print("Asset", a) x_steps, y_steps = 60, [1, 16] cols_in, cols_out = a, a train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, cols_in, cols_out, "time", x_steps, y_steps) # 1 step lr_1 = LinearRegression() lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,0,:], lr_1) print("Model 1 Metrics") evaluate_regression(true, pred) evaluate_up_down(true, pred) # 16 step lr_16 = LinearRegression() lr_16.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1)) true, pred = return_pred(test_x, test_y[:,1,:], lr_16) print("Model 16 Metrics") evaluate_regression(true, pred) evaluate_up_down(true, pred) dump(lr_1, f"model_files/linear_regression/lr_{a}_1") dump(lr_16, f"model_files/linear_regression/lr_{a}_16") dump(time_d, "model_files/linear_regression/lr_times") """ Random Forest """ rf = RandomForestRegressor(n_jobs=-1) # start = time.time() rf.fit(train_x.reshape(-1, x_steps), train_y.reshape(-1)) # print("Took:", round(start-time.time()))
25.623529
93
0.677456
8687420e46b4f12f33134641d5dcf6986b995994
4,012
py
Python
bot.py
menlen/one
e24f1489d98faa9b548ebd668f2860c8d671b489
[ "Apache-2.0" ]
null
null
null
bot.py
menlen/one
e24f1489d98faa9b548ebd668f2860c8d671b489
[ "Apache-2.0" ]
null
null
null
bot.py
menlen/one
e24f1489d98faa9b548ebd668f2860c8d671b489
[ "Apache-2.0" ]
null
null
null
# This example show how to use inline keyboards and process button presses
import telebot
import time
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
import os, sys
from PIL import Image, ImageDraw, ImageFont
import random

TELEGRAM_TOKEN = '1425859530:AAF5MQE87Zg_bv3B2RLe3Vl2A5rMz6vYpsA'

bot = telebot.TeleBot(TELEGRAM_TOKEN)
channelId = -1001390673326
user_dict = {}

bot.polling(none_stop=True)
30.861538
181
0.592722
86878499d4795f1de654ad30bc93467d3e84cd3c
261
py
Python
novice/python-unit-testing/answers/test_rectangle2.py
Southampton-RSG/2019-03-13-southampton-swc
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
[ "CC-BY-4.0" ]
1
2021-06-20T11:51:37.000Z
2021-06-20T11:51:37.000Z
novice/python-unit-testing/answers/test_rectangle2.py
Southampton-RSG/2019-03-13-southampton-swc
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
[ "CC-BY-4.0" ]
1
2019-09-30T21:15:32.000Z
2019-09-30T21:15:32.000Z
novice/python-unit-testing/answers/test_rectangle2.py
Southampton-RSG/2019-03-13-southampton-swc
1f07d82c1bd1f237a19fa7a17bb4765e0364dc88
[ "CC-BY-4.0" ]
null
null
null
from rectangle2 import rectangle_area
23.727273
47
0.67433
8687b3a09f22f924b0323932d63831d4c09242d4
3,081
py
Python
tests/requestreply.py
unclechu/py-radio-class
8f96d8bcb398693d18a4ebd732415a879047edee
[ "MIT" ]
null
null
null
tests/requestreply.py
unclechu/py-radio-class
8f96d8bcb398693d18a4ebd732415a879047edee
[ "MIT" ]
null
null
null
tests/requestreply.py
unclechu/py-radio-class
8f96d8bcb398693d18a4ebd732415a879047edee
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

from unittest import TestCase, TestLoader

from radio import (Radio, ListenerNotFound, ReplyHandlerAlreadyBound,
                   HandlerAlreadyBound)

suite = TestLoader().loadTestsFromTestCase(TestRadioRequestReplyMethods)
28.794393
75
0.627069
86880b5b73b7634f999e8879e1b07c2360a00ae8
6,256
py
Python
tests/unit/types/message/test_message.py
Immich/jina
1f5f7cf4d82029d76ab41df157526fe6f6e0da50
[ "Apache-2.0" ]
1
2021-02-25T19:28:50.000Z
2021-02-25T19:28:50.000Z
tests/unit/types/message/test_message.py
Immich/jina
1f5f7cf4d82029d76ab41df157526fe6f6e0da50
[ "Apache-2.0" ]
4
2020-09-01T17:47:27.000Z
2021-04-16T23:11:57.000Z
tests/unit/types/message/test_message.py
Immich/jina
1f5f7cf4d82029d76ab41df157526fe6f6e0da50
[ "Apache-2.0" ]
null
null
null
import sys
from typing import Sequence

import pytest

from jina import Request, QueryLang, Document
from jina.clients.request import request_generator
from jina.proto import jina_pb2
from jina.proto.jina_pb2 import EnvelopeProto
from jina.types.message import Message
from jina.types.request import _trigger_fields
from tests import random_docs
32.082051
111
0.634431
8688ffb74b8525b0642bda0ae7e8535617771ade
243
py
Python
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_extra_conf.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
10
2020-07-21T21:59:54.000Z
2021-07-19T11:01:47.000Z
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_extra_conf.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
null
null
null
tabnine-vim/third_party/ycmd/ycmd/tests/python/testdata/project/settings_extra_conf.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
1
2021-01-30T18:17:01.000Z
2021-01-30T18:17:01.000Z
import os
import sys

DIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )
20.25
69
0.691358
868928ba8707be83d375694167add597b520a225
616
py
Python
SmartCache/sim/Utilities/setup.py
Cloud-PG/smart-cache
467987abece3fd4830fd615288046359761229f8
[ "Apache-2.0" ]
1
2019-10-13T09:05:24.000Z
2019-10-13T09:05:24.000Z
SmartCache/sim/Utilities/setup.py
Cloud-PG/smart-cache
467987abece3fd4830fd615288046359761229f8
[ "Apache-2.0" ]
null
null
null
SmartCache/sim/Utilities/setup.py
Cloud-PG/smart-cache
467987abece3fd4830fd615288046359761229f8
[ "Apache-2.0" ]
1
2019-05-16T11:53:38.000Z
2019-05-16T11:53:38.000Z
from distutils.core import setup

setup(
    name='utils',
    version='1.0.0',
    author='Mirco Tracolli',
    author_email='[email protected]',
    packages=[
        'utils',
    ],
    scripts=[],
    url='https://github.com/Cloud-PG/smart-cache',
    license='Apache 2.0 License',
    description='Utils for the SmartCache project',
    long_description="To do...",
    install_requires=open("requirements.txt").read(),
    classifier=[
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache 2.0 License",
        "Programming Language :: Python :: 3 :: Only"
    ]
)
26.782609
56
0.61039
868a5177cfe7a43dcc855371fdd275a394644658
2,074
py
Python
homeassistant/components/eight_sleep/binary_sensor.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
2
2020-01-03T17:06:33.000Z
2020-01-13T18:57:32.000Z
homeassistant/components/eight_sleep/binary_sensor.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
1,016
2019-06-18T21:27:47.000Z
2020-03-06T11:09:58.000Z
homeassistant/components/eight_sleep/binary_sensor.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
null
null
null
"""Support for Eight Sleep binary sensors.""" from __future__ import annotations import logging from pyeight.eight import EightSleep from homeassistant.components.binary_sensor import ( BinarySensorDeviceClass, BinarySensorEntity, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from . import EightSleepBaseEntity from .const import DATA_API, DATA_HEAT, DOMAIN _LOGGER = logging.getLogger(__name__)
28.805556
81
0.700579
868c6ea160dd2c056e7da123714e1987646a86cf
9,215
py
Python
ravem/tests/util_test.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
ravem/tests/util_test.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
ravem/tests/util_test.py
bpedersen2/indico-plugins-cern
c4f06d11d981c316fc8de2892758484deb58e2f5
[ "MIT" ]
null
null
null
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2022 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.

from unittest.mock import MagicMock

import pytest
from requests.exceptions import HTTPError, Timeout

from indico.testing.util import extract_logs

from indico_ravem.plugin import RavemPlugin
from indico_ravem.util import has_access, ravem_api_call
39.549356
112
0.703527
868d3680b0c2bd4371570ee9b629404359f69eee
1,220
py
Python
apps/organization/urls.py
stormsha/StormOnline
10983b7a9ee09958927731ee3fd74178d7534ff6
[ "Apache-2.0" ]
18
2018-03-16T07:11:01.000Z
2021-11-18T08:42:11.000Z
apps/organization/urls.py
stormsha/StormOnline
10983b7a9ee09958927731ee3fd74178d7534ff6
[ "Apache-2.0" ]
1
2018-03-15T11:40:25.000Z
2018-03-15T11:40:25.000Z
apps/organization/urls.py
stormsha/StormOnline
10983b7a9ee09958927731ee3fd74178d7534ff6
[ "Apache-2.0" ]
13
2018-03-16T07:11:05.000Z
2020-06-23T09:27:49.000Z
# _*_ coding: utf-8 _*_
# ---------------------------
__author__ = 'StormSha'
__date__ = '2018/3/28 18:01'
# ---------------------------

# -------------------------django----------------------
from django.conf.urls import url
from .views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
from .views import TeacherListView, TeacherDetailView

urlpatterns = [
    url(r'^list/$', OrgView.as_view(), name="org_list"),
    url(r'^add_ask/$', AddUserAskView.as_view(), name="add_ask"),
    url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
    url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
    url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
    url(r'^org_teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),

    # ---------------------------------------
    url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),

    # -----------------------teacher------------------------------
    url(r'^teacher/list/$', TeacherListView.as_view(), name="teacher_list"),
    url(r'^teacher/detail/(?P<teacher_id>\d+)/$', TeacherDetailView.as_view(), name="teacher_detail")
]
48.8
111
0.566393
868dd694341f559c01703d972c3b261cb6620ffe
571
py
Python
tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py
priyamshah112/Project-Descripton-Blog
8e01016c6be79776c4f5ca75563fa3daa839e39e
[ "MIT" ]
null
null
null
tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py
priyamshah112/Project-Descripton-Blog
8e01016c6be79776c4f5ca75563fa3daa839e39e
[ "MIT" ]
11
2019-11-02T20:57:52.000Z
2020-09-27T09:08:33.000Z
tech_project/lib/python2.7/site-packages/filer/migrations/0010_auto_20180414_2058.py
priyamshah112/Project-Descripton-Blog
8e01016c6be79776c4f5ca75563fa3daa839e39e
[ "MIT" ]
4
2018-08-07T17:13:48.000Z
2019-06-13T11:09:32.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
27.190476
187
0.670753
868dd7b75196bf80f589754ce91dc36872de638a
12,166
py
Python
SLHCUpgradeSimulations/Configuration/python/aging.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
SLHCUpgradeSimulations/Configuration/python/aging.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
SLHCUpgradeSimulations/Configuration/python/aging.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms

# handle normal mixing or premixing
# change assumptions about lumi rate
# turnon = True enables default, False disables
# recalibration and darkening always together
# needs lumi to set proper ZS thresholds (tbd)
44.40146
176
0.715025
868de8e68215f41c7a22fbffe3549ae81cd16557
10,106
py
Python
xml_parser.py
cbschaff/nlimb
f0564b00bab1b3367aaa88163e49bebc88f349bb
[ "MIT" ]
12
2018-10-26T19:33:05.000Z
2022-01-17T11:47:59.000Z
xml_parser.py
cbschaff/nlimb
f0564b00bab1b3367aaa88163e49bebc88f349bb
[ "MIT" ]
9
2020-01-28T22:30:55.000Z
2022-03-11T23:32:04.000Z
xml_parser.py
cbschaff/nlimb
f0564b00bab1b3367aaa88163e49bebc88f349bb
[ "MIT" ]
3
2019-07-09T14:56:01.000Z
2019-11-18T06:58:41.000Z
import numpy as np
import xml.etree.ElementTree as ET

if __name__ == '__main__':
    robot = MuJoCoXmlRobot('mujoco_assets/hopper.xml')
    params = list(1.0 * np.array(robot.get_params()))
    robot.update(params, 'mujoco_assets/hopper_test.xml')
    assert robot.get_params() == params
    #assert robot.get_height() == 1.31
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/walker2d.xml')
    params = [.4, .04, .5, .05, .55, .055, .6, .06, .5, .05, .55, .055, .6, .06]
    robot.update(params, 'mujoco_assets/walker2d_test.xml')
    assert robot.get_params() == params
    assert robot.get_height() == 1.31
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/ant.xml')
    params = [.2, .2, .06, .2, .06, .4, .06, .2, .06, .2, .06, .4, .06,
              .2, .06, .2, .06, .4, .06, .2, .06, .2, .06, .4, .06]
    robot.update(params, 'mujoco_assets/ant_test.xml')
    assert robot.get_params() == params
    assert robot.get_height() == .2
    print(robot.get_param_limits())
    print(robot.get_param_names())

    robot = MuJoCoXmlRobot('mujoco_assets/humanoid.xml')
    params = list(.8 * np.array(robot.get_params()))
    robot.update(params, 'mujoco_assets/humanoid_test.xml')
    assert robot.get_params() == params
    print(robot.get_height())
    #assert robot.get_height() == .6085
    print(robot.get_param_limits())
    print(robot.get_param_names())

    import gym, roboschool
    env = gym.make("RoboschoolHopper-v1")
    env.unwrapped.model_xml = 'mujoco_assets/hopper_test.xml'
    env.reset()
    #env.render()

    import os
    from scipy.misc import imsave
    import subprocess as sp

    outdir = 'xml_vid'
    os.makedirs(outdir, exist_ok=True)
    i = 0
    for _ in range(10):
        env.reset()
        for _ in range(100):
            env.step(env.action_space.sample())
            rgb = env.render('rgb_array')
            imsave(os.path.join(outdir, '{:05d}.png'.format(i)), rgb)
            i += 1
    sp.call(['ffmpeg', '-r', '60', '-f', 'image2', '-i',
             os.path.join(outdir, '%05d.png'), '-vcodec', 'libx264',
             '-pix_fmt', 'yuv420p', os.path.join(outdir, 'out.mp4')])
    env.close()
33.574751
169
0.608351
868e31d3b6d09c73dfd001c290d85e56d3f9bb45
672
py
Python
app/http/middleware/LoadUserMiddleware.py
josephmancuso/masonite-forum
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
[ "MIT" ]
11
2018-07-08T17:44:28.000Z
2020-03-02T10:45:37.000Z
app/http/middleware/LoadUserMiddleware.py
josephmancuso/masonite-forum
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
[ "MIT" ]
2
2018-07-21T07:49:09.000Z
2019-05-29T14:34:42.000Z
app/http/middleware/LoadUserMiddleware.py
josephmancuso/masonite-forum
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
[ "MIT" ]
5
2018-07-12T02:36:14.000Z
2020-04-05T21:10:30.000Z
''' Load User Middleware'''

from masonite.facades.Auth import Auth
29.217391
74
0.644345
868eb825011495edcc58f16f21e6e75e8ab1abc6
22,691
py
Python
src/unittest/python/merciful_elo_limit_tests.py
mgaertne/minqlx-plugin-tests
10a827fe063c86481560dcc00a8a3ce2ba60861b
[ "BSD-3-Clause" ]
4
2017-11-01T04:49:27.000Z
2020-08-08T12:11:51.000Z
src/unittest/python/merciful_elo_limit_tests.py
mgaertne/minqlx-plugin-tests
10a827fe063c86481560dcc00a8a3ce2ba60861b
[ "BSD-3-Clause" ]
null
null
null
src/unittest/python/merciful_elo_limit_tests.py
mgaertne/minqlx-plugin-tests
10a827fe063c86481560dcc00a8a3ce2ba60861b
[ "BSD-3-Clause" ]
1
2021-04-26T09:04:36.000Z
2021-04-26T09:04:36.000Z
from minqlx_plugin_test import *

import logging
import unittest

from mockito import *
from mockito.matchers import *
from hamcrest import *

from redis import Redis

from merciful_elo_limit import *
44.932673
117
0.677493
868f0e1cedcadcbc2d277dd9469765ca291fed6d
689
py
Python
meiduo_mall/celery_tasks/sms/tasks.py
Vent-Any/meiduo_mall_cangku
5b3b7f029be267cb5d2d3666f99be166d27213f1
[ "MIT" ]
null
null
null
meiduo_mall/celery_tasks/sms/tasks.py
Vent-Any/meiduo_mall_cangku
5b3b7f029be267cb5d2d3666f99be166d27213f1
[ "MIT" ]
null
null
null
meiduo_mall/celery_tasks/sms/tasks.py
Vent-Any/meiduo_mall_cangku
5b3b7f029be267cb5d2d3666f99be166d27213f1
[ "MIT" ]
null
null
null
from ronglian_sms_sdk import SmsSDK
from celery_tasks.main import app

#
# celerytask
# celery(main)
32.809524
65
0.703919
868f63854096baf68c5ff8cc2009603138d30b30
5,195
py
Python
delphiIDE.py
JeisonJHA/Plugins-Development
cccb58908eed6114c569e53d5710e70b8d53f5c5
[ "MIT" ]
null
null
null
delphiIDE.py
JeisonJHA/Plugins-Development
cccb58908eed6114c569e53d5710e70b8d53f5c5
[ "MIT" ]
null
null
null
delphiIDE.py
JeisonJHA/Plugins-Development
cccb58908eed6114c569e53d5710e70b8d53f5c5
[ "MIT" ]
null
null
null
import sublime_plugin
26.237374
99
0.609047
86920ec1c0159b8548b81683e13e218d1875aaf1
33,860
py
Python
python/test_pip_package.py
syt123450/tfjs-converter
a90fa59a44d9425beb7b1584fe753c62d62bbc4d
[ "Apache-2.0" ]
null
null
null
python/test_pip_package.py
syt123450/tfjs-converter
a90fa59a44d9425beb7b1584fe753c62d62bbc4d
[ "Apache-2.0" ]
null
null
null
python/test_pip_package.py
syt123450/tfjs-converter
a90fa59a44d9425beb7b1584fe753c62d62bbc4d
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Test the Python API and shell binary of the tensorflowjs pip package."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import glob
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.saved_model.save import save
import tensorflow_hub as hub

import tensorflowjs as tfjs


def _createKerasModel(layer_name_prefix, h5_path=None):
  """Create a Keras model for testing.

  Args:
    layer_name_prefix: A prefix string for layer names. This helps avoid
      clashes in layer names between different test methods.
    h5_path: Optional string path for a HDF5 (.h5) file to save the model in.

  Returns:
    An instance of keras.Model.
  """
  input_tensor = keras.layers.Input((3, ))
  dense1 = keras.layers.Dense(
      4,
      use_bias=True,
      kernel_initializer='ones',
      bias_initializer='zeros',
      name=layer_name_prefix + '1')(input_tensor)
  output = keras.layers.Dense(
      2,
      use_bias=False,
      kernel_initializer='ones',
      name=layer_name_prefix + '2')(dense1)
  model = keras.models.Model(inputs=[input_tensor], outputs=[output])
  if h5_path:
    model.save(h5_path)
  return model


def _createTensorFlowSavedModelV1(name_scope, save_path):
  """Create a TensorFlow SavedModel for testing.

  Args:
    name_scope: Name scope to create the model under. This helps avoid
      op and variable name clashes between different test methods.
    save_path: The directory path in which to save the model.
  """
  graph = tf.Graph()
  with graph.as_default():
    with tf.compat.v1.name_scope(name_scope):
      x = tf.compat.v1.constant([[37.0, -23.0], [1.0, 4.0]])
      w = tf.compat.v1.get_variable('w', shape=[2, 2])
      y = tf.compat.v1.matmul(x, w)
      output = tf.compat.v1.nn.softmax(y)
      init_op = w.initializer

    # Create a builder.
    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(save_path)

    with tf.compat.v1.Session() as sess:
      # Run the initializer on `w`.
      sess.run(init_op)

      builder.add_meta_graph_and_variables(
          sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
          signature_def_map={
              "serving_default":
                  tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
                      inputs={"x": x},
                      outputs={"output": output})
          },
          assets_collection=None)

    builder.save()


def _createTensorFlowSavedModel(name_scope, save_path):
  """Create a TensorFlow SavedModel for testing.

  Args:
    name_scope: Name scope to create the model under. This helps avoid
      op and variable name clashes between different test methods.
    save_path: The directory path in which to save the model.
  """
  input_data = constant_op.constant(1., shape=[1])
  root = tracking.AutoTrackable()
  root.v1 = variables.Variable(3.)
  root.v2 = variables.Variable(2.)
  root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
  to_save = root.f.get_concrete_function(input_data)

  save(root, save_path, to_save)


def _create_hub_module(save_path):
  """Create a TensorFlow Hub module for testing.

  Args:
    save_path: The directory path in which to save the model.
  """
  # Module function that doubles its input.
  graph = tf.Graph()
  with graph.as_default():
    spec = hub.create_module_spec(double_module_fn)
    m = hub.Module(spec)
  # Export the module.
  with tf.compat.v1.Session(graph=graph) as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    m.export(save_path, sess)


class ConvertTfKerasSavedModelTest(tf.test.TestCase):


if __name__ == '__main__':
  tf.test.main()
38.741419
88
0.674778
869310d00b7b8dcf8e18a56efd569e5ae8396471
7,195
py
Python
script.ezclean/resources/lib/modules/skinz.py
rrosajp/script.ezclean
ed6fbe6441713a3c96ce15a595cdd5c69291355f
[ "MIT" ]
5
2019-03-12T23:10:48.000Z
2021-05-06T05:31:26.000Z
script.ezclean/resources/lib/modules/skinz.py
rrosajp/script.ezclean-1
ed6fbe6441713a3c96ce15a595cdd5c69291355f
[ "MIT" ]
3
2019-03-17T21:53:29.000Z
2019-04-22T16:44:38.000Z
script.ezclean/resources/lib/modules/skinz.py
rrosajp/script.ezclean-1
ed6fbe6441713a3c96ce15a595cdd5c69291355f
[ "MIT" ]
4
2019-03-17T21:17:19.000Z
2020-03-30T12:45:33.000Z
# -*- coding: UTF-8 -*-
import os, re, shutil, time, xbmc

from resources.lib.modules import control

try:
    import json as simplejson
except:
    import simplejson

ADDONS = os.path.join(control.HOMEPATH, 'addons')
46.720779
254
0.580542
86939231df10a74a6b6c8263b5d61c5806d7e19e
10,360
py
Python
pyhap/characteristic.py
bdraco/HAP-python
a2a5ce109d08af2f4f5bda4075f2176a98123806
[ "Apache-2.0" ]
null
null
null
pyhap/characteristic.py
bdraco/HAP-python
a2a5ce109d08af2f4f5bda4075f2176a98123806
[ "Apache-2.0" ]
null
null
null
pyhap/characteristic.py
bdraco/HAP-python
a2a5ce109d08af2f4f5bda4075f2176a98123806
[ "Apache-2.0" ]
null
null
null
""" All things for a HAP characteristic. A Characteristic is the smallest unit of the smart home, e.g. a temperature measuring or a device status. """ import logging from pyhap.const import ( HAP_PERMISSION_READ, HAP_REPR_DESC, HAP_REPR_FORMAT, HAP_REPR_IID, HAP_REPR_MAX_LEN, HAP_REPR_PERM, HAP_REPR_TYPE, HAP_REPR_VALID_VALUES, HAP_REPR_VALUE, ) from .util import hap_type_to_uuid, uuid_to_hap_type logger = logging.getLogger(__name__) # ### HAP Format ### HAP_FORMAT_BOOL = "bool" HAP_FORMAT_INT = "int" HAP_FORMAT_FLOAT = "float" HAP_FORMAT_STRING = "string" HAP_FORMAT_ARRAY = "array" HAP_FORMAT_DICTIONARY = "dictionary" HAP_FORMAT_UINT8 = "uint8" HAP_FORMAT_UINT16 = "uint16" HAP_FORMAT_UINT32 = "uint32" HAP_FORMAT_UINT64 = "uint64" HAP_FORMAT_DATA = "data" HAP_FORMAT_TLV8 = "tlv8" HAP_FORMAT_DEFAULTS = { HAP_FORMAT_BOOL: False, HAP_FORMAT_INT: 0, HAP_FORMAT_FLOAT: 0.0, HAP_FORMAT_STRING: "", HAP_FORMAT_ARRAY: "", HAP_FORMAT_DICTIONARY: "", HAP_FORMAT_UINT8: 0, HAP_FORMAT_UINT16: 0, HAP_FORMAT_UINT32: 0, HAP_FORMAT_UINT64: 0, HAP_FORMAT_DATA: "", HAP_FORMAT_TLV8: "", } HAP_FORMAT_NUMERICS = ( HAP_FORMAT_INT, HAP_FORMAT_FLOAT, HAP_FORMAT_UINT8, HAP_FORMAT_UINT16, HAP_FORMAT_UINT32, HAP_FORMAT_UINT64, ) # ### HAP Units ### HAP_UNIT_ARC_DEGREE = "arcdegrees" HAP_UNIT_CELSIUS = "celsius" HAP_UNIT_LUX = "lux" HAP_UNIT_PERCENTAGE = "percentage" HAP_UNIT_SECONDS = "seconds" # ### Properties ### PROP_FORMAT = "Format" PROP_MAX_VALUE = "maxValue" PROP_MIN_STEP = "minStep" PROP_MIN_VALUE = "minValue" PROP_PERMISSIONS = "Permissions" PROP_UNIT = "unit" PROP_VALID_VALUES = "ValidValues" PROP_NUMERIC = (PROP_MAX_VALUE, PROP_MIN_VALUE, PROP_MIN_STEP, PROP_UNIT)
33.099042
86
0.643629
8693c080676cb2787d00c99c4612bc9e39e2bff8
1,767
py
Python
configs/_base_/datasets/uniter/vqa_dataset_uniter.py
linxi1158/iMIX
af87a17275f02c94932bb2e29f132a84db812002
[ "Apache-2.0" ]
23
2021-06-26T08:45:19.000Z
2022-03-02T02:13:33.000Z
configs/_base_/datasets/uniter/vqa_dataset_uniter.py
XChuanLee/iMIX
99898de97ef8b45462ca1d6bf2542e423a73d769
[ "Apache-2.0" ]
null
null
null
configs/_base_/datasets/uniter/vqa_dataset_uniter.py
XChuanLee/iMIX
99898de97ef8b45462ca1d6bf2542e423a73d769
[ "Apache-2.0" ]
9
2021-06-10T02:36:20.000Z
2021-11-09T02:18:16.000Z
dataset_type = 'UNITER_VqaDataset'
data_root = '/home/datasets/mix_data/UNITER/VQA/'

train_datasets = ['train']
test_datasets = ['minival']  # name not in use, but have defined one to run

vqa_cfg = dict(
    train_txt_dbs=[
        data_root + 'vqa_train.db',
        data_root + 'vqa_trainval.db',
        data_root + 'vqa_vg.db',
    ],
    train_img_dbs=[
        data_root + 'coco_train2014/',
        data_root + 'coco_val2014',
        data_root + 'vg/',
    ],
    val_txt_db=data_root + 'vqa_devval.db',
    val_img_db=data_root + 'coco_val2014/',
    ans2label_file=data_root + 'ans2label.json',
    max_txt_len=60,
    conf_th=0.2,
    max_bb=100,
    min_bb=10,
    num_bb=36,
    train_batch_size=20480,  # 5120,
    val_batch_size=40960,  # 10240,
)

BUCKET_SIZE = 8192

train_data = dict(
    samples_per_gpu=vqa_cfg['train_batch_size'],
    workers_per_gpu=4,
    pin_memory=True,
    batch_sampler=dict(
        type='TokenBucketSampler',
        bucket_size=BUCKET_SIZE,
        batch_size=vqa_cfg['train_batch_size'],
        drop_last=True,
        size_multiple=8,
    ),
    data=dict(
        type=dataset_type,
        datacfg=vqa_cfg,
        train_or_val=True,
    ),
)

test_data = dict(
    samples_per_gpu=vqa_cfg['val_batch_size'],
    workers_per_gpu=4,
    batch_sampler=dict(
        type='TokenBucketSampler',
        bucket_size=BUCKET_SIZE,
        batch_size=vqa_cfg['val_batch_size'],
        drop_last=False,
        size_multiple=8,
    ),
    pin_memory=True,
    data=dict(
        type=dataset_type,
        datacfg=vqa_cfg,
        train_or_val=False,
    ),
)

post_processor = dict(
    type='Evaluator',
    metrics=[dict(type='UNITER_AccuracyMetric')],
    dataset_converters=[dict(type='UNITER_DatasetConverter')],
)
24.205479
75
0.63837
869425b882c792777c4c9df4c4e4ede390b45006
752
py
Python
students/k3340/laboratory_works/laboratory_works/Arlakov_Denis/laboratiry_work_2_and_3/lab/django-react-ecommerce-master/home/urls.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
10
2020-03-20T09:06:12.000Z
2021-07-27T13:06:02.000Z
students/k3340/laboratory_works/laboratory_works/Arlakov_Denis/laboratiry_work_2_and_3/lab/django-react-ecommerce-master/home/urls.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
134
2020-03-23T09:47:48.000Z
2022-03-12T01:05:19.000Z
students/k3340/laboratory_works/laboratory_works/Arlakov_Denis/laboratiry_work_2_and_3/lab/django-react-ecommerce-master/home/urls.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
71
2020-03-20T12:45:56.000Z
2021-10-31T19:22:25.000Z
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic import TemplateView

urlpatterns = [
    path('api-auth/', include('rest_framework.urls')),
    path('rest-auth/', include('rest_auth.urls')),
    path('rest-auth/registration/', include('rest_auth.registration.urls')),
    path('admin/', admin.site.urls),
    path('api/', include('core.api.urls')),
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if not settings.DEBUG:
    urlpatterns += [re_path(r'^.*', TemplateView.as_view(template_name='index.html'))]
32.695652
78
0.678191
869447ef2f6a217e512b23fd0c00a4c4fa0f87a0
22,881
py
Python
20200416_Socialmail/mailserverUi.py
karta1782310/python-docx-automated-report-generation
f0e02a50a9e9547d131e583be0711aad72f08b51
[ "MIT" ]
null
null
null
20200416_Socialmail/mailserverUi.py
karta1782310/python-docx-automated-report-generation
f0e02a50a9e9547d131e583be0711aad72f08b51
[ "MIT" ]
null
null
null
20200416_Socialmail/mailserverUi.py
karta1782310/python-docx-automated-report-generation
f0e02a50a9e9547d131e583be0711aad72f08b51
[ "MIT" ]
null
null
null
#!/bin/bash
# -*- coding: UTF-8 -*-

# from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QGridLayout,
                             QMessageBox, QFileDialog, QLabel, QLineEdit,
                             QPushButton, QComboBox, QCheckBox, QDateTimeEdit,
                             QTextEdit, QTabWidget, QTableWidget,
                             QTableWidgetItem, QHeaderView)
from PyQt5.QtGui import QPalette, QColor, QBrush
from PyQt5.QtCore import Qt, QDateTime
from pyqtgraph import GraphicsLayoutWidget, setConfigOption, setConfigOptions
import qdarkstyle, sys
import mylibrary.genmail as gm
from GenAndSendMail import insert_send_mail
from server.database import Database
from server.sendmail import Smtp
from server.client import Client
from email import generator
from pandas import DataFrame
from copy import deepcopy


def main():
    app = QApplication(sys.argv)
    gui = MailserverUi()
    gui.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
41.75365
151
0.617193
86947b6782d353d9c52f3c8165971a18131a9c5c
3,869
py
Python
nntools/layers/corrmm.py
317070/nntools
00e2865b1f8246254b3adc22c37989a8b77718d5
[ "MIT" ]
null
null
null
nntools/layers/corrmm.py
317070/nntools
00e2865b1f8246254b3adc22c37989a8b77718d5
[ "MIT" ]
null
null
null
nntools/layers/corrmm.py
317070/nntools
00e2865b1f8246254b3adc22c37989a8b77718d5
[ "MIT" ]
null
null
null
""" GpuCorrMM-based convolutional layers """ import numpy as np import theano import theano.tensor as T from theano.sandbox.cuda.basic_ops import gpu_contiguous from theano.sandbox.cuda.blas import GpuCorrMM from .. import init from .. import nonlinearities from . import base # base class for all layers that rely on GpuCorrMM directly
37.931373
135
0.632205
8695dfaa20d2f47de4c76bf66aa5cf33728fae34
3,292
py
Python
tests/python/correctness/simple_test_aux_index.py
dubey/weaver
56a42fd2d0bbb14867ba792ca5461d16310a7387
[ "BSD-3-Clause" ]
163
2015-01-02T03:51:38.000Z
2022-03-21T23:06:39.000Z
tests/python/correctness/simple_test_aux_index.py
dubey/weaver
56a42fd2d0bbb14867ba792ca5461d16310a7387
[ "BSD-3-Clause" ]
1
2015-04-08T23:17:06.000Z
2015-04-24T15:25:26.000Z
tests/python/correctness/simple_test_aux_index.py
dubey/weaver
56a42fd2d0bbb14867ba792ca5461d16310a7387
[ "BSD-3-Clause" ]
20
2015-02-17T19:24:05.000Z
2020-10-29T01:59:18.000Z
#! /usr/bin/env python
#
# ===============================================================
#    Description:  Sanity check for fresh install.
#
#        Created:  2014-08-12 16:42:52
#
#         Author:  Ayush Dubey, [email protected]
#
# Copyright (C) 2013, Cornell University, see the LICENSE file
#                     for licensing agreement
# ===============================================================
#

import sys
try:
    import weaver.client as client
except ImportError:
    import client

config_file = ''
if len(sys.argv) > 1:
    config_file = sys.argv[1]

# create client object
c = client.Client('128.84.167.220', 2002, config_file)

# check aux index
assert c.aux_index()

# 1. create node for user ayush
c.begin_tx()
c.create_node('ayush')
c.set_node_properties({'type': 'user', 'age': '25'}, 'ayush')
c.end_tx()

# 2. create node for user egs
c.begin_tx()
c.create_node('egs')
c.set_node_property('type', 'user', 'egs')
c.end_tx()

# 3. ayush follows egs
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
c.set_edge_property(edge='e1', key='type', value='follows')
c.create_edge('egs', 'ayush', 'e2')
c.set_edge_property(edge='e2', key='type', value='followed_by')
c.end_tx()

# 4. add a post and restrict visibility to followers only
c.begin_tx()
c.create_node('post')
c.set_node_property('type', 'post', 'post')
c.set_node_property('visibility', 'followers', 'post')
e3 = c.create_edge('egs', 'post')
c.set_edge_property(edge=e3, key='type', value='posted')
c.end_tx()

# 5. 'like' the post
c.begin_tx()
e4 = c.create_edge('post', 'ayush')
c.set_edge_property(edge=e4, key='type', value='liked_by')
c.end_tx()

# 6. list all the people who like egs's post
return_nodes = c.traverse('egs', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 7. try to create node with same handle as before
c.begin_tx()
c.create_node('ayush')
try:
    c.end_tx()
    assert False, 'create node passed'
except client.WeaverError:
    pass

# 8. try to create edge with same handle as before
c.begin_tx()
c.create_edge('ayush', 'egs', 'e1')
try:
    c.end_tx()
    assert False, 'create edge passed'
except client.WeaverError:
    pass

# 9. add auxiliary handles to nodes
c.begin_tx()
c.add_alias('ad688', 'ayush')
c.add_alias('el33th4x0r', 'egs')
c.end_tx()

# 10. list all the people who like egs's post
# this time with aliases instead of handles
return_nodes = c.traverse('el33th4x0r', {'type': 'user'}).out_edge({'type': 'posted'}).node({'type': 'post'}).out_edge({'type': 'liked_by'}).node({'type': 'user'}).execute()
assert len(return_nodes) == 1, 'traversal returned incorrect #nodes'
assert 'ayush' in return_nodes, 'traversal returned bad node handle'

# 11. get node and check it is valid
ad = c.get_node('ayush')
assert 'ad688' in ad.aliases
assert 'type' in ad.properties
assert 'user' in ad.properties['type']
assert 'age' in ad.properties
assert '25' in ad.properties['age']
assert 'e1' in ad.out_edges

print 'Correctly executed 11 transactions of varying complexity, pass simple_test.'
print 'Success, you have a working Weaver setup!'
28.626087
173
0.663123
8696b91ed345a9efbc515a25e28bfe35f30846c8
3,831
py
Python
ldtools/helpers.py
dmr/Ldtools
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
[ "BSD-2-Clause" ]
3
2015-12-24T15:18:46.000Z
2022-02-09T06:56:40.000Z
ldtools/helpers.py
dmr/Ldtools
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
[ "BSD-2-Clause" ]
1
2016-10-10T17:26:05.000Z
2016-10-10T17:26:05.000Z
ldtools/helpers.py
dmr/Ldtools
9cc5474404a07bd4b7ad756d31306dfc37a39c7b
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals

try:
    unicode
except NameError:
    basestring = unicode = str  # Python 3

import logging

import rdflib
from rdflib import compare

logger = logging.getLogger("ldtools")

RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"

# The background is set with 40 plus the number of the color, and
# the foreground with 30
# These are the sequences need to get colored ouput
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

COL = {
    'DEBUG': BLUE,
    'INFO': MAGENTA,
    'WARNING': YELLOW,
    'CRITICAL': YELLOW,
    'ERROR': RED}


def my_graph_diff(graph1, graph2):
    """Compares graph2 to graph1 and highlights everything that changed.
    Colored if pygments available"""

    # quick fix for wrong type
    if not type(graph1) == type(graph2) == rdflib.Graph:
        if type(graph1) == rdflib.ConjunctiveGraph:
            g1contexts = list(graph1.contexts())
            assert len(g1contexts) == 1
            graph1 = g1contexts[0]
        if type(graph2) == rdflib.ConjunctiveGraph:
            g2contexts = list(graph2.contexts())
            assert len(g2contexts) == 1
            graph2 = g2contexts[0]

    # Return if both graphs are isomorphic
    iso1 = compare.to_isomorphic(graph1)
    iso2 = compare.to_isomorphic(graph2)

    if graph1.identifier == graph2.identifier:
        str_bit = u"The 2 '%s' Graphs" % graph1.identifier
    else:
        str_bit = (u"Graphs '%s' and '%s'"
                   % (graph1.identifier, graph2.identifier))

    if iso1 == iso2:
        logger.debug(u"%s are isomorphic" % str_bit)
        return

    print(u"Differences between %s." % str_bit)

    in_both, in_first, in_second = compare.graph_diff(iso1, iso2)

    sorted_first = dump_nt_sorted(in_first)
    sorted_second = dump_nt_sorted(in_second)

    import difflib

    diff = difflib.unified_diff(
        sorted_first,
        sorted_second,
        u'Original',
        u'Current',
        lineterm=''
    )

    try:
        from pygments import highlight
        from pygments.formatters import terminal
        from pygments.lexers import web
        lexer = web.XmlLexer()
        formatter = terminal.TerminalFormatter()
        print(highlight(u'\n'.join(diff), lexer, formatter))
    except ImportError:
        logger.info("Install pygments for colored diffs")
        print(u'\n'.join(diff))
    except UnicodeDecodeError:
        print(u"Only in first", unicode(sorted_first))
        print(u"Only in second", unicode(sorted_second))
30.895161
79
0.631689
8696be28bebb97248ddd7aa9ff8ffc4d35ce9393
1,420
py
Python
fakenet/diverters/debuglevels.py
AzzOnFire/flare-fakenet-ng
bafd7e97b61cd43190dee7f1d2c3f4388488af76
[ "Apache-2.0" ]
null
null
null
fakenet/diverters/debuglevels.py
AzzOnFire/flare-fakenet-ng
bafd7e97b61cd43190dee7f1d2c3f4388488af76
[ "Apache-2.0" ]
null
null
null
fakenet/diverters/debuglevels.py
AzzOnFire/flare-fakenet-ng
bafd7e97b61cd43190dee7f1d2c3f4388488af76
[ "Apache-2.0" ]
null
null
null
# Debug print levels for fine-grained debug trace output control
DNFQUEUE = (1 << 0)     # netfilterqueue
DGENPKT = (1 << 1)      # Generic packet handling
DGENPKTV = (1 << 2)     # Generic packet handling with TCP analysis
DCB = (1 << 3)          # Packet handlign callbacks
DPROCFS = (1 << 4)      # procfs
DIPTBLS = (1 << 5)      # iptables
DNONLOC = (1 << 6)      # Nonlocal-destined datagrams
DDPF = (1 << 7)         # DPF (Dynamic Port Forwarding)
DDPFV = (1 << 8)        # DPF (Dynamic Port Forwarding) Verbose
DIPNAT = (1 << 9)       # IP redirection for nonlocal-destined datagrams
DMANGLE = (1 << 10)     # Packet mangling
DPCAP = (1 << 11)       # Pcap write logic
DIGN = (1 << 12)        # Packet redirect ignore conditions
DFTP = (1 << 13)        # FTP checks
DMISC = (1 << 27)       # Miscellaneous

DCOMP = 0x0fffffff      # Component mask
DFLAG = 0xf0000000      # Flag mask
DEVERY = 0x0fffffff     # Log everything, low verbosity
DEVERY2 = 0x8fffffff    # Log everything, complete verbosity

DLABELS = {
    DNFQUEUE: 'NFQUEUE',
    DGENPKT: 'GENPKT',
    DGENPKTV: 'GENPKTV',
    DCB: 'CB',
    DPROCFS: 'PROCFS',
    DIPTBLS: 'IPTABLES',
    DNONLOC: 'NONLOC',
    DDPF: 'DPF',
    DDPFV: 'DPFV',
    DIPNAT: 'IPNAT',
    DMANGLE: 'MANGLE',
    DPCAP: 'PCAP',
    DIGN: 'IGN',
    DFTP: 'FTP',
    DIGN | DFTP: 'IGN-FTP',
    DMISC: 'MISC',
}

DLABELS_INV = {v.upper(): k for k, v in DLABELS.items()}
33.023256
72
0.592958
869714958dec93fb87488625f1ab7000485c9fb2
3,175
py
Python
multichannel_lstm/train.py
zhr1201/Multi-channel-speech-extraction-using-DNN
4e48869e02b815a8b094acc9251ac6586fda350c
[ "MIT" ]
65
2017-08-04T03:36:56.000Z
2022-03-10T07:25:17.000Z
multichannel_lstm/train.py
zhr1201/Multi-channel-speech-extraction-using-DNN
4e48869e02b815a8b094acc9251ac6586fda350c
[ "MIT" ]
7
2017-10-10T02:34:08.000Z
2019-09-27T08:59:27.000Z
multichannel_lstm/train.py
zhr1201/Multi-channel-speech-extraction-using-DNN
4e48869e02b815a8b094acc9251ac6586fda350c
[ "MIT" ]
39
2017-08-02T04:27:37.000Z
2021-11-03T06:43:25.000Z
'''
Script for training the model
'''
import tensorflow as tf
import numpy as np
from input import BatchGenerator
from model import MultiRnn
import time
from datetime import datetime
import os
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt

sum_dir = 'sum'  # dir to write summary
train_dir = 'ckpt'  # dir to store the model
data_dir = 'train.pkl'  # dir of the data set

NEFF = 129  # effective FFT points
batch_size = 128
num_steps = 20
epochs = 2000
cell_type = 'NL_LSTM'
state_size = 256
output_size = 129
num_layer = 3
learning_rate = 0.0001

# build the model
rnn_model = MultiRnn(
    cell_type, state_size, output_size,
    batch_size, num_layer, learning_rate, num_steps)

# input data and referene data placeholder
in_data = tf.placeholder(
    tf.float32, [batch_size, num_steps, 2 * NEFF])
ref_data = tf.placeholder(
    tf.float32, [batch_size, num_steps, NEFF])

# make inference
init_state, final_state, inf_data = rnn_model.inference(in_data)

# compute loss
loss = rnn_model.loss(inf_data, ref_data)

saver = tf.train.Saver(tf.all_variables())

summary_op = tf.merge_all_summaries()

train_op = rnn_model.train(loss)

batch_gen = BatchGenerator(data_dir, batch_size, num_steps, epochs)

with tf.Session() as sess:
    summary_writer = tf.train.SummaryWriter(
        sum_dir, sess.graph)
    sess.run(tf.initialize_all_variables())
    steps = 0
    # generator for epoch data
    for idx, epoch in enumerate(batch_gen.gen_epochs()):
        training_state = None
        # generator for batch data
        for f_data, b_data, r_data, v_data in epoch:
            start_time = time.time()
            steps += 1
            in_data_np = np.concatenate((f_data, b_data), axis=2)
            if steps % 100 == 0:
                feed_dict = {in_data: in_data_np, ref_data: r_data}
                if training_state is not None:
                    feed_dict[init_state] = training_state
                # training the net
                loss_value, training_state, _, summary_str, test_inf = sess.run(
                    [loss, final_state, train_op, summary_op, inf_data],
                    feed_dict)
                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = batch_size / duration
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch, epoch %d)')
                print (format_str % (datetime.now(), steps, loss_value,
                                     examples_per_sec, sec_per_batch, idx))
                summary_writer.add_summary(summary_str, steps)
            else:
                feed_dict = {in_data: in_data_np, ref_data: r_data}
                if training_state is not None:
                    feed_dict[init_state] = training_state
                loss_value, training_state, _ = sess.run(
                    [loss, final_state, train_op], feed_dict)
            if steps % 10000 == 0:
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=steps)
33.776596
83
0.624252
8697573e23a0bff4599f9e6ef4bcf4db3b6b315f
4,002
py
Python
python_modules/dagster/dagster/daemon/cli/__init__.py
elsenorbw/dagster
b38822d7463812624dab0b2dae7c62e2a8d59828
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster/daemon/cli/__init__.py
elsenorbw/dagster
b38822d7463812624dab0b2dae7c62e2a8d59828
[ "Apache-2.0" ]
null
null
null
python_modules/dagster/dagster/daemon/cli/__init__.py
elsenorbw/dagster
b38822d7463812624dab0b2dae7c62e2a8d59828
[ "Apache-2.0" ]
null
null
null
import os
import sys
import threading
import time
import warnings
from contextlib import ExitStack

import click
import pendulum

from dagster import __version__
from dagster.core.instance import DagsterInstance
from dagster.daemon.controller import (
    DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
    DagsterDaemonController,
    all_daemons_healthy,
    all_daemons_live,
    daemon_controller_from_instance,
    debug_daemon_heartbeats,
    get_daemon_status,
)
from dagster.utils.interrupts import capture_interrupts, raise_interrupts_as


def create_dagster_daemon_cli():
    commands = {
        "run": run_command,
        "health-check": health_check_command,
        "liveness-check": liveness_check_command,
        "wipe": wipe_command,
        "debug": debug_group,
    }

    return group


cli = create_dagster_daemon_cli()
28.183099
121
0.696902
869758494ec4227a029bca8c4e214109b3aea62a
331
py
Python
tests/exhaustive/nfl_tests.py
atklaus/sportsreference
22a45ea83ce1608c3176f00d4f414d5b9463605c
[ "MIT" ]
1
2020-03-08T20:17:39.000Z
2020-03-08T20:17:39.000Z
tests/exhaustive/nfl_tests.py
atklaus/sportsreference
22a45ea83ce1608c3176f00d4f414d5b9463605c
[ "MIT" ]
null
null
null
tests/exhaustive/nfl_tests.py
atklaus/sportsreference
22a45ea83ce1608c3176f00d4f414d5b9463605c
[ "MIT" ]
null
null
null
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))

from sportsreference.nfl.teams import Teams

for team in Teams():
    print(team.name)
    for player in team.roster.players:
        print(player.name)
    for game in team.schedule:
        print(game.dataframe)
        print(game.dataframe_extended)
27.583333
62
0.700906
86979f732a31535e5210a87577515eada4d424aa
1,116
py
Python
rust-old/python/examples/map_fields.py
SerebryakovMA/quelea
4bac70d60852a454ad6533d08a02e018c75dc377
[ "MIT" ]
3
2021-03-01T15:35:49.000Z
2021-04-04T17:24:48.000Z
rust-old/python/examples/map_fields.py
SerebryakovMA/quelea
4bac70d60852a454ad6533d08a02e018c75dc377
[ "MIT" ]
null
null
null
rust-old/python/examples/map_fields.py
SerebryakovMA/quelea
4bac70d60852a454ad6533d08a02e018c75dc377
[ "MIT" ]
null
null
null
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

import sys
sys.path.append("../")
from quelea import *

nx = 217
ny = 133

x0 = 0
x1 = 30 # lambdas
y0 = 0
y1 = 20 # lambdas

xs = np.linspace(x0, x1, nx)
ys = np.linspace(y0, y1, ny)

# 2d array of (x, y, z, t)
coords = np.array( [ [x, y, 0, 0] for x in xs for y in ys ] )

# for map_fields function this should be converted from 2D to 1D array
coords = coords.reshape((4 * nx * ny,))

ftype = 1 # plane wave
a0 = 1 # normalized field amplitude
omega = 1 # frequency
fparam = [a0, 1, 0, 0, 0, 1, 0, 0, omega] # parameters of the plane wave

ex, ey, ez, bx, by, bz = map_fields(coords, ftype, fparam)

# now convert to 2d arrays
ex = ex.reshape((nx, ny))
ey = ey.reshape((nx, ny))
ez = ez.reshape((nx, ny))
bx = bx.reshape((nx, ny))
by = by.reshape((nx, ny))
bz = bz.reshape((nx, ny))
ex = ex.transpose()
ey = ey.transpose()
ez = ez.transpose()
bx = bx.transpose()
by = by.transpose()
bz = bz.transpose()

plt.imshow(ey, cmap = 'RdYlBu', origin = 'lower', extent = [x0, x1, y0, y1])
plt.colorbar()
plt.clim(-a0, a0)
plt.savefig("map_fields.pdf")
21.882353
76
0.641577
86981ef4c2dc9e662bd6493203efcef25a7c5284
4,709
py
Python
test.py
t-kaichi/hyperspoof
6effdf03be8489ba74154a12416c69948681aa51
[ "MIT" ]
10
2021-06-23T09:42:30.000Z
2022-03-31T22:26:17.000Z
test.py
t-kaichi/hyperspoof
6effdf03be8489ba74154a12416c69948681aa51
[ "MIT" ]
null
null
null
test.py
t-kaichi/hyperspoof
6effdf03be8489ba74154a12416c69948681aa51
[ "MIT" ]
null
null
null
import os

from absl import app
from absl import flags
import numpy as np
import tqdm
from tensorflow.keras import Model
from albumentations import (
    Compose, HorizontalFlip, RandomBrightness,RandomContrast,
    ShiftScaleRotate, ToFloat, VerticalFlip)

from utils import reset_tf
from eval_utils import calc_score_variance
from models import build_seg_model, build_pixel_mlp_class_model
from VegetableSequence import VegetableDataset, VegetableSequence
from temporal_random_seed import TemporalRandomSeed
import myFlags

FLAGS = flags.FLAGS

if __name__ == "__main__":
    app.run(main)
36.503876
84
0.660437
8698961278b2541aa172b54c8053ea36ceff0d54
4,612
py
Python
generator/apps.py
TheJacksonLaboratory/jaxid_generator
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
[ "Apache-2.0" ]
2
2020-10-19T17:21:09.000Z
2020-10-20T14:27:25.000Z
generator/apps.py
cometsong/jaxid_generator
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
[ "Apache-2.0" ]
null
null
null
generator/apps.py
cometsong/jaxid_generator
be5222d9c5ce57a169b94b0afd1ae9f7f10a66c1
[ "Apache-2.0" ]
null
null
null
from django.conf import settings

from suit import apps
from suit.apps import DjangoSuitConfig
from suit.menu import ParentItem, ChildItem

APP_NAME = settings.APP_NAME
WIKI_URL = settings.WIKI_URL
37.803279
198
0.550954
869a0bbbdd3ad540b61675d429b25b1caee7f14d
38,519
py
Python
tiled-lutnet/training-software/MNIST-CIFAR-SVHN/models/MNIST/scripts/lutnet_init.py
awai54st/LUTNet
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
[ "BSD-2-Clause" ]
38
2019-10-28T10:06:33.000Z
2022-02-21T21:38:39.000Z
tiled-lutnet/training-software/MNIST-CIFAR-SVHN/models/MNIST/scripts/lutnet_init.py
awai54st/LUTNet
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
[ "BSD-2-Clause" ]
null
null
null
tiled-lutnet/training-software/MNIST-CIFAR-SVHN/models/MNIST/scripts/lutnet_init.py
awai54st/LUTNet
81b044f31d1131bee1a7fae41fc4d2fb102ea73a
[ "BSD-2-Clause" ]
13
2019-10-28T10:17:48.000Z
2021-08-10T21:37:11.000Z
import h5py import numpy as np np.set_printoptions(threshold=np.nan) from shutil import copyfile copyfile("dummy_lutnet.h5", "pretrained_bin.h5") # create pretrained.h5 using datastructure from dummy.h5 bl = h5py.File("baseline_pruned.h5", 'r') #dummy = h5py.File("dummy.h5", 'r') pretrained = h5py.File("pretrained_bin.h5", 'r+') # dense layer 1 bl_w1 = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"] bl_pruning_mask = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"] bl_gamma = bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"] zero_fill = np.zeros(np.shape(np.array(bl_w1))) pret_w1 = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"] pret_pruning_mask = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"] p_gamma = pretrained["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"] pret_w1[...] = np.array(bl_w1) p_gamma[...] = np.array(bl_gamma) pret_pruning_mask[...] = np.array(bl_pruning_mask) print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask)))) # dense layer 2 bl_w1 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"] bl_rand_map_0 = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"] bl_pruning_mask = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"] bl_gamma = bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"] bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"] pret_rand_map_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_0:0"] pret_rand_map_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_1:0"] pret_rand_map_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_2:0"] pret_pruning_mask = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"] p_gamma = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"] pret_means = pretrained["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"] pret_c1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"] pret_c2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"] pret_c3 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"] pret_c4 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"] pret_c5 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"] pret_c6 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"] pret_c7 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"] pret_c8 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"] pret_c9 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_9:0"] pret_c10= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_10:0"] pret_c11= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_11:0"] pret_c12= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_12:0"] pret_c13= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_13:0"] pret_c14= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_14:0"] pret_c15= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_15:0"] pret_c16= 
pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_16:0"] pret_c17= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_17:0"] pret_c18= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_18:0"] pret_c19= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_19:0"] pret_c20= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_20:0"] pret_c21= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_21:0"] pret_c22= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_22:0"] pret_c23= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_23:0"] pret_c24= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_24:0"] pret_c25= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_25:0"] pret_c26= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_26:0"] pret_c27= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_27:0"] pret_c28= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_28:0"] pret_c29= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_29:0"] pret_c30= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_30:0"] pret_c31= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_31:0"] pret_c32= pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_32:0"] pret_w1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_33:0"] pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_0:0"] pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_1:0"] pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map_exp_2:0"] weight_shape = np.shape(bl_w1) tile_shape = np.shape(pret_c1) zero_fill = np.zeros(tile_shape) one_fill = np.ones(tile_shape) neg_one_fill = -np.ones(tile_shape) # randomisation and pruning recovery bl_w1_unroll = np.array(bl_w1) bl_w1 = np.array(bl_w1) rand_map_0 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_0) rand_map_1 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_1) rand_map_2 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_2) pruning_mask = np.array(bl_pruning_mask).astype(bool) init_mask = np.logical_not(pruning_mask[rand_map_0]) pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)] pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover) init_mask = np.reshape(init_mask, tile_shape) # expand randomisation map across tiles rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]]) rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]]) rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]]) for i in range(weight_shape[0]): rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) 
bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand] bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape) w1 = bl_w1 # connect1 only c1 = one_fill c2 = neg_one_fill c3 = one_fill c4 = neg_one_fill c5 = one_fill c6 = neg_one_fill c7 = one_fill c8 = neg_one_fill c9 = one_fill c10 = neg_one_fill c11 = one_fill c12 = neg_one_fill c13 = one_fill c14 = neg_one_fill c15 = one_fill c16 = neg_one_fill c17 = neg_one_fill c18 = one_fill c19 = neg_one_fill c20 = one_fill c21 = neg_one_fill c22 = one_fill c23 = neg_one_fill c24 = one_fill c25 = neg_one_fill c26 = one_fill c27 = neg_one_fill c28 = one_fill c29 = neg_one_fill c30 = one_fill c31 = neg_one_fill c32 = one_fill pret_w1 [...] = w1 pret_c1 [...] = c1 pret_c2 [...] = c2 pret_c3 [...] = c3 pret_c4 [...] = c4 pret_c5 [...] = c5 pret_c6 [...] = c6 pret_c7 [...] = c7 pret_c8 [...] = c8 pret_c9 [...] = c9 pret_c10[...] = c10 pret_c11[...] = c11 pret_c12[...] = c12 pret_c13[...] = c13 pret_c14[...] = c14 pret_c15[...] = c15 pret_c16[...] = c16 pret_c17[...] = c17 pret_c18[...] = c18 pret_c19[...] = c19 pret_c20[...] = c20 pret_c21[...] = c21 pret_c22[...] = c22 pret_c23[...] = c23 pret_c24[...] = c24 pret_c25[...] = c25 pret_c26[...] = c26 pret_c27[...] = c27 pret_c28[...] = c28 pret_c29[...] = c29 pret_c30[...] = c30 pret_c31[...] = c31 pret_c32[...] = c32 pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float) pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float) pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float) p_gamma[...] = np.array(bl_gamma) pret_means[...] = np.array(bl_means) pret_pruning_mask[...] = np.array(bl_pruning_mask) rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float) pret_rand_map_exp_0[...] = rand_map_0_expand rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float) pret_rand_map_exp_1[...] = rand_map_1_expand rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float) pret_rand_map_exp_2[...] 
= rand_map_2_expand print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask)))) # dense layer 3 bl_w1 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"] bl_rand_map_0 = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"] bl_pruning_mask = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"] bl_gamma = bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"] bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"] pret_rand_map_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_0:0"] pret_rand_map_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_1:0"] pret_rand_map_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_2:0"] pret_pruning_mask = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"] p_gamma = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"] pret_means = pretrained["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"] pret_c1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"] pret_c2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"] pret_c3 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"] pret_c4 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"] pret_c5 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"] pret_c6 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"] pret_c7 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"] pret_c8 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"] pret_c9 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_9:0"] pret_c10= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_10:0"] pret_c11= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_11:0"] pret_c12= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_12:0"] pret_c13= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_13:0"] pret_c14= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_14:0"] pret_c15= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_15:0"] pret_c16= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_16:0"] pret_c17= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_17:0"] pret_c18= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_18:0"] pret_c19= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_19:0"] pret_c20= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_20:0"] pret_c21= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_21:0"] pret_c22= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_22:0"] pret_c23= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_23:0"] pret_c24= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_24:0"] pret_c25= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_25:0"] pret_c26= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_26:0"] pret_c27= 
pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_27:0"] pret_c28= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_28:0"] pret_c29= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_29:0"] pret_c30= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_30:0"] pret_c31= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_31:0"] pret_c32= pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_32:0"] pret_w1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_33:0"] pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_0:0"] pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_1:0"] pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map_exp_2:0"] weight_shape = np.shape(bl_w1) tile_shape = np.shape(pret_c1) zero_fill = np.zeros(tile_shape) one_fill = np.ones(tile_shape) neg_one_fill = -np.ones(tile_shape) # randomisation and pruning recovery bl_w1_unroll = np.array(bl_w1) bl_w1 = np.array(bl_w1) rand_map_0 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_0) rand_map_1 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_1) rand_map_2 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_2) pruning_mask = np.array(bl_pruning_mask).astype(bool) init_mask = np.logical_not(pruning_mask[rand_map_0]) pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)] pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover) init_mask = np.reshape(init_mask, tile_shape) # expand randomisation map across tiles rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]]) rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]]) rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]]) for i in range(weight_shape[0]): rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand] bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape) w1 = bl_w1 # connect1 only c1 = one_fill c2 = neg_one_fill c3 = one_fill c4 = neg_one_fill c5 = one_fill c6 = neg_one_fill c7 = one_fill c8 = neg_one_fill c9 = one_fill c10 = neg_one_fill c11 = one_fill c12 = neg_one_fill c13 = one_fill c14 = neg_one_fill c15 = one_fill c16 = neg_one_fill c17 = neg_one_fill c18 = one_fill c19 = neg_one_fill c20 = one_fill c21 = neg_one_fill c22 = one_fill c23 = neg_one_fill c24 = one_fill c25 = neg_one_fill c26 = one_fill c27 = neg_one_fill c28 = one_fill c29 = neg_one_fill c30 = one_fill c31 = neg_one_fill c32 = one_fill pret_w1 [...] = w1 pret_c1 [...] = c1 pret_c2 [...] = c2 pret_c3 [...] = c3 pret_c4 [...] = c4 pret_c5 [...] = c5 pret_c6 [...] = c6 pret_c7 [...] = c7 pret_c8 [...] = c8 pret_c9 [...] = c9 pret_c10[...] = c10 pret_c11[...] = c11 pret_c12[...] = c12 pret_c13[...] = c13 pret_c14[...] = c14 pret_c15[...] = c15 pret_c16[...] 
= c16 pret_c17[...] = c17 pret_c18[...] = c18 pret_c19[...] = c19 pret_c20[...] = c20 pret_c21[...] = c21 pret_c22[...] = c22 pret_c23[...] = c23 pret_c24[...] = c24 pret_c25[...] = c25 pret_c26[...] = c26 pret_c27[...] = c27 pret_c28[...] = c28 pret_c29[...] = c29 pret_c30[...] = c30 pret_c31[...] = c31 pret_c32[...] = c32 pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float) pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float) pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float) p_gamma[...] = np.array(bl_gamma) pret_means[...] = np.array(bl_means) pret_pruning_mask[...] = np.array(bl_pruning_mask) rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float) pret_rand_map_exp_0[...] = rand_map_0_expand rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float) pret_rand_map_exp_1[...] = rand_map_1_expand rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float) pret_rand_map_exp_2[...] = rand_map_2_expand print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask)))) # dense layer 4 bl_w1 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"] bl_rand_map_0 = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"] bl_pruning_mask = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"] bl_gamma = bl["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"] bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"] pret_rand_map_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_0:0"] pret_rand_map_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_1:0"] pret_rand_map_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_2:0"] pret_pruning_mask = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["pruning_mask:0"] p_gamma = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable:0"] pret_means = pretrained["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"] pret_c1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_1:0"] pret_c2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_2:0"] pret_c3 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_3:0"] pret_c4 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_4:0"] pret_c5 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_5:0"] pret_c6 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_6:0"] pret_c7 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_7:0"] pret_c8 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_8:0"] pret_c9 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_9:0"] pret_c10= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_10:0"] pret_c11= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_11:0"] pret_c12= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_12:0"] pret_c13= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_13:0"] pret_c14= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_14:0"] pret_c15= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_15:0"] pret_c16= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_16:0"] 
pret_c17= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_17:0"] pret_c18= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_18:0"] pret_c19= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_19:0"] pret_c20= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_20:0"] pret_c21= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_21:0"] pret_c22= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_22:0"] pret_c23= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_23:0"] pret_c24= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_24:0"] pret_c25= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_25:0"] pret_c26= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_26:0"] pret_c27= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_27:0"] pret_c28= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_28:0"] pret_c29= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_29:0"] pret_c30= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_30:0"] pret_c31= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_31:0"] pret_c32= pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_32:0"] pret_w1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["Variable_33:0"] pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_0:0"] pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_1:0"] pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_4"]["binary_dense_4"]["rand_map_exp_2:0"] weight_shape = np.shape(bl_w1) tile_shape = np.shape(pret_c1) zero_fill = np.zeros(tile_shape) one_fill = np.ones(tile_shape) neg_one_fill = -np.ones(tile_shape) # randomisation and pruning recovery bl_w1_unroll = np.array(bl_w1) bl_w1 = np.array(bl_w1) rand_map_0 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_0) rand_map_1 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_1) rand_map_2 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_2) pruning_mask = np.array(bl_pruning_mask).astype(bool) init_mask = np.logical_not(pruning_mask[rand_map_0]) pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)] pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover) init_mask = np.reshape(init_mask, tile_shape) # expand randomisation map across tiles rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]]) rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]]) rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]]) for i in range(weight_shape[0]): rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand] bl_w1_rand_0 = 
np.reshape(bl_w1_rand_0, weight_shape) w1 = bl_w1 # connect1 only c1 = one_fill c2 = neg_one_fill c3 = one_fill c4 = neg_one_fill c5 = one_fill c6 = neg_one_fill c7 = one_fill c8 = neg_one_fill c9 = one_fill c10 = neg_one_fill c11 = one_fill c12 = neg_one_fill c13 = one_fill c14 = neg_one_fill c15 = one_fill c16 = neg_one_fill c17 = neg_one_fill c18 = one_fill c19 = neg_one_fill c20 = one_fill c21 = neg_one_fill c22 = one_fill c23 = neg_one_fill c24 = one_fill c25 = neg_one_fill c26 = one_fill c27 = neg_one_fill c28 = one_fill c29 = neg_one_fill c30 = one_fill c31 = neg_one_fill c32 = one_fill pret_w1 [...] = w1 pret_c1 [...] = c1 pret_c2 [...] = c2 pret_c3 [...] = c3 pret_c4 [...] = c4 pret_c5 [...] = c5 pret_c6 [...] = c6 pret_c7 [...] = c7 pret_c8 [...] = c8 pret_c9 [...] = c9 pret_c10[...] = c10 pret_c11[...] = c11 pret_c12[...] = c12 pret_c13[...] = c13 pret_c14[...] = c14 pret_c15[...] = c15 pret_c16[...] = c16 pret_c17[...] = c17 pret_c18[...] = c18 pret_c19[...] = c19 pret_c20[...] = c20 pret_c21[...] = c21 pret_c22[...] = c22 pret_c23[...] = c23 pret_c24[...] = c24 pret_c25[...] = c25 pret_c26[...] = c26 pret_c27[...] = c27 pret_c28[...] = c28 pret_c29[...] = c29 pret_c30[...] = c30 pret_c31[...] = c31 pret_c32[...] = c32 pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float) pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float) pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float) p_gamma[...] = np.array(bl_gamma) pret_means[...] = np.array(bl_means) pret_pruning_mask[...] = np.array(bl_pruning_mask) rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float) pret_rand_map_exp_0[...] = rand_map_0_expand rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float) pret_rand_map_exp_1[...] = rand_map_1_expand rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float) pret_rand_map_exp_2[...] 
= rand_map_2_expand print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask)))) # dense layer 5 bl_w1 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"] bl_rand_map_0 = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"] bl_pruning_mask = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"] bl_gamma = bl["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"] bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"] pret_rand_map_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_0:0"] pret_rand_map_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_1:0"] pret_rand_map_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_2:0"] pret_pruning_mask = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["pruning_mask:0"] p_gamma = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable:0"] pret_means = pretrained["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"] pret_c1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_1:0"] pret_c2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_2:0"] pret_c3 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_3:0"] pret_c4 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_4:0"] pret_c5 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_5:0"] pret_c6 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_6:0"] pret_c7 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_7:0"] pret_c8 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_8:0"] pret_c9 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_9:0"] pret_c10= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_10:0"] pret_c11= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_11:0"] pret_c12= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_12:0"] pret_c13= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_13:0"] pret_c14= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_14:0"] pret_c15= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_15:0"] pret_c16= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_16:0"] pret_c17= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_17:0"] pret_c18= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_18:0"] pret_c19= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_19:0"] pret_c20= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_20:0"] pret_c21= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_21:0"] pret_c22= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_22:0"] pret_c23= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_23:0"] pret_c24= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_24:0"] pret_c25= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_25:0"] pret_c26= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_26:0"] pret_c27= 
pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_27:0"] pret_c28= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_28:0"] pret_c29= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_29:0"] pret_c30= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_30:0"] pret_c31= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_31:0"] pret_c32= pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_32:0"] pret_w1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["Variable_33:0"] pret_rand_map_exp_0 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_0:0"] pret_rand_map_exp_1 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_1:0"] pret_rand_map_exp_2 = pretrained["model_weights"]["binary_dense_5"]["binary_dense_5"]["rand_map_exp_2:0"] weight_shape = np.shape(bl_w1) tile_shape = np.shape(pret_c1) zero_fill = np.zeros(tile_shape) one_fill = np.ones(tile_shape) neg_one_fill = -np.ones(tile_shape) # randomisation and pruning recovery bl_w1_unroll = np.array(bl_w1) bl_w1 = np.array(bl_w1) rand_map_0 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_0) rand_map_1 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_1) rand_map_2 = np.arange(tile_shape[0]) np.random.shuffle(rand_map_2) pruning_mask = np.array(bl_pruning_mask).astype(bool) init_mask = np.logical_not(pruning_mask[rand_map_0]) pruning_mask_recover = np.logical_and(pruning_mask, init_mask)[np.argsort(rand_map_0)] pruning_mask = np.logical_or(pruning_mask, pruning_mask_recover) init_mask = np.reshape(init_mask, tile_shape) # expand randomisation map across tiles rand_map_0_expand = np.tile(rand_map_0,[weight_shape[0]/tile_shape[0]]) rand_map_1_expand = np.tile(rand_map_1,[weight_shape[0]/tile_shape[0]]) rand_map_2_expand = np.tile(rand_map_2,[weight_shape[0]/tile_shape[0]]) for i in range(weight_shape[0]): rand_map_0_expand[i] = rand_map_0_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_0_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_1_expand[i] = rand_map_1_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_1_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) rand_map_2_expand[i] = rand_map_2_expand[i] + (tile_shape[0]*(weight_shape[0]/tile_shape[0]-1)) * (rand_map_2_expand[i]/tile_shape[0]) + tile_shape[0]*(i%weight_shape[0]/tile_shape[0]) bl_w1_rand_0 = bl_w1_unroll[rand_map_0_expand] bl_w1_rand_0 = np.reshape(bl_w1_rand_0, weight_shape) w1 = bl_w1 # connect1 only c1 = one_fill c2 = neg_one_fill c3 = one_fill c4 = neg_one_fill c5 = one_fill c6 = neg_one_fill c7 = one_fill c8 = neg_one_fill c9 = one_fill c10 = neg_one_fill c11 = one_fill c12 = neg_one_fill c13 = one_fill c14 = neg_one_fill c15 = one_fill c16 = neg_one_fill c17 = neg_one_fill c18 = one_fill c19 = neg_one_fill c20 = one_fill c21 = neg_one_fill c22 = one_fill c23 = neg_one_fill c24 = one_fill c25 = neg_one_fill c26 = one_fill c27 = neg_one_fill c28 = one_fill c29 = neg_one_fill c30 = one_fill c31 = neg_one_fill c32 = one_fill pret_w1 [...] = w1 pret_c1 [...] = c1 pret_c2 [...] = c2 pret_c3 [...] = c3 pret_c4 [...] = c4 pret_c5 [...] = c5 pret_c6 [...] = c6 pret_c7 [...] = c7 pret_c8 [...] = c8 pret_c9 [...] = c9 pret_c10[...] = c10 pret_c11[...] = c11 pret_c12[...] = c12 pret_c13[...] = c13 pret_c14[...] = c14 pret_c15[...] = c15 pret_c16[...] 
= c16 pret_c17[...] = c17 pret_c18[...] = c18 pret_c19[...] = c19 pret_c20[...] = c20 pret_c21[...] = c21 pret_c22[...] = c22 pret_c23[...] = c23 pret_c24[...] = c24 pret_c25[...] = c25 pret_c26[...] = c26 pret_c27[...] = c27 pret_c28[...] = c28 pret_c29[...] = c29 pret_c30[...] = c30 pret_c31[...] = c31 pret_c32[...] = c32 pret_rand_map_0[...] = np.reshape(rand_map_0, (-1,1)).astype(float) pret_rand_map_1[...] = np.reshape(rand_map_1, (-1,1)).astype(float) pret_rand_map_2[...] = np.reshape(rand_map_2, (-1,1)).astype(float) p_gamma[...] = np.array(bl_gamma) pret_means[...] = np.array(bl_means) pret_pruning_mask[...] = np.array(bl_pruning_mask) rand_map_0_expand = np.reshape(rand_map_0_expand, [-1,1]).astype(float) pret_rand_map_exp_0[...] = rand_map_0_expand rand_map_1_expand = np.reshape(rand_map_1_expand, [-1,1]).astype(float) pret_rand_map_exp_1[...] = rand_map_1_expand rand_map_2_expand = np.reshape(rand_map_2_expand, [-1,1]).astype(float) pret_rand_map_exp_2[...] = rand_map_2_expand print(np.sum(np.array(bl_pruning_mask)), np.prod(np.shape(np.array(bl_pruning_mask)))) # bn 1 bl_beta = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"] bl_gamma = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"] bl_moving_mean = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"] bl_moving_variance = bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"] p_beta = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"] p_gamma = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"] p_moving_mean = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"] p_moving_variance = pretrained["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"] p_beta[...] = np.array(bl_beta) p_gamma[...] = np.array(bl_gamma) p_moving_mean[...] = np.array(bl_moving_mean) p_moving_variance[...] = np.array(bl_moving_variance) # bn 2 bl_beta = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"] bl_gamma = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"] bl_moving_mean = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"] bl_moving_variance = bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"] p_beta = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"] p_gamma = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"] p_moving_mean = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"] p_moving_variance = pretrained["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"] p_beta[...] = np.array(bl_beta) p_gamma[...] = np.array(bl_gamma) p_moving_mean[...] = np.array(bl_moving_mean) p_moving_variance[...] 
= np.array(bl_moving_variance) # bn 3 bl_beta = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"] bl_gamma = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"] bl_moving_mean = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"] bl_moving_variance = bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"] p_beta = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"] p_gamma = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"] p_moving_mean = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"] p_moving_variance = pretrained["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"] p_beta[...] = np.array(bl_beta) p_gamma[...] = np.array(bl_gamma) p_moving_mean[...] = np.array(bl_moving_mean) p_moving_variance[...] = np.array(bl_moving_variance) # bn 4 bl_beta = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"] bl_gamma = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"] bl_moving_mean = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"] bl_moving_variance = bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"] p_beta = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"] p_gamma = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"] p_moving_mean = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"] p_moving_variance = pretrained["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"] p_beta[...] = np.array(bl_beta) p_gamma[...] = np.array(bl_gamma) p_moving_mean[...] = np.array(bl_moving_mean) p_moving_variance[...] = np.array(bl_moving_variance) # bn 5 bl_beta = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"] bl_gamma = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"] bl_moving_mean = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"] bl_moving_variance = bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"] p_beta = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"] p_gamma = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"] p_moving_mean = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"] p_moving_variance = pretrained["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"] p_beta[...] = np.array(bl_beta) p_gamma[...] = np.array(bl_gamma) p_moving_mean[...] = np.array(bl_moving_mean) p_moving_variance[...] = np.array(bl_moving_variance) pretrained.close()
47.731103
192
0.749708
869a42d471e5a0264cf98babfcdd88a6669b3cbc
12,970
py
Python
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/classify/weka.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
# Natural Language Toolkit: Interface to Weka Classsifiers
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin

from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI

_weka_classpath = None
_weka_search = ['.',
                '/usr/share/weka',
                '/usr/local/share/weka',
                '/usr/lib/weka',
                '/usr/local/lib/weka',]

if __name__ == '__main__':
    from nltk.classify.util import names_demo, binary_names_demo_features
    classifier = names_demo(make_classifier, binary_names_demo_features)
37.485549
85
0.562606
869a8a31a260884a519f22c5d9a29b22876f051f
5,502
py
Python
src/si/data/dataset.py
pg428/SIB
b887c2011eb3a04d119a93b3932785d182e331d3
[ "Apache-2.0" ]
null
null
null
src/si/data/dataset.py
pg428/SIB
b887c2011eb3a04d119a93b3932785d182e331d3
[ "Apache-2.0" ]
null
null
null
src/si/data/dataset.py
pg428/SIB
b887c2011eb3a04d119a93b3932785d182e331d3
[ "Apache-2.0" ]
null
null
null
import pandas as pd
import numpy as np
from src.si.util.util import label_gen

__all__ = ['Dataset']


def hasLabel(self):
    """Returns True if the dataset constains labels (a dependent variable)"""
    return self.Y is not None

def getNumFeatures(self):
    """Returns the number of features"""
    return self.X.shape[1]

def getNumClasses(self):
    """Returns the number of label classes or 0 if the dataset has no dependent variable."""
    return len(np.unique(self.Y)) if self.hasLabel() else 0

def writeDataset(self, filename, sep=","):
    """Saves the dataset to a file

    :param filename: The output file path
    :type filename: str
    :param sep: The fields separator, defaults to ","
    :type sep: str, optional
    """
    fullds = np.hstack((self.X, self.Y.reshape(len(self.Y), 1)))
    np.savetxt(filename, fullds, delimiter=sep)

def toDataframe(self):
    """ Converts the dataset into a pandas DataFrame"""
    if self.hasLabel():
        df = pd.DataFrame(np.hstack((self.X, self.Y.reshape(len(self.Y), 1))), columns=self.xnames[:]+[self.yname])  #columns=np.hstack((self.xnames, self.yname)))
    else:
        df = pd.DataFrame(self.X.copy(), columns=self.xnames[:])
    return df

def summary(dataset, format='df'):
    """ Returns the statistics of a dataset(mean, std, max, min)

    :param dataset: A Dataset object
    :type dataset: si.data.Dataset
    :param format: Output format ('df':DataFrame, 'dict':dictionary ), defaults to 'df'
    :type format: str, optional
    """
    if format not in ["df", "dict"]:
        raise Exception("Invalid format. Choose between 'df' and 'dict'.")
    if dataset.hasLabel():
        data = np.hstack((dataset.X, dataset.Y.reshape(len(dataset.Y), 1)))
        #data = np.hstack([dataset.X, np.reshape(dataset.Y, (-1, 1))])
        columns = dataset.xnames[:] + [dataset.yname]
    else:
        data = dataset.X
        columns = dataset.xnames[:]
    stats = {}
    if type(dataset.Y[0]) is str:
        for i in range(data.shape[1]-1):  #ve colunas
            _means = np.mean(data[:, i], axis=0)
            _vars = np.var(data[:, i], axis=0)
            _maxs = np.max(data[:, i], axis=0)
            _mins = np.min(data[:, i], axis=0)
            stat = {"mean": _means,
                    "var": _vars,
                    "max": _maxs,
                    "min": _mins
                    }
            stats[columns[i]] = stat
    else:
        for i in range(data.shape[1]):  # ve colunas
            _means = np.mean(data[:, i], axis=0)
            _vars = np.var(data[:, i], axis=0)
            _maxs = np.max(data[:, i], axis=0)
            _mins = np.min(data[:, i], axis=0)
            stat = {"mean": _means,
                    "var": _vars,
                    "max": _maxs,
                    "min": _mins
                    }
            stats[columns[i]] = stat
    # _means = np.mean(data, axis=0)
    # _vars = np.var(data, axis=0)
    # _maxs = np.max(data, axis=0)
    # _mins = np.min(data, axis=0)
    # stats = {}
    # for i in range(data.shape[1]):
    #     stat = {"mean": _means[i],
    #             "var": _vars[i],
    #             "max": _maxs[i],
    #             "min": _mins[i]
    #             }
    #     stats[columns[i]] = stat
    if format == "dict":
        return stats
    else:
        return pd.DataFrame(stats)
32.364706
166
0.526172
869b1718c19f9cae150c01d7070087f780577bf0
3,357
py
Python
stubs/m5stack_flowui-1_4_0-beta/display.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
38
2020-10-18T21:59:44.000Z
2022-03-17T03:03:28.000Z
stubs/m5stack_flowui-1_4_0-beta/display.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
176
2020-10-18T14:31:03.000Z
2022-03-30T23:22:39.000Z
stubs/m5stack_flowui-1_4_0-beta/display.py
RonaldHiemstra/micropython-stubs
d97f879b01f6687baaebef1c7e26a80909c3cff3
[ "MIT" ]
6
2020-12-28T21:11:12.000Z
2022-02-06T04:07:50.000Z
""" Module: 'display' on M5 FlowUI v1.4.0-beta """ # MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32') # Stubber: 1.3.1
13.481928
141
0.49866
869c4c6a792894e8eb7116f05f76e9950b851051
364
py
Python
dbclient/__init__.py
dmoore247/db-migration
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
[ "Apache-2.0" ]
null
null
null
dbclient/__init__.py
dmoore247/db-migration
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
[ "Apache-2.0" ]
null
null
null
dbclient/__init__.py
dmoore247/db-migration
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
[ "Apache-2.0" ]
null
null
null
import json, requests, datetime
from cron_descriptor import get_description

from .dbclient import dbclient
from .JobsClient import JobsClient
from .ClustersClient import ClustersClient
from .WorkspaceClient import WorkspaceClient
from .ScimClient import ScimClient
from .LibraryClient import LibraryClient
from .HiveClient import HiveClient
from .parser import *
30.333333
44
0.857143
869cca1a77b5e47ef9d5c83e59910c957d0e0eeb
1,145
py
Python
UI/ControlSlider/__init__.py
peerke88/SkinningTools
db761f569ba179231dc64183ebfca1684429ab96
[ "MIT" ]
7
2021-12-16T06:03:18.000Z
2022-02-15T01:49:37.000Z
UI/weightEditor/__init__.py
peerke88/SkinningTools
db761f569ba179231dc64183ebfca1684429ab96
[ "MIT" ]
null
null
null
UI/weightEditor/__init__.py
peerke88/SkinningTools
db761f569ba179231dc64183ebfca1684429ab96
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# SkinWeights command and component editor
# Copyright (C) 2018 Trevor van Hoof
# Website: http://www.trevorius.com
#
# pyqt attribute sliders
# Copyright (C) 2018 Daniele Niero
# Website: http://danieleniero.com/
#
# neighbour finding algorythm
# Copyright (C) 2018 Jan Pijpers
# Website: http://www.janpijpers.com/
#
# skinningTools and UI
# Copyright (C) 2018 Perry Leijten
# Website: http://www.perryleijten.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See http://www.gnu.org/licenses/gpl.html for a copy of the GNU General
# Public License.
# --------------------------------------------------------------------------------------
36.935484
89
0.672489
869e135c2c869c0e98bb08d38ef8fc9d0c3c1530
11,744
py
Python
homeassistant/components/fritz/sensor.py
EuleMitKeule/core
3af54d96c7dcc3f7087d1196e6ab0db029301ee7
[ "Apache-2.0" ]
3
2022-02-18T14:03:39.000Z
2022-03-26T20:26:55.000Z
homeassistant/components/fritz/sensor.py
EuleMitKeule/core
3af54d96c7dcc3f7087d1196e6ab0db029301ee7
[ "Apache-2.0" ]
74
2020-08-05T07:20:27.000Z
2022-03-23T12:47:28.000Z
homeassistant/components/fritz/sensor.py
marecabo/home-assistant
e33774a61e7fcc88aff752dfa4618dd26a746872
[ "Apache-2.0" ]
2
2020-06-06T21:55:32.000Z
2022-03-06T04:18:21.000Z
"""AVM FRITZ!Box binary sensors.""" from __future__ import annotations from collections.abc import Callable from dataclasses import dataclass from datetime import datetime, timedelta import logging from typing import Any, Literal from fritzconnection.core.exceptions import ( FritzActionError, FritzActionFailedError, FritzConnectionException, FritzInternalError, FritzServiceError, ) from fritzconnection.lib.fritzstatus import FritzStatus from homeassistant.components.sensor import ( STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, SensorEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( DATA_GIGABYTES, DATA_RATE_KILOBITS_PER_SECOND, DATA_RATE_KILOBYTES_PER_SECOND, DEVICE_CLASS_TIMESTAMP, ENTITY_CATEGORY_DIAGNOSTIC, SIGNAL_STRENGTH_DECIBELS, ) from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.util.dt import utcnow from .common import FritzBoxBaseEntity, FritzBoxTools from .const import DOMAIN, DSL_CONNECTION, UPTIME_DEVIATION _LOGGER = logging.getLogger(__name__) def _uptime_calculation(seconds_uptime: float, last_value: datetime | None) -> datetime: """Calculate uptime with deviation.""" delta_uptime = utcnow() - timedelta(seconds=seconds_uptime) if ( not last_value or abs((delta_uptime - last_value).total_seconds()) > UPTIME_DEVIATION ): return delta_uptime return last_value def _retrieve_device_uptime_state( status: FritzStatus, last_value: datetime ) -> datetime: """Return uptime from device.""" return _uptime_calculation(status.device_uptime, last_value) def _retrieve_connection_uptime_state( status: FritzStatus, last_value: datetime | None ) -> datetime: """Return uptime from connection.""" return _uptime_calculation(status.connection_uptime, last_value) def _retrieve_external_ip_state(status: FritzStatus, last_value: str) -> str: """Return external ip from device.""" return status.external_ip # type: ignore[no-any-return] def _retrieve_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload transmission rate.""" return round(status.transmission_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download transmission rate.""" return round(status.transmission_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload max transmission rate.""" return round(status.max_bit_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_max_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download max transmission rate.""" return round(status.max_bit_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_gb_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload total data.""" return round(status.bytes_sent / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return] def _retrieve_gb_received_state(status: FritzStatus, last_value: str) -> float: """Return download total data.""" return round(status.bytes_received / 1000 / 1000 / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_kb_s_sent_state(status: FritzStatus, last_value: str) -> float: """Return upload link rate.""" return round(status.max_linked_bit_rate[0] / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_kb_s_received_state(status: FritzStatus, last_value: str) -> float: """Return download link rate.""" 
return round(status.max_linked_bit_rate[1] / 1000, 1) # type: ignore[no-any-return] def _retrieve_link_noise_margin_sent_state( status: FritzStatus, last_value: str ) -> float: """Return upload noise margin.""" return status.noise_margin[0] / 10 # type: ignore[no-any-return] def _retrieve_link_noise_margin_received_state( status: FritzStatus, last_value: str ) -> float: """Return download noise margin.""" return status.noise_margin[1] / 10 # type: ignore[no-any-return] def _retrieve_link_attenuation_sent_state( status: FritzStatus, last_value: str ) -> float: """Return upload line attenuation.""" return status.attenuation[0] / 10 # type: ignore[no-any-return] def _retrieve_link_attenuation_received_state( status: FritzStatus, last_value: str ) -> float: """Return download line attenuation.""" return status.attenuation[1] / 10 # type: ignore[no-any-return] SENSOR_TYPES: tuple[FritzSensorEntityDescription, ...] = ( FritzSensorEntityDescription( key="external_ip", name="External IP", icon="mdi:earth", value_fn=_retrieve_external_ip_state, ), FritzSensorEntityDescription( key="device_uptime", name="Device Uptime", device_class=DEVICE_CLASS_TIMESTAMP, entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_device_uptime_state, ), FritzSensorEntityDescription( key="connection_uptime", name="Connection Uptime", device_class=DEVICE_CLASS_TIMESTAMP, entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_connection_uptime_state, ), FritzSensorEntityDescription( key="kb_s_sent", name="Upload Throughput", state_class=STATE_CLASS_MEASUREMENT, native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND, icon="mdi:upload", value_fn=_retrieve_kb_s_sent_state, ), FritzSensorEntityDescription( key="kb_s_received", name="Download Throughput", state_class=STATE_CLASS_MEASUREMENT, native_unit_of_measurement=DATA_RATE_KILOBYTES_PER_SECOND, icon="mdi:download", value_fn=_retrieve_kb_s_received_state, ), FritzSensorEntityDescription( key="max_kb_s_sent", name="Max Connection Upload Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:upload", entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_max_kb_s_sent_state, ), FritzSensorEntityDescription( key="max_kb_s_received", name="Max Connection Download Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:download", entity_category=ENTITY_CATEGORY_DIAGNOSTIC, value_fn=_retrieve_max_kb_s_received_state, ), FritzSensorEntityDescription( key="gb_sent", name="GB sent", state_class=STATE_CLASS_TOTAL_INCREASING, native_unit_of_measurement=DATA_GIGABYTES, icon="mdi:upload", value_fn=_retrieve_gb_sent_state, ), FritzSensorEntityDescription( key="gb_received", name="GB received", state_class=STATE_CLASS_TOTAL_INCREASING, native_unit_of_measurement=DATA_GIGABYTES, icon="mdi:download", value_fn=_retrieve_gb_received_state, ), FritzSensorEntityDescription( key="link_kb_s_sent", name="Link Upload Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:upload", value_fn=_retrieve_link_kb_s_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_kb_s_received", name="Link Download Throughput", native_unit_of_measurement=DATA_RATE_KILOBITS_PER_SECOND, icon="mdi:download", value_fn=_retrieve_link_kb_s_received_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_noise_margin_sent", name="Link Upload Noise Margin", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:upload", 
value_fn=_retrieve_link_noise_margin_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_noise_margin_received", name="Link Download Noise Margin", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:download", value_fn=_retrieve_link_noise_margin_received_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_attenuation_sent", name="Link Upload Power Attenuation", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:upload", value_fn=_retrieve_link_attenuation_sent_state, connection_type=DSL_CONNECTION, ), FritzSensorEntityDescription( key="link_attenuation_received", name="Link Download Power Attenuation", native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS, icon="mdi:download", value_fn=_retrieve_link_attenuation_received_state, connection_type=DSL_CONNECTION, ), )
33.458689
94
0.712364
869e772414d99f560741ba4d5f3b4440b61ae41b
2,931
py
Python
Ifc/IfcBase.py
gsimon75/IFC_parser
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
[ "BSD-2-Clause" ]
28
2019-12-02T11:41:14.000Z
2022-03-02T22:53:24.000Z
Ifc/IfcBase.py
gsimon75/IFC_parser
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
[ "BSD-2-Clause" ]
4
2019-08-30T13:52:40.000Z
2022-02-02T02:31:31.000Z
Ifc/IfcBase.py
gsimon75/IFC_parser
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
[ "BSD-2-Clause" ]
6
2020-07-11T22:35:07.000Z
2022-03-18T15:12:46.000Z
from Ifc.ClassRegistry import ifc_class, ifc_abstract_class, ifc_fallback_class


class Omitted:
    """
    Marked with '*' it states that some supertype had defined that attribute, but in
    the subtype it is a derived (calculated) value, so it no longer makes sense to
    explicitly assign value to it.
    """
    # TODO: Haven't tried if it can be handled 'just as expected'


# class-level, enough to reference, no need to create multiple instances (doesn't hurt though)
omitted = Omitted()

# vim: set sw=4 ts=4 et:
21.23913
132
0.624701
869e8784f6deaecfb703cc98502b159dc7530a96
5,330
py
Python
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
xe1gyq/stx-utils
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
[ "Apache-2.0" ]
null
null
null
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
xe1gyq/stx-utils
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
[ "Apache-2.0" ]
null
null
null
middleware/io-monitor/recipes-common/io-monitor/io-monitor/io_monitor/utils/data_collector.py
xe1gyq/stx-utils
93b7f7dc2c6732db8c8ae0eb3f52ace4df714dc9
[ "Apache-2.0" ]
null
null
null
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import logging
import os

from io_monitor.constants import DOMAIN
from io_monitor.utils.data_window import DataCollectionWindow

LOG = logging.getLogger(DOMAIN)
36.013514
77
0.630206
869e8ff896779ff36d9b024ced2d268e80c7682a
19,793
py
Python
examples/language-modeling/debias_lm_hps_tune.py
SoumyaBarikeri/transformers
996c6e113404000f50444287aa8a31a174ebd92f
[ "Apache-2.0" ]
1
2021-08-07T06:06:45.000Z
2021-08-07T06:06:45.000Z
examples/language-modeling/debias_lm_hps_tune.py
SoumyaBarikeri/transformers
996c6e113404000f50444287aa8a31a174ebd92f
[ "Apache-2.0" ]
null
null
null
examples/language-modeling/debias_lm_hps_tune.py
SoumyaBarikeri/transformers
996c6e113404000f50444287aa8a31a174ebd92f
[ "Apache-2.0" ]
2
2021-05-31T08:50:50.000Z
2022-01-26T13:14:58.000Z
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet). GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss. """ import logging import math import os from dataclasses import dataclass, field from typing import Optional import torch from transformers.optimization import AdamW, get_linear_schedule_with_warmup from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, HfArgumentParser, # LineByLineTextDatasetLabels, LineByLineTextDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) import ray from ray import tune from transformers.file_utils import is_torch_tpu_available from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from ray.tune.schedulers import PopulationBasedTraining from ray.tune import CLIReporter # if is_wandb_available(): # import wandb ray.shutdown() ray.init(log_to_driver=True, ignore_reinit_error=True) logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def get_dataset( args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ): file_path = args.eval_data_file if evaluate else args.train_data_file if args.line_by_line: return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size) # return LineByLineTextDatasetLabels(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size) else: return TextDataset( tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, ) class TuneTransformerTrainer(Trainer): def recover_checkpoint(tune_checkpoint_dir, model_name=None): if tune_checkpoint_dir is None or len(tune_checkpoint_dir) == 0: return model_name # Get subdirectory used for Huggingface. subdirs = [ os.path.join(tune_checkpoint_dir, name) for name in os.listdir(tune_checkpoint_dir) if os.path.isdir(os.path.join(tune_checkpoint_dir, name)) ] # There should only be 1 subdir. assert len(subdirs) == 1, subdirs return subdirs[0] # def train_transformer(config, checkpoint_dir=None): # train_dataset, eval_dataset = get_datasets(config) # # training_args = TrainingArguments( # output_dir=tune.get_trial_dir(), # learning_rate=config["learning_rate"], # do_train=True, # do_eval=True, # evaluate_during_training=True, # # Run eval after every epoch. 
# eval_steps=(len(train_dataset) // config["per_gpu_train_batch_size"]) + # 1, # # We explicitly set save to 0, and do checkpointing in evaluate instead # save_steps=0, # num_train_epochs=config["num_epochs"], # max_steps=config["max_steps"], # per_device_train_batch_size=config["per_gpu_train_batch_size"], # per_device_eval_batch_size=config["per_gpu_val_batch_size"], # warmup_steps=0, # weight_decay=config["weight_decay"], # logging_dir="./logs", # ) # # model_name_or_path = recover_checkpoint(checkpoint_dir, config["model_name"]) # # num_labels = glue_tasks_num_labels[config["task_name"]] # # config = AutoConfig.from_pretrained( # model_name_or_path, # num_labels=num_labels, # finetuning_task=task_name, # ) # model = AutoModelForSequenceClassification.from_pretrained( # model_name_or_path, # config=config, # ) # # # Use our modified TuneTransformerTrainer # tune_trainer = TuneTransformerTrainer( # model=model, # args=training_args, # train_dataset=train_dataset, # eval_dataset=eval_dataset, # compute_metrics=utils.build_compute_metrics_fn(task_name), # ) # tune_trainer.train(model_name_or_path) if __name__ == "__main__": parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() config = { # These 3 configs below were defined earlier "model_name": model_args.model_name_or_path, "task_name": "CLM", "data_dir": "", "per_gpu_val_batch_size": 32, "per_gpu_train_batch_size": tune.choice([16, 32, 64]), "learning_rate": tune.uniform(1e-5, 5e-5), "weight_decay": tune.uniform(0.0, 0.3), "num_epochs": tune.choice([2, 3, 4, 5]), "max_steps": -1, # We use num_epochs instead. "wandb": { "project": "pbt_transformers", "reinit": True, "allow_val_change": True } } logger.info(config) scheduler = PopulationBasedTraining( time_attr="training_iteration", metric="eval_loss", mode="min", perturbation_interval=2, hyperparam_mutations={ "weight_decay": lambda: tune.uniform(0.0, 0.3).func(None), "learning_rate": lambda: tune.uniform(1e-5, 5e-5).func(None), "per_gpu_train_batch_size": [16, 32, 64], }) reporter = CLIReporter( parameter_columns={ "weight_decay": "w_decay", "learning_rate": "lr", "per_gpu_train_batch_size": "train_bs/gpu", "num_epochs": "num_epochs" }, metric_columns=[ "eval_acc", "eval_loss", "epoch", "training_iteration" ]) analysis = tune.run( train_transformer, resources_per_trial={ "cpu": 1, "gpu": 1 }, config=config, num_samples=3, scheduler=scheduler, keep_checkpoints_num=3, checkpoint_score_attr="training_iteration", progress_reporter=reporter, local_dir="./ray_results/", name="tune_trans") best_config = analysis.get_best_config(metric="eval_loss", mode="min") print(best_config)
38.210425
175
0.664477
869ee02cc744c1a084a226d08c1391e0d7881239
1,373
py
Python
checksums.py
pgp/RootHelperClientTestInteractions
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
[ "Apache-2.0" ]
1
2019-05-04T12:29:41.000Z
2019-05-04T12:29:41.000Z
checksums.py
pgp/RootHelperClientTestInteractions
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
[ "Apache-2.0" ]
null
null
null
checksums.py
pgp/RootHelperClientTestInteractions
6b9e9cc9f10eb2bf9b9dafa851ed56005f7666b5
[ "Apache-2.0" ]
null
null
null
from net_common import *

import struct
import sys

if __name__ == "__main__":
    sock = get_connected_local_socket()
    path = encodeString('/dev/shm/exampleDir')
    # path = encodeString('/dev/null')
    sock.sendall(bytearray(b'\x0A'))  # HASH request
    # sock.sendall(bytearray(b'\x01'))  # choose MD5 algorithm
    sock.sendall(bytearray(b'\x06'))  # choose SHA3-224 algorithm
    sock.sendall(getDirHashOpts(withNames=True,ignoreUnixHiddenFiles=False))  # send dirHashOpts byte (unused for regular files)
    sock.sendall(struct.pack("@H", len(path)))  # len of path as unsigned short
    sock.sendall(path)
    resp = sock.recv(1)  # response first byte: \x00 OK or \xFF ERROR
    if resp != b'\x00':
        print("Error byte received, errno is:", struct.unpack("@i", sock.recv(4))[0])
        sys.exit(0)
    # print(toHex(sock.recv(16)))  # 128 bit (16 byte) md5 digest size
    print(toHex(sock.recv(28)))  # 224 bit (28 byte) sha3-224 digest size
    sock.close()
40.382353
128
0.627822
869f105dabe3ba66d48901d5ab1ef89fe7052f2e
624
py
Python
investment_report/migrations/0020_auto_20180911_1005.py
uktrade/pir-api
79747ceab042c42c287e2b7471f6dade70f68693
[ "MIT" ]
1
2021-02-02T19:08:55.000Z
2021-02-02T19:08:55.000Z
investment_report/migrations/0020_auto_20180911_1005.py
uktrade/invest-pir-api
be56efddf9dfdf81c8557441a9a54d9a4dd4bab1
[ "MIT" ]
21
2018-07-10T10:20:47.000Z
2022-03-24T09:36:29.000Z
investment_report/migrations/0020_auto_20180911_1005.py
uktrade/pir-api
79747ceab042c42c287e2b7471f6dade70f68693
[ "MIT" ]
1
2021-02-04T11:28:37.000Z
2021-02-04T11:28:37.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-11 10:05
from __future__ import unicode_literals

import config.s3
from django.db import migrations, models
27.130435
155
0.653846
86a0198d73981242105ec1cd9a6582cfdecf35e4
317
py
Python
tests/test-scripts/threadpools.py
whalesalad/filprofiler
9c12cbe62ad1fed5d59d923013739bb3377bc24c
[ "Apache-2.0" ]
521
2020-06-18T14:27:22.000Z
2022-03-30T08:29:10.000Z
tests/test-scripts/threadpools.py
whalesalad/filprofiler
9c12cbe62ad1fed5d59d923013739bb3377bc24c
[ "Apache-2.0" ]
277
2020-06-18T14:11:36.000Z
2022-03-28T09:51:11.000Z
tests/test-scripts/threadpools.py
sthagen/python-filprofiler
5efe9fcca539d31f5423bbfa39a3f49176925350
[ "Apache-2.0" ]
17
2020-06-26T05:29:43.000Z
2022-03-28T09:57:08.000Z
"""Validate that number of threads in thread pools is set to 1.""" import numexpr import blosc import threadpoolctl # APIs that return previous number of threads: assert numexpr.set_num_threads(2) == 1 assert blosc.set_nthreads(2) == 1 for d in threadpoolctl.threadpool_info(): assert d["num_threads"] == 1, d
24.384615
66
0.747634
86a15534f296338602eb17c7dad23025e0241a4e
3,208
py
Python
scripts/viewStokespat.py
David-McKenna/AntPat
45618659994b27e2654f1effd6d9baa15867b6d3
[ "ISC" ]
5
2016-06-21T14:54:23.000Z
2021-04-06T06:23:25.000Z
scripts/viewStokespat.py
David-McKenna/AntPat
45618659994b27e2654f1effd6d9baa15867b6d3
[ "ISC" ]
null
null
null
scripts/viewStokespat.py
David-McKenna/AntPat
45618659994b27e2654f1effd6d9baa15867b6d3
[ "ISC" ]
2
2019-10-25T03:16:06.000Z
2020-11-15T14:18:46.000Z
#!/usr/bin/env python
"""A simple viewer for Stokes patterns based on two far-field pattern files.
(Possibly based on one FF pattern files if it has two requests: one for each
polarization channel.)"""
import os
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.tvecfun import TVecFields
from antpat.radfarfield import RadFarField
from antpat.dualpolelem import DualPolElem

FEKOsuffix = 'ffe'
GRASPsuffix = 'swe'
NECsuffix = 'out'


def Jones2Stokes(Jones):
    """Convert Jones matrix to Stokes vector. This assumes dual-pol antenna receiving
    unpolarized unit valued radiation i.e. incoming Stokes = (1,0,0,0)."""
    brightmat = numpy.matmul(Jones, numpy.swapaxes(numpy.conjugate(Jones),-1,-2))
    StokesI = numpy.real(brightmat[...,0,0]+brightmat[...,1,1])
    StokesQ = numpy.real(brightmat[...,0,0]-brightmat[...,1,1])
    StokesU = numpy.real(brightmat[...,0,1]+brightmat[...,1,0])
    StokesV = numpy.imag(brightmat[...,0,1]-brightmat[...,1,0])
    return StokesI, StokesQ, StokesU, StokesV


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("p_chan_file", help='Filename of polarization channel p')
    parser.add_argument("q_chan_file", help='Filename of polarization channel q')
    parser.add_argument("freq", nargs='?', type=float, help="Frequency in Hertz")
    args = parser.parse_args()

    if args.p_chan_file.endswith(FEKOsuffix):
        plotStokes_fromFEKOfiles(args.p_chan_file, args.q_chan_file, args.freq)
    elif args.p_chan_file.endswith(GRASPsuffix):
        print("Not implemented yet.")
    elif args.p_chan_file.endswith(NECsuffix):
        print("Not implemented yet.")
    else:
        print("Far-field pattern file type not known")
        exit(1)
36.044944
102
0.674564
86a15d2cf1ab721951e4abf4f4b561d571ed4d1c
2,141
py
Python
utils.py
lingjiao10/Facial-Expression-Recognition.Pytorch
f5ba0e527347af3778d44eb7045e4970d01641a6
[ "MIT" ]
null
null
null
utils.py
lingjiao10/Facial-Expression-Recognition.Pytorch
f5ba0e527347af3778d44eb7045e4970d01641a6
[ "MIT" ]
null
null
null
utils.py
lingjiao10/Facial-Expression-Recognition.Pytorch
f5ba0e527347af3778d44eb7045e4970d01641a6
[ "MIT" ]
1
2019-10-02T02:26:39.000Z
2019-10-02T02:26:39.000Z
'''Some helper functions for PyTorch, including:
    - progress_bar: progress bar mimic xlua.progress.
    - set_lr : set the learning rate
    - clip_gradient : clip gradient
'''
import os
import sys
import time
import math

import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function

#
if sys.platform == 'win32':
    term_width = 80
else:
    print('###', os.popen('stty size', 'r').read())
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)

TOTAL_BAR_LENGTH = 30.
last_time = time.time()
begin_time = last_time

#[==>........ 19/225 ...........] | Loss: 1.961 | Acc: 22.000% (537/2432)
27.101266
76
0.604858
86a1bd490fa794c86a7ba173a9dce9709f3eb600
2,236
py
Python
string-method/src/analysis/FE_analysis/index_converter.py
delemottelab/gpcr-string-method-2019
b50786a4a8747d56ad04ede525592eb31f1890fd
[ "MIT" ]
null
null
null
string-method/src/analysis/FE_analysis/index_converter.py
delemottelab/gpcr-string-method-2019
b50786a4a8747d56ad04ede525592eb31f1890fd
[ "MIT" ]
null
null
null
string-method/src/analysis/FE_analysis/index_converter.py
delemottelab/gpcr-string-method-2019
b50786a4a8747d56ad04ede525592eb31f1890fd
[ "MIT" ]
3
2020-03-16T04:33:50.000Z
2021-03-19T17:25:59.000Z
from __future__ import absolute_import, division, print_function
import logging
import sys

logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
import numpy as np
import utils

logger = logging.getLogger("indexconverter")
38.551724
116
0.609123
86a2581b9feb29958228bc644f88e652dbe7a0fa
38
py
Python
Ex029 Aula 11-Cores no Terminal.py
andersontmachado/ExerciciosPython
ebd93eb4127dadedee8b719ccc4bc20fc151d0ad
[ "MIT" ]
1
2020-04-30T14:47:15.000Z
2020-04-30T14:47:15.000Z
Ex029 Aula 11-Cores no Terminal.py
andersontmachado/exerciciospython
ebd93eb4127dadedee8b719ccc4bc20fc151d0ad
[ "MIT" ]
null
null
null
Ex029 Aula 11-Cores no Terminal.py
andersontmachado/exerciciospython
ebd93eb4127dadedee8b719ccc4bc20fc151d0ad
[ "MIT" ]
null
null
null
print('\033[7;30mOla mundo\033[m!!!')
19
37
0.631579
86a37502649b0fcb2349b60e7e2d86e82dd233f5
12,050
py
Python
cirq-pasqal/cirq_pasqal/pasqal_device.py
pavoljuhas/Cirq
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
[ "Apache-2.0" ]
1
2022-02-05T22:17:39.000Z
2022-02-05T22:17:39.000Z
cirq-pasqal/cirq_pasqal/pasqal_device.py
pavoljuhas/Cirq
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
[ "Apache-2.0" ]
null
null
null
cirq-pasqal/cirq_pasqal/pasqal_device.py
pavoljuhas/Cirq
b6d6577be61d216ce2f29f8c64ae5879cf3087d5
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import FrozenSet, Callable, List, Sequence, Any, Union, Dict

import numpy as np
import networkx as nx

import cirq
from cirq import _compat, GridQubit, LineQubit
from cirq.ops import NamedQubit
from cirq_pasqal import ThreeDQubit, TwoDQubit, PasqalGateset
37.42236
99
0.630954
86a3efacb490990d88c7dfa47acc3b8f0d98c63a
22,798
py
Python
command_line/show.py
huwjenkins/dials
885a2f6ea3900dd0c9fcc15c03561fb45452c3bb
[ "BSD-3-Clause" ]
null
null
null
command_line/show.py
huwjenkins/dials
885a2f6ea3900dd0c9fcc15c03561fb45452c3bb
[ "BSD-3-Clause" ]
1
2019-06-03T16:09:12.000Z
2019-06-04T12:47:20.000Z
command_line/show.py
rjgildea/dials
0665a385d644bbef7541fb2d33c7a3c5a748e2b4
[ "BSD-3-Clause" ]
null
null
null
import os import sys import numpy as np import iotbx.phil from cctbx import uctbx from dxtbx.model.experiment_list import ExperimentListFactory from scitbx.math import five_number_summary import dials.util from dials.array_family import flex from dials.util import Sorry, tabulate help_message = """ Examples:: dials.show models.expt dials.show image_*.cbf dials.show observations.refl """ phil_scope = iotbx.phil.parse( """\ show_scan_varying = False .type = bool .help = "Whether or not to show the crystal at each scan point." show_shared_models = False .type = bool .help = "Show which models are linked to which experiments" show_all_reflection_data = False .type = bool .help = "Whether or not to print individual reflections" show_intensities = False .type = bool show_centroids = False .type = bool show_profile_fit = False .type = bool show_flags = False .type = bool .help = "Show a summary table of reflection flags" show_identifiers = False .type = bool .help = "Show experiment identifiers map if set" image_statistics{ show_corrected = False .type = bool .help = "Show statistics on the distribution of values in each corrected image" show_raw = False .type = bool .help = "Show statistics on the distribution of values in each raw image" } max_reflections = None .type = int .help = "Limit the number of reflections in the output." """, process_includes=True, ) def _create_flag_count_table(table): """Generate a summary table of flag values in a reflection table. :param table: A reflection table :returns: A string of the formatted flags table """ # Calculate the counts of entries that match each flag numpy_flags = table["flags"].as_numpy_array() flag_count = { flag: np.sum(numpy_flags & value != 0) for value, flag in table.flags.values.items() } # Work out the numeric-value order of the flags flag_order = sorted(table.flags.values.values(), key=lambda x: x.real) # Build the actual table flag_rows = [["Flag", "Count", "%"]] max_count_len = max(5, len(str(max(flag_count.values())))) last_flag = None for flag in flag_order: indent = "" # As a hint for reading, indent any 'summary' flags. # A summary flag is any flag which overlaps with the previous one. if last_flag and (last_flag.real & flag.real): indent = " " last_flag = flag # Add the row to the table we're building flag_rows.append( [ indent + flag.name, "{:{:d}d}".format(flag_count[flag], max_count_len), f"{100 * flag_count[flag] / len(table):5.01f}", ] ) # Build the array of output strings text = [] text.append("Reflection flags:") text.append(tabulate(flag_rows, headers="firstrow")) return "\n".join(text) if __name__ == "__main__": run()
32.992764
88
0.530485
86a477c71ec5eb0f689387ca230eaa223627c82b
8,749
py
Python
app/config/env_jesa.py
OuissalTAIM/jenkins
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
[ "BSD-1-Clause" ]
null
null
null
app/config/env_jesa.py
OuissalTAIM/jenkins
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
[ "BSD-1-Clause" ]
6
2021-02-02T22:52:41.000Z
2022-03-12T00:37:30.000Z
app/config/env_jesa.py
OuissalTAIM/jenkins
7ea5bcdeb6c0bb3cc14c2826a68e4f521de163c1
[ "BSD-1-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from enum import Enum, IntEnum, unique import os APP_NAME = "mine2farm" NETWORK_NAME = "CenterAxis" LOG_LEVEL_CONSOLE = "WARNING" LOG_LEVEL_FILE = "INFO" APP_FOLDER = os.getenv("JESA_MINE2FARM_HOME", "C:/GitRepos/mine2farm/") LOG_FOLDER = APP_FOLDER + "app/log/" LOG_FILE = "%(asctime)_" + APP_NAME + ".log" OUTPUT_FOLDER = "%s%s" % (APP_FOLDER, "outputs/") CANVAS_URL = "http://127.0.0.1/canvas.xlsm" # DB DB_NAME = None DB_HOST = "172.29.161.208" DB_PORT = 5006 DATA_SERVICE_ADD = "172.29.161.208" DATA_SERVICE_PORT = 5001 # Results DB_RESULT_NAME = "%s_results" % DB_NAME if DB_NAME is not None else None DB_DETAILED_RESULT_COLLECTION_NAME = "detailed" DB_GLOBAL_RESULT_COLLECTION_NAME = "global" DB_GLOBAL_BEST_RESULT_COLLECTION_NAME = "global_best" DB_DETAILED_BEST_RESULT_COLLECTION_NAME = "detailed_best" DB_SENSITIVITY_COLLECTION_NAME = "sensitivity" RESULT_BATCHES_SIZE = 25 HEAD_DATA_BITS = 17 DB_NAME_BITS = 20 RANDOMIZE_RESULTS = False # RabbitMQ RABBITMQ_SERVER = "localhost" RABBITMQ_SIMULATOR_QUEUE_NAME = "SIMULATE" RABBITMQ_CYCLE = 3 RABBITMQ_DETAILED_RESULT_QUEUE_NAME = "SAVE_DETAIL" RABBITMQ_GLOBAL_RESULT_QUEUE_NAME = "SAVE_GLOBAL" RABBITMQ_MAX_WORKER = RABBITMQ_CYCLE RABBITMQ_PATH = "C:\\Program Files\\RabbitMQ Server\\rabbitmq_server-3.8.1\\sbin" # Memcached MEMCACHED_SERVER = 'localhost' MEMCACHED_PORT = 11211 # Dashboard DB_LOAD_FROM_SERVICE = True # Monitoring MONITORING_APP_NAME = "mine2farm_monitor" MONITORING_SERVER = "172.29.161.208" MONITORING_PORT = 5002 MONITORING_DB_NAME = "task_history" MONITORING_COLLECTION_HISTORY_NAME = "task" MONITORING_COLLECTION_HISTORY_BEST_NAME = "best_scenarios_history" MONITORING_STEP = 1 MONITORING_NB_PAGE = 10 # Mongodb-bi MONGODB_BI_PATH = "C:\\Program Files\\MongoDB\\Connector for BI\\2.13\\bin" # Mongodb MONGO_SERVER_PATH = "C:\\Program Files\\MongoDB\\Server\\4.0\\bin" # params LOGISTICS_LP = False MODE_DEBUG = False GRANUL_RELAX = False # Model MONIKER_SEPARATOR = "/" WACC = 0.1 T0 = 2020 TMAX = 2031 PIPELINE_SCHEMA = { PipelineLayer.COMMON: { "type": PipelineType.COMMON, "dico": ["location", "opex", "unit", "currency", "output", "names", "products"] }, PipelineLayer.MINE: { "type": PipelineType.PRODUCER, "dico": ["mine.name", "mine.extraction", "mine.quality", "mine.capex"], "options": "mining_options", "production": "mining_specific_production", "opex": "mining_opex___specific_consumptions", "capex": "mining_capex", "priority_mines": "prioritymines" }, PipelineLayer.BENEFICIATION: { "type": PipelineType.PRODUCER, "dico": ["beneficiation.name", "beneficitation.process", "beneficitation.quality", "beneficitation.capex"], "options": "beneficiation_options", "production": "beneficiation_production", "opex": "beneficiation_opex___specific_consumptions", "capex": "beneficiation_capex" }, PipelineLayer.SAP: { "type": PipelineType.PRODUCER, "dico": ["sap.name", "sap.process", "sap.product", "sap.capex", "sap.capacity[kt]"], "options": "sap___power_plant_options", "production": "sap___power_plant_production", "opex": "sap___power_plant_opex___specific_consumptions", "capex": "sap___power_plant_capex", "product_type": "sap.product" }, PipelineLayer.PAP: { "type": PipelineType.PRODUCER, "dico": ["pap.name", "pap.process", "pap.product", "pap.capex", "pap.size[kt]", "pap.input"], "options": "pap_options", "production": "pap_production", "opex": "pap_opex___specific_consumptions", "capex": "pap_capex", "product_type": "pap.product" }, PipelineLayer.GRANULATION: { "type": PipelineType.PRODUCER, "dico": ["granulation.name", 
"granulation.process", "granulation.product", "granulation.capex", "granulation.input"], "options": "granulation_options", "production": "granulation_production", "opex": "granulation_opex", "capex": "granulation_capex" }, PipelineLayer.LOGISTICS: { "type": PipelineType.TRANSPORT, "dico": ["logistics.name", "logistics.process", "logistics.product", "logistics.capex"], "options": "logistics_options", "production": None, "opex": "logistics_opex", "capex": "logistics_capex" }, PipelineLayer.RAW_MATERIALS: { "type": PipelineType.PRICE, "data": "raw_materials" }, PipelineLayer.SALES_PLAN: { "type": PipelineType.SALES, "data": "sales_plan" }, PipelineLayer.UNIT_CONVERSION_MATRIX: { "type": PipelineType.COMMON, "data": "conv_matrix" }, } SUPPLY_CHAIN = "mine2port" DEPARTURE_ARRIVAL = {SUPPLY_CHAIN: (PipelineLayer.MINE), "sap2pap": (PipelineLayer.SAP, PipelineLayer.PAP)} COMBO_NODES = { PipelineLayer.MINE_BENEFICIATION: { "url": "mining_wp_connections", "upstream_layer": PipelineLayer.MINE, "downstream_layer": PipelineLayer.BENEFICIATION } } COMBO_NODES_SEPARATION = "--" SCENARIO_GEN_TYPE = ScenarioGeneratorType.FROM_OPTIONS PIPELINE_METADATA = { PipelineLayer.MINE: { "type": PipelineType.PRODUCER, "production": ["Name", "Extraction", "Quality", "Unit"], "opex": ["Name", "Extraction", "Capacity", "Item", "Unit"], "capex": ["Name", "Extraction", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.BENEFICIATION: { "type": PipelineType.PRODUCER, "production": ["Process", "InputQuality", "OutputQuality", "Humidity", "Unit"], "opex": ["Process", "InputQuality", "OutputQuality", "Item", "Unit"], "capex": ["Name", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.SAP: { "type": PipelineType.PRODUCER, "production": ["Location", "Process", "Product", "Unit"], "opex": ["Location", "Process", "Item", "Unit"], "capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.PAP: { "type": PipelineType.PRODUCER, "production": ["Process", "Input", "Product", "Unit"], "opex": ["Location", "Process", "Capacity", "Input", "Item", "Product", "Unit"], "capex": ["Location", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.GRANULATION: { "type": PipelineType.PRODUCER, "production": ["Process", "Input", "Product", "Unit"], "opex": ["Location", "ProductionSite", "Process", "Capacity", "Product", "Item", "Unit"], "capex": ["Location", "ProductionSite", "Product", "Process", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.LOGISTICS: { "type": PipelineType.TRANSPORT, "opex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit"], "capex": ["Upstream", "Downstream", "Method", "Product", "Capacity", "Item", "Unit", "CAPEX"] }, PipelineLayer.RAW_MATERIALS: { "type": PipelineType.PRICE, "columns": ["Item", "Unit"] }, PipelineLayer.SALES_PLAN: { "type": PipelineType.PRICE, "columns": ["Type", "Product", "Unit"] }, PipelineLayer.UNIT_CONVERSION_MATRIX: { "type": PipelineType.COMMON, "columns": ["Initial Unit", "Uniform Unit", "Conversion Rate"] }, } SHUFFLE_LEVELS = { PipelineLayer.MINE: ShuffleLevel.UNDEFINED, PipelineLayer.BENEFICIATION: ShuffleLevel.UNDEFINED, PipelineLayer.SAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED, PipelineLayer.PAP: ShuffleLevel.SHUFFLE_WITH_UNNAMED, PipelineLayer.GRANULATION: ShuffleLevel.UNDEFINED, PipelineLayer.LOGISTICS: ShuffleLevel.UNDEFINED, PipelineLayer.MINE_BENEFICIATION: ShuffleLevel.UNDEFINED }
29.758503
125
0.65356
86a51a9e777416f598dec47e504f16a9b1fa7744
433
py
Python
myFirstApp/travello/models.py
cankush625/Django
a3e874a69fbf34bf9123a7d60697a2449c7591c6
[ "MIT" ]
null
null
null
myFirstApp/travello/models.py
cankush625/Django
a3e874a69fbf34bf9123a7d60697a2449c7591c6
[ "MIT" ]
10
2020-02-12T03:08:48.000Z
2022-02-10T11:27:50.000Z
myFirstApp/travello/models.py
cankush625/Django
a3e874a69fbf34bf9123a7d60697a2449c7591c6
[ "MIT" ]
null
null
null
from django.db import models

# Create your models here.
22.789474
48
0.637413
86a5619ddeca5e16cc4b5d0ebb8500be1708f077
6,001
py
Python
app/app.py
Moustique-bot/hands-on-2021
fd023f0a431f72ef2c48e3a469be42e2de9e2957
[ "MIT" ]
null
null
null
app/app.py
Moustique-bot/hands-on-2021
fd023f0a431f72ef2c48e3a469be42e2de9e2957
[ "MIT" ]
null
null
null
app/app.py
Moustique-bot/hands-on-2021
fd023f0a431f72ef2c48e3a469be42e2de9e2957
[ "MIT" ]
null
null
null
import base64 import io import dash import dash_core_components as dcc import dash_html_components as html import dash_bootstrap_components as dbc from dash.dependencies import Input, Output import numpy as np import tensorflow as tf from PIL import Image from constants import CLASSES import yaml with open('app.yaml') as yaml_data : params = yaml.safe_load(yaml_data) IMAGE_WIDTH = params['IMAGE_WIDTH'] IMAGE_HEIGHT = params['IMAGE_HEIGHT'] PATH_MODEL = params['PATH_MODEL'] # Load DNN model classifier = tf.keras.models.load_model(PATH_MODEL) def classify_image(image, model, image_box=None): """Classify image by model Parameters ---------- content: image content model: tf/keras classifier Returns ------- class id returned by model classifier """ images_list = [] image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box) # box argument clips image to (x1, y1, x2, y2) image = np.array(image) images_list.append(image) return model.predict_classes(np.array(images_list)) app = dash.Dash('Traffic Signs Recognition', external_stylesheets=[dbc.themes.BOOTSTRAP]) pre_style = { 'whiteSpace': 'pre-wrap', 'wordBreak': 'break-all', 'whiteSpace': 'normal' } # Define application layout navbar = dbc.NavbarSimple( children=[ dbc.DropdownMenu( children=[ dbc.DropdownMenuItem('Rseau de Neurones', header=True), dbc.DropdownMenuItem('SVM', href="#"), ], nav=True, in_navbar=True, label='Modle', ), ], brand="Menu", brand_href="#", color= "#d90054", dark=True ) cards = html.Div( [ dbc.Card( dbc.CardBody( [ html.H5("Prsentation", className="card-title"), html.P( [ 'Cette application pour but de raliser des modles capables de classer des panneaux de signalisation allemand partir d\'une image. L\'application fonctionne de la manire suivante : vous dposer une image l\'emplacement indiqu et la prdiction du modle apparait immdiatement en dessous. En haut droite vous pouvez slectionner le modle que vous voulez tester.', ], className='card-text', ), ] ), className='w-75 mb-3', color='#f1cbd1', outline='Black', style={ 'margin-top': '75px', 'margin-left': '185px'}, ), ] ) app.layout = html.Div([ html.Div([navbar]), html.Div(cards), dcc.Upload( id='bouton-chargement', children=html.Div([ 'Cliquer-dposer ou ', html.A('slectionner une image') ]), style={ 'width': '50%', 'height': '60px', 'lineHeight': '60px', 'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '5px', 'textAlign': 'center', 'margin-top': '75px', 'margin-left': '370px', } ), html.Div(id='mon-image'), html.Div(id='ma-zone-resultat') ]) # Manage interactions with callbacks # Start the application if __name__ == '__main__': app.run_server(debug=True)
31.920213
402
0.551408
86a57ddfcf5854e170f6cff9e4deb86cb8f9d464
1,214
py
Python
books/rakutenapi.py
NobukoYano/LibraryApp
623f60614f15ab760e1c0d2f18954ce948f2d2a3
[ "MIT" ]
1
2019-04-27T11:18:42.000Z
2019-04-27T11:18:42.000Z
books/rakutenapi.py
NobukoYano/LibrayApp
623f60614f15ab760e1c0d2f18954ce948f2d2a3
[ "MIT" ]
11
2020-02-12T00:11:23.000Z
2022-02-10T07:59:24.000Z
books/rakutenapi.py
NobukoYano/LibrayApp
623f60614f15ab760e1c0d2f18954ce948f2d2a3
[ "MIT" ]
null
null
null
import json
import requests

from django.conf import settings
27.590909
74
0.559308
86a6435f792c7fc0f926b7e3c4e14710ea4c902b
26
py
Python
Random-Programs/optimization/root/v4.py
naumoff0/Archive
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
[ "MIT" ]
null
null
null
Random-Programs/optimization/root/v4.py
naumoff0/Archive
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
[ "MIT" ]
null
null
null
Random-Programs/optimization/root/v4.py
naumoff0/Archive
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
[ "MIT" ]
null
null
null
print(int(input(""))**0.5)
26
26
0.576923
86a699aa985f4eb39369d4b317e19a2eb2706a0b
18,710
py
Python
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
2,728
2015-01-09T10:19:32.000Z
2022-03-31T14:50:33.000Z
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
17,773
2015-01-05T15:57:17.000Z
2022-03-31T23:50:25.000Z
sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/models/_models_py3.py
rsdoherty/azure-sdk-for-python
6bba5326677468e6660845a703686327178bb7b1
[ "MIT" ]
1,916
2015-01-19T05:05:41.000Z
2022-03-31T19:36:44.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from typing import Any, List, Optional

from azure.core.exceptions import HttpResponseError
import msrest.serialization
32.147766
165
0.615446
86a6e60b85eb87efd2531834b58c525dde29390d
21,322
py
Python
school/views.py
pa-one-patel/college_managenment
be6f6dcac1f7e01f71d95f445e2118e8eec3fe3a
[ "MIT" ]
1
2021-04-11T12:05:44.000Z
2021-04-11T12:05:44.000Z
school/views.py
aliffauzi/schoolmanagement
6a4477af01df148404d1ff2941f74accb5717b09
[ "MIT" ]
6
2021-03-19T04:10:49.000Z
2021-09-22T19:06:14.000Z
school/views.py
aliffauzi/schoolmanagement
6a4477af01df148404d1ff2941f74accb5717b09
[ "MIT" ]
1
2021-04-11T12:07:08.000Z
2021-04-11T12:07:08.000Z
from django.shortcuts import render,redirect,reverse from . import forms,models from django.db.models import Sum from django.contrib.auth.models import Group from django.http import HttpResponseRedirect from django.contrib.auth.decorators import login_required,user_passes_test #for showing signup/login button for teacher(by sumit) #for showing signup/login button for teacher(by sumit) #for showing signup/login button for student(by sumit) #for checking user is techer , student or admin(by sumit) #for dashboard of adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit) #for teacher sectionnnnnnnn by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit) #for student by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit) #attendance related viewwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww(by sumit) #fee related view by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit) #notice related viewsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss(by sumit) #for TEACHER LOGIN SECTIONNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN(by sumit) #FOR STUDENT AFTER THEIR Loginnnnnnnnnnnnnnnnnnnnn(by sumit) # for aboutus and contact ussssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss (by sumit) def aboutus_view(request): return render(request,'school/aboutus.html') def contactus_view(request): sub = forms.ContactusForm() if request.method == 'POST': sub = forms.ContactusForm(request.POST) if sub.is_valid(): email = sub.cleaned_data['Email'] name=sub.cleaned_data['Name'] message = sub.cleaned_data['Message'] send_mail(str(name)+' || '+str(email),message, EMAIL_HOST_USER, ['[email protected]'], fail_silently = False) return render(request, 'school/contactussuccess.html') return render(request, 'school/contactus.html', {'form':sub})
33.211838
123
0.710909
86a72a80401e7713121d2f9ca2a2d2dc62069b97
16,684
py
Python
PaddleCV/tracking/pytracking/features/deep.py
weiwei1115/models
e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3
[ "Apache-2.0" ]
2
2021-05-15T07:35:04.000Z
2021-07-15T07:01:13.000Z
PaddleCV/tracking/pytracking/features/deep.py
weiwei1115/models
e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3
[ "Apache-2.0" ]
null
null
null
PaddleCV/tracking/pytracking/features/deep.py
weiwei1115/models
e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3
[ "Apache-2.0" ]
4
2021-08-11T08:25:10.000Z
2021-10-16T07:41:59.000Z
import os

import numpy as np
from paddle import fluid

from ltr.models.bbreg.atom import atom_resnet50, atom_resnet18
from ltr.models.siamese.siam import siamfc_alexnet
from ltr.models.siam.siam import SiamRPN_AlexNet, SiamMask_ResNet50_sharp, SiamMask_ResNet50_base
from pytracking.admin.environment import env_settings
from pytracking.features.featurebase import MultiFeatureBase
from pytracking.libs import TensorList
from pytracking.libs.paddle_utils import n2p
33.705051
98
0.552805
86a7a933257c5b58ca131b6e09db3e5af93d5f4e
19,069
py
Python
netesto/local/psPlot.py
fakeNetflix/facebook-repo-fbkutils
16ec0c024322c163e7dbe691812ba8fdf5b511ad
[ "BSD-3-Clause" ]
346
2016-04-08T17:04:29.000Z
2021-09-30T06:05:47.000Z
netesto/local/psPlot.py
fakeNetflix/facebook-repo-fbkutils
16ec0c024322c163e7dbe691812ba8fdf5b511ad
[ "BSD-3-Clause" ]
38
2016-04-26T14:58:17.000Z
2021-10-07T20:43:39.000Z
netesto/local/psPlot.py
fakeNetflix/facebook-repo-fbkutils
16ec0c024322c163e7dbe691812ba8fdf5b511ad
[ "BSD-3-Clause" ]
76
2016-04-08T17:59:23.000Z
2021-09-05T13:18:27.000Z
#!/usr/bin/env python2

import sys
import random
import os.path
import shutil
import commands
import types
import math

#gsPath = '/usr/local/bin/gs'
gsPath = 'gs'
logFile = '/dev/null'
#logFile = 'plot.log'

#--- class PsPlot(fname, pageHeader, pageSubHeader, plotsPerPage)
#

#--- Main
#
def main():
    tMin = 0
    tMax = 100000
    stateList = [0,1,2,2,3,3,3,3,4]
    fname = 'sched.txt'

    if len(sys.argv) == 2:
        fname = sys.argv[1]
    elif len(sys.argv) == 3:
        tMin = int(sys.argv[1])
        tMax = int(sys.argv[2])
    elif len(sys.argv) == 4:
        tMin = int(sys.argv[1])
        tMax = int(sys.argv[2])
        fname = sys.argv[3]
    elif len(sys.argv) != 1:
        print 'USAGE: psPlot.py [tMin tMax] [fname]'
        sys.exit(1)

    print 'tMin,tMax: ', tMin, tMax, 'fname: ', fname
    p = PsPlot('./p', 'Header', 'SubHeader', 1)

    fromStateList = []
    toStateList = []
    time1List = []
    time2List = []
    indx = 0
    oldTime = 0
    fin = open(fname, 'r')
    for inputLine in fin:
        inputLine = inputLine.replace(' ','')
        inputLine = inputLine.replace("'", '')
        i1 = inputLine.find('(')
        i2 = inputLine.find(')')
        inputList = inputLine[i1+1:i2-1].split(',')
        s1 = stateList[int(inputList[0])]
        s2 = stateList[int(inputList[1])]
        t = int(inputList[2])
        if indx != 0 and t >= tMin and t <= tMax:
            fromStateList.append(s1)
            toStateList.append(s2)
            time1List.append(oldTime)
            time2List.append(t)
        oldTime = t
        indx += 1

    p.SetPlot(tMin, tMax, 0, 0, 2, 0, 'Time', 'Socket/State', 'Chavey\'s Plot')
    state = 0
    while state <= 4:
        t1List = []
        t2List = []
        sList = []
        indx = 0
        for s in toStateList:
            if s == state:
                t1List.append(time1List[indx])
                t2List.append(time2List[indx])
                sList.append(0.10 + s*0.20)
            indx += 1
        p.PlotData(1,t1List, t2List, sList, 'Test',
                   '0.1 in 0 '+p.SetColor(p.colors[state])+' plotWbarsC', sys.stdout)
        state += 1

    image = p.GetImage(sys.stdout)
    print 'Image file: ', image
    p.End()

if __name__ == "__main__":
    main()
31.260656
147
0.457968
86a7e9fe107833a210f5b3b41b68cc42c51f48ee
402
py
Python
physio2go/exercises/migrations/0003_auto_20161128_1753.py
hamole/physio2go
ebd14c9406e2b6818dc649e4863a734bf812e9b0
[ "MIT" ]
null
null
null
physio2go/exercises/migrations/0003_auto_20161128_1753.py
hamole/physio2go
ebd14c9406e2b6818dc649e4863a734bf812e9b0
[ "MIT" ]
null
null
null
physio2go/exercises/migrations/0003_auto_20161128_1753.py
hamole/physio2go
ebd14c9406e2b6818dc649e4863a734bf812e9b0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-28 06:53
from __future__ import unicode_literals

from django.db import migrations
20.1
49
0.621891
86a8e1ed877d30bb9fe2c31cbcb8f214021f1ba6
2,006
py
Python
setup.py
pasinskim/mender-python-client
d6f3dc86ec46b0b249a112c5037bea579266e649
[ "Apache-2.0" ]
null
null
null
setup.py
pasinskim/mender-python-client
d6f3dc86ec46b0b249a112c5037bea579266e649
[ "Apache-2.0" ]
71
2020-12-21T05:08:13.000Z
2022-01-31T02:04:26.000Z
setup.py
pasinskim/mender-python-client
d6f3dc86ec46b0b249a112c5037bea579266e649
[ "Apache-2.0" ]
11
2020-12-02T14:46:58.000Z
2021-12-02T06:43:25.000Z
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import re

VERSIONFILE = "src/mender/_version.py"

version_string_line = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(VSRE, version_string_line, re.M)
if match:
    version_string = match.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="mender-python-client-mendersoftware",
    version=version_string,
    license="Apache 2.0",
    author="Mendersoftware",
    author_email="[email protected]",
    description="A Python implementation of the Mender client interface",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mendersoftware/mender-python-client",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    keywords=["mender", "OTA", "updater"],
    packages=setuptools.find_packages(where="src"),
    install_requires=["cryptography", "requests", "msgpack", "websockets"],
    entry_points={"console_scripts": ["mender-python-client=mender.mender:main"]},
    package_dir={"": "src"},
    python_requires=">=3.6",
    zip_safe=False,
    include_package_data=True,
)
37.849057
82
0.698903
86a985b6e0366a5f31612b64e590684791f59ced
740
py
Python
Q295-v2.py
Linchin/python_leetcode_git
3d08ab04bbdbd2ce268f33c501fbb149662872c7
[ "MIT" ]
null
null
null
Q295-v2.py
Linchin/python_leetcode_git
3d08ab04bbdbd2ce268f33c501fbb149662872c7
[ "MIT" ]
null
null
null
Q295-v2.py
Linchin/python_leetcode_git
3d08ab04bbdbd2ce268f33c501fbb149662872c7
[ "MIT" ]
null
null
null
""" 295 find median from data stream hard """ from heapq import * sol = MedianFinder() sol.addNum(1) print(sol.findMedian()) sol.addNum(2) print(sol.findMedian())
18.5
53
0.558108
86a988c6aa7f35cfd3902d0931e8d87597572497
3,445
py
Python
raisimPy/examples/newtonsCradle.py
mstoelzle/raisimLib
81f33a1b82f296e9622f950bc292f61bee2d2c2f
[ "Apache-2.0" ]
null
null
null
raisimPy/examples/newtonsCradle.py
mstoelzle/raisimLib
81f33a1b82f296e9622f950bc292f61bee2d2c2f
[ "Apache-2.0" ]
null
null
null
raisimPy/examples/newtonsCradle.py
mstoelzle/raisimLib
81f33a1b82f296e9622f950bc292f61bee2d2c2f
[ "Apache-2.0" ]
null
null
null
import os import numpy as np import raisimpy as raisim import math import time raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/activation.raisim") world = raisim.World() ground = world.addGround() world.setTimeStep(0.001) world.setMaterialPairProp("steel", "steel", 0.1, 1.0, 0.0) pin1 = world.addSphere(0.1, 0.8) pin1.setAppearance("1,0,0,0.3") pin1.setPosition(0.0, 0.0, 3.0) pin1.setBodyType(raisim.BodyType.STATIC) pin2 = world.addSphere(0.1, 0.8) pin2.setAppearance("0,1,0,0.3") pin2.setPosition(0.3, 0.0, 3.0) pin2.setBodyType(raisim.BodyType.STATIC) pin3 = world.addSphere(0.1, 0.8) pin3.setAppearance("0,0,1,0.3") pin3.setPosition(0.6, 0.0, 3.0) pin3.setBodyType(raisim.BodyType.STATIC) pin4 = world.addSphere(0.1, 0.8) pin4.setAppearance("1,0,0,0.3") pin4.setPosition(0.9, 0.0, 3.0) pin4.setBodyType(raisim.BodyType.STATIC) pin5 = world.addSphere(0.1, 0.8) pin5.setPosition(0.9, 0.0, 6.0) pin5.setBodyType(raisim.BodyType.STATIC) pin6 = world.addSphere(0.1, 0.8) pin6.setPosition(-3., 0.0, 7.0) pin6.setBodyType(raisim.BodyType.STATIC) pin7 = world.addSphere(0.1, 0.8) pin7.setPosition(-4., 0.0, 7.0) pin7.setBodyType(raisim.BodyType.STATIC) anymalB_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal/urdf/anymal.urdf" anymalC_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal_c/urdf/anymal.urdf" anymalC = world.addArticulatedSystem(anymalC_urdf_file) anymalB = world.addArticulatedSystem(anymalB_urdf_file) jointNominalConfig = np.array([-3, 0, 4.54, 1.0, 0.0, 0.0, 0.0, 0.03, 0.4, -0.8, -0.03, 0.4, -0.8, 0.03, -0.4, 0.8, -0.03, -0.4, 0.8]) jointVelocityTarget = np.zeros([anymalC.getDOF()]) jointPgain = np.ones(anymalC.getDOF()) * 100.0 jointDgain = np.ones(anymalC.getDOF()) * 1.0 anymalC.setGeneralizedCoordinate(jointNominalConfig) anymalC.setPdGains(jointPgain, jointDgain) anymalC.setPdTarget(jointNominalConfig, jointVelocityTarget) anymalC.setName("anymalC") jointNominalConfig[0] = -4 anymalB.setGeneralizedCoordinate(jointNominalConfig) anymalB.setPdGains(jointPgain, jointDgain) anymalB.setPdTarget(jointNominalConfig, jointVelocityTarget) anymalB.setName("anymalB") ball1 = world.addSphere(0.1498, 0.8, "steel") ball1.setPosition(0, 0.0, 1.0) ball2 = world.addSphere(0.1499, 0.8, "steel") ball2.setPosition(0.3, 0.0, 1.0) ball3 = world.addSphere(0.1499, 0.8, "steel") ball3.setPosition(0.6, 0.0, 1.0) ball4 = world.addSphere(0.1499, 0.8, "steel") ball4.setPosition(2.9, 0.0, 3.0) box = world.addBox(.1, .1, .1, 1) box.setPosition(0.9, 0.0, 4.2) world.addStiffWire(pin1, 0, np.zeros(3), ball1, 0, np.zeros(3), 2.0) world.addStiffWire(pin2, 0, np.zeros(3), ball2, 0, np.zeros(3), 2.0) world.addStiffWire(pin3, 0, np.zeros(3), ball3, 0, np.zeros(3), 2.0) world.addStiffWire(pin4, 0, np.zeros(3), ball4, 0, np.zeros(3), 2.0) wire5 = world.addCompliantWire(pin5, 0, np.zeros(3), box, 0, np.zeros(3), 2.0, 200) wire5.setStretchType(raisim.StretchType.BOTH) wire6 = world.addCompliantWire(pin6, 0, np.zeros(3), anymalC, 0, np.zeros(3), 2.0, 1000) wire6.setStretchType(raisim.StretchType.BOTH) wire7 = world.addCustomWire(pin7, 0, np.zeros(3), anymalB, 0, np.zeros(3), 2.0) wire7.setTension(310) server = raisim.RaisimServer(world) server.launchServer(8080) for i in range(500000): time.sleep(0.001) server.integrateWorldThreadSafe() if i == 5000: world.removeObject(wire7) server.killServer()
32.196262
134
0.722787
86aa12779a6111083d5f447b8a7b523841c60e96
15,132
py
Python
nova/virt/hyperv/volumeops.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
1
2019-07-29T10:30:24.000Z
2019-07-29T10:30:24.000Z
nova/virt/hyperv/volumeops.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
11
2017-06-19T01:28:55.000Z
2017-06-23T02:01:47.000Z
nova/virt/hyperv/volumeops.py
viveknandavanam/nova
556377b6915936467436c9d5bb33bc0e22244e1e
[ "Apache-2.0" ]
3
2018-04-04T15:15:01.000Z
2018-04-19T18:14:25.000Z
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time

from os_brick.initiator import connector
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import strutils

import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt import driver
from nova.virt.hyperv import constants

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF


class FCVolumeDriver(BaseVolumeDriver):
    _is_block_dev = True
    _protocol = constants.STORAGE_PROTOCOL_FC
41.231608
79
0.630849
86aa70a303cf42efa31de488c8f84aac08996583
1,326
py
Python
-Loan-Approval-Analysis/code.py
lakshit-sharma/greyatom-python-for-data-science
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
[ "MIT" ]
null
null
null
-Loan-Approval-Analysis/code.py
lakshit-sharma/greyatom-python-for-data-science
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
[ "MIT" ]
null
null
null
-Loan-Approval-Analysis/code.py
lakshit-sharma/greyatom-python-for-data-science
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
[ "MIT" ]
null
null
null
# -------------- # Importing header files import numpy as np import pandas as pd from scipy.stats import mode # code starts here bank = pd.read_csv(path) categorical_var = bank.select_dtypes(include = 'object') print(categorical_var) numerical_var = bank.select_dtypes(include = 'number') print(numerical_var) banks = bank.drop(columns=['Loan_ID']) bank_mode = banks.mode() banks = banks.fillna(bank_mode.iloc[0]) print(banks.isnull().sum()) avg_loan_amount = pd.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc = 'mean') print(avg_loan_amount) loan_approved_se = banks[ (banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y") ] loan_approved_nse = banks[ (banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y") ] percentage_se = (len(loan_approved_se) / 614) * 100 percentage_nse = (len(loan_approved_nse) / 614) * 100 # loan amount term loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12 ) big_loan_term=len(loan_term[loan_term>=25]) print(big_loan_term) columns_to_show = ['ApplicantIncome', 'Credit_History'] loan_groupby=banks.groupby(['Loan_Status'])[columns_to_show] # Check the mean value mean_values=loan_groupby.agg([np.mean]) print(mean_values) # code ends here
24.109091
125
0.69457
86aa77866191f8899234ee88d0a38f765c6e8d3e
7,673
py
Python
others/train_RNN.py
jacobswan1/Video2Commonsense
4dcef76360a29702fd90b7030a39a123da6db19e
[ "MIT" ]
31
2021-01-07T00:42:05.000Z
2022-01-18T16:44:09.000Z
others/train_RNN.py
jacobswan1/Video2Commonsense
4dcef76360a29702fd90b7030a39a123da6db19e
[ "MIT" ]
7
2021-01-07T00:41:28.000Z
2021-12-01T09:29:49.000Z
others/train_RNN.py
jacobswan1/Video2Commonsense
4dcef76360a29702fd90b7030a39a123da6db19e
[ "MIT" ]
4
2021-02-04T04:55:20.000Z
2021-07-25T06:50:44.000Z
''' Training Script for V2C captioning task. '''

__author__ = 'Jacob Zhiyuan Fang'

import os
import numpy as np
from opts import *
from utils.utils import *
import torch.optim as optim
from model.Model import Model
from torch.utils.data import DataLoader
from utils.dataloader import VideoDataset
from model.transformer.Optim import ScheduledOptim


if __name__ == '__main__':
    opt = parse_opt()
    opt = vars(opt)

    main(opt)
41.475676
121
0.571224
86ab2a7a0d57050e80f3f20e1f2f61131ca45a9a
487
py
Python
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
new-influx-client.py
benlamonica/energy-monitor
86714a365c91cc05c265de81bce191ff4ab585f8
[ "MIT" ]
null
null
null
import influxdb_client
from influxdb_client import InfluxDBClient

bucket = "python-client-sandbox"
org = "Energy Monitor"
token = "miQdAvNXHiNDVVzPzV5FpkCaR_8qdQ-L1FlPCOXQPI325Kbrh1fgfhkcDUZ4FepaebDdpZ-A1gmtnnjU0_hViA=="
url = "http://localhost:9999"

client = InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api()

# Write a single point record to the bucket/org defined above.
write_api.write(bucket, org, [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"}, "fields": {"water_level": 1}, "time": 1}])
40.583333
148
0.755647
86ab8849571d80e31e545baaa8fc3a7e45faa001
6,176
py
Python
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
tests/test_agent/test_manhole.py
guidow/pyfarm-agent
bb5d464f9f6549a3db3529a93e3d9f388b365586
[ "Apache-2.0" ]
null
null
null
# No shebang line, this module is meant to be imported # # Copyright 2014 Oliver Palmer # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import namedtuple from pprint import pprint from random import randint from StringIO import StringIO from textwrap import dedent try: from unittest.mock import patch except ImportError: # pragma: no cover from mock import patch from twisted.internet.protocol import ServerFactory from twisted.cred.portal import Portal from twisted.conch.telnet import ( ITelnetProtocol, TelnetBootstrapProtocol, TelnetTransport) from pyfarm.agent.testutil import TestCase from pyfarm.agent.manhole import ( LoggingManhole, TransportProtocolFactory, TelnetRealm, manhole_factory, show) Peer = namedtuple("Peer", ("host", "port"))
32.505263
79
0.615771
86abdce88613d6ee71e638ae7487297146c3e7a8
338
py
Python
func-button/klSigmode.py
xcgoo/uiKLine
80683401d7dc66262ae645db4c2780d6e71be551
[ "MIT" ]
232
2017-10-11T09:19:03.000Z
2022-03-09T01:34:49.000Z
func-button/klSigmode.py
DON-2020-LEE/uiKLine-2
fd1d0dca5fd6b1542af4b10c110e39361b29d378
[ "MIT" ]
8
2017-12-09T09:10:15.000Z
2021-04-22T03:35:26.000Z
func-button/klSigmode.py
DON-2020-LEE/uiKLine-2
fd1d0dca5fd6b1542af4b10c110e39361b29d378
[ "MIT" ]
132
2017-10-11T09:16:29.000Z
2022-02-09T10:37:57.000Z
# coding: utf-8

"""

"""

#----------------------------------------------------------------------
def klSigmode(self):
    """Toggle the plotted trade signals between open-position signals and all signals."""
    if self.mode == 'deal':
        self.canvas.updateSig(self.signalsOpen)
        self.mode = 'dealOpen'
    else:
        self.canvas.updateSig(self.signals)
        self.mode = 'deal'
21.125
71
0.446746
86acd0c8a74d48d7a1cf116cc0a40300ec411cd2
16,459
py
Python
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
5
2021-06-17T12:48:45.000Z
2022-01-22T22:23:44.000Z
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
7
2020-11-13T18:42:14.000Z
2022-02-10T01:31:07.000Z
utils/thin.py
BnF-jadis/projet
212b1e7b179a564650fb959d9c2565648178f6b6
[ "CC-BY-3.0" ]
1
2021-10-17T10:49:45.000Z
2021-10-17T10:49:45.000Z
# 2020, BackThen Maps # Coded by Remi Petitpierre https://github.com/RPetitpierre # For Bibliothque nationale de France (BnF) import cv2, thinning, os import numpy as np import pandas as pd import shapefile as shp from skimage.measure import approximate_polygon from PIL import Image, ImageDraw from utils.utils import * from utils.match import toLatLon Image.MAX_IMAGE_PIXELS = 500000000 def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False): ''' Thinning/skeletonization of the road network image to a wired model. Input(s): road_network: black and white image of the road network (streets in white) path: path where the skeletonized image should be saved largest_component: if True, only the largest road network component will be kept Output(s): vectorized: skeletonized image ''' assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image' img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2)) vectorized = thinning.guo_hall_thinning(img) vectorized[vectorized > 100] = 255 vectorized[vectorized <= 100] = 0 if largest_component: try: _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8, stats=cv2.CC_STAT_AREA) stats = stats[1:] main_component = (np.argmax(stats[:,4])+1).astype('int32') vectorized = (labels == main_component).astype('uint8')*255 except: 'Warning: Skeletonization failed to apply largest_component = True param. Skipping.' cv2.imwrite(path, vectorized) return vectorized def findNodes(image: np.ndarray): ''' Find the nodes in the road network skeleton image. Input(s): image: skeletonized image Output(s): nodes: array of nodes coordinates (x, y) degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.) addresses: directions of the crossing roads, with regard to the node ''' img = image.copy() # Find row and column locations that are non-zero (rows, cols) = np.nonzero(img) nodes, degree, addresses = [], [], [] for (r,c) in zip(rows, cols): if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1: # Extract an 8-connected neighbourhood (col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1])) # Cast to int to index into image col_neigh = col_neigh.astype('int') row_neigh = row_neigh.astype('int') # Convert into a single 1D array and check for non-zero locations pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0 # If the number of non-zero locations equals 2, add this to our list of coordinates n_neighbours = np.sum(pix_neighbourhood) if (n_neighbours == 2) or (n_neighbours >= 4): nodes.append((r, c)) degree.append(n_neighbours) direction_set = np.where(pix_neighbourhood == True)[0] direction_set = direction_set[direction_set != 4] addresses.append(direction_set) nodes = np.asarray(nodes) return nodes, degree, addresses def explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray): ''' Follow the path from one given start node and direction until the next node, and stores the pixels on the way. 
Input(s): start_x: start node x-coordinate start_y: start node y-coordinate start_dir: starting direction ({0, 1, 2, 3, -, 5, 6, 7, 8}) image: skeletonized image of the road network nodes_grid: grid of the nodes of the skeletonized image Output(s): way: list of pixel coordinates on the way direction: last direction to reach the 2nd node nodes_grid[x, y]: degree of the arrival node ''' direction = start_dir x, y = start_x, start_y assert image[x, y] != 0, 'ERROR: start point is not white' end = False way = [(x, y)] # First iteration new_x, new_y = absoluteWay(x, y, direction) assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white' way.append((new_x, new_y)) x, y = new_x, new_y wrong_paths = noTurnBack(direction) wrong_paths_active = True if nodes_grid[x, y]: end = True direction = 8-start_dir while not(end): if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1: # Extract an 8-connected neighbourhood (row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1])) # Cast to int to index into image col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int') # Convert into a single 1D array and check for non-zero locations try: pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0 except: print(x, y, image.shape, ) raise AssertionError() # If the number of non-zero locations equals 2, add this to our list of coordinates n_neighbours = np.sum(pix_neighbourhood) direction_set = np.where(pix_neighbourhood == True)[0] last_ds = [wrong_paths] last_ds.append(direction_set) direction_set = direction_set[direction_set != 4] last_ds.append(direction_set) direction_set = direction_set[direction_set != (8-direction)] last_ds.append(direction_set) direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction)) last_ds.append(direction_set) if wrong_paths_active: for wrong_path in wrong_paths: direction_set = direction_set[direction_set != wrong_path] wrong_paths_active = False if len(direction_set) != 1: end = True break direction = direction_set[0] new_x, new_y = absoluteWay(x, y, direction) way.append((new_x, new_y)) x, y = new_x, new_y if nodes_grid[x, y]: end = True else: end = True return way, direction, nodes_grid[x, y] def findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30, return_simple_ways: bool = True): ''' Find all the road segments in the network. Keep the ones that are longer than a given length or non-terminal. Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it. 
Input(s): df_nodes: list of nodes image: skeletonized image of the road network min_length: min segment length if the segment is terminal return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it Output(s): (Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segmenty) ways: list of segments, containing all the pixels on the way between each couple of nodes nodes_grid: image containing all the nodes found in the image and their degree ''' img = image.copy() done, ways = [], [] df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True) nodes_grid = np.zeros(image.shape) for ind, row in df_nodes[['x', 'y', 'degree']].iterrows(): nodes_grid[row['x'], row['y']] = row['degree'] nodes_grid = nodes_grid.astype('int') for ind, node in df_nodes.iterrows(): for direct in node['address']: code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct) if not(code in done): way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'], start_dir=direct, image=img, nodes_grid=nodes_grid) if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))): done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct)) ways.append(way) if return_simple_ways: simple_ways = [] for way in ways: inv_way = np.asarray([np.asarray(way)[:,1], image.shape[0]-np.asarray(way)[:,0]]).transpose() simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist()) return simple_ways, ways, nodes_grid else: return ways, nodes_grid def toPNG(segments: list, vectorized: np.ndarray, out_path: str): ''' Save a given set of segments as a bitmap image from the road network. Input(s): segments: list of segments, containing all the pixels on the way between each couple of nodes vectorized: skeletonized image of the road network out_path: the path, where the output bitmap image should be save ''' canvas = (np.ones(vectorized.shape)*255).astype('uint8') cv2.imwrite('workshop/canvas.png', canvas); bitmap = Image.open('workshop/canvas.png') draw = ImageDraw.Draw(bitmap) for segment in segments: coords = [] for point in segment: coords.append((point[1], point[0])) draw.line(coords, fill = 'black', width=0) bitmap.save(out_path)
37.663616
125
0.584118
86acd82b514b30458fa54cefc7db6d72f32e8646
875
py
Python
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
3
2018-01-22T13:45:12.000Z
2022-01-27T04:17:52.000Z
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
1
2017-01-24T23:57:51.000Z
2017-12-11T14:33:32.000Z
easy2fa/tests/test_checkinput.py
lutostag/otp
0792548fa51c489cdc5fcb01a3c6dad1cd453154
[ "MIT" ]
null
null
null
from unittest import TestCase from unittest.mock import patch from easy2fa import cli
33.653846
74
0.634286
86ad342de7b5dfdb142a5dff63b155f6c655c5c6
2,845
py
Python
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
bert_finetuning/data_loader.py
nps1ngh/adversarial-bert-german-attacks-defense
3cca292ec4c3c07945f4198ae81e1f671462ed90
[ "Apache-2.0" ]
null
null
null
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from bert_finetuning.data import GermanData """ ** FOR DEBUGGING ** if __name__ == "__main__": ## define data paths germeval_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } hasoc_german_data_paths = { "train": "./datasets/hasoc_dataset/hasoc_german_train.csv", "dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv", "test": "./datasets/hasoc_dataset/hasoc_german_test.csv", } ## create dataloaders print("creating germeval dataloaders...") germ_eval_dataloader = GermanDataLoader(germeval_data_paths) print("creating hasoc dataloaders...") hasoc_german_dataloader = GermanDataLoader(hasoc_german_data_paths) """
31.966292
89
0.634798
86ae167dd0746f0077e0b0c327435fcca99f837b
1,973
py
Python
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
43
2021-05-20T14:07:53.000Z
2022-03-23T12:58:26.000Z
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
3
2021-09-19T20:49:21.000Z
2022-03-07T10:25:47.000Z
data/dirty_mnist.py
Karthik-Ragunath/DDU
b9daae9304bdeb222857884ef8cb3b6b3d004d33
[ "MIT" ]
8
2021-06-26T15:28:45.000Z
2022-02-19T02:07:05.000Z
import torch import numpy as np import torch.utils.data as data from torch.utils.data import Subset from data.fast_mnist import create_MNIST_dataset from data.ambiguous_mnist.ambiguous_mnist_dataset import AmbiguousMNIST
34.017241
113
0.737456
86ae868b0b9598e5f2e99607cce26d99b3a34dc3
4,147
py
Python
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
2
2020-10-19T08:59:08.000Z
2022-03-07T10:30:21.000Z
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
67
2020-04-15T09:43:31.000Z
2022-03-18T08:29:17.000Z
vantage6/server/resource/recover.py
jaspersnel/vantage6-server
88ad40d23cc36eaba57c170929f7ccdd0011720a
[ "Apache-2.0" ]
2
2021-01-21T15:09:26.000Z
2021-04-19T14:58:10.000Z
# -*- coding: utf-8 -*- import logging import datetime from flask import request, render_template from flask_jwt_extended import ( create_access_token, decode_token ) from jwt.exceptions import DecodeError from flasgger import swag_from from http import HTTPStatus from pathlib import Path from sqlalchemy.orm.exc import NoResultFound from vantage6.common import logger_name from vantage6.server import db from vantage6.server.resource import ( ServicesResources ) module_name = logger_name(__name__) log = logging.getLogger(module_name) # ------------------------------------------------------------------------------ # Resources / API's # ------------------------------------------------------------------------------
30.718519
80
0.590306
86b032b82ee76fccb3eab7e57dd8b06b6868e592
2,633
py
Python
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
1
2021-11-01T02:18:55.000Z
2021-11-01T02:18:55.000Z
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
1
2020-12-28T16:16:53.000Z
2020-12-28T16:16:53.000Z
examples/basic_examples/aws_sns_sqs_middleware_service.py
tranvietanh1991/tomodachi
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
[ "MIT" ]
null
null
null
import os from typing import Any, Callable, Dict import tomodachi from tomodachi import aws_sns_sqs, aws_sns_sqs_publish from tomodachi.discovery import AWSSNSRegistration from tomodachi.envelope import JsonBase
39.298507
122
0.692366
86b0a422c8bc9f85b86cb962da85b578f24f06e1
425
py
Python
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
ex9.py
ThitsarAung/python-exercises
bca97875e25f9621fc5f58ab1d360426a21efc7f
[ "MIT" ]
null
null
null
types_of_people = 10 x = f"There are {types_of_people} types of people." binary = "binary" do_not = "don't" y = f"Those who know {binary} and those who {do_not}." print(x) print(y) print(f"I said: {x}") print(f"I also said: '{y}'") hilarious = False joke_evaluation = "Isn't that joke so funny?! {}" print(joke_evaluation.format(hilarious)) w="This is the left side of..." e="a string with a right side." print(w + e)
18.478261
54
0.672941
86b2f2b4446116811cbd5f27739dd93c92634c93
7,182
py
Python
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
3,442
2017-11-20T08:39:51.000Z
2019-05-06T10:51:19.000Z
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
430
2017-11-29T04:21:48.000Z
2019-05-06T05:37:37.000Z
mmdnn/conversion/caffe/writer.py
2yz/MMdnn
13d909e4b591a5043b74b611e412c3c0a5eba0cc
[ "MIT" ]
683
2017-11-20T08:50:34.000Z
2019-05-04T04:25:14.000Z
import base64 from google.protobuf import json_format from importlib import import_module import json import numpy as np import os import sys from mmdnn.conversion.caffe.errors import ConversionError from mmdnn.conversion.caffe.common_graph import fetch_attr_value from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
35.731343
92
0.589112
86b35d8336f90b1f441624f230053b48e0260a33
1,258
py
Python
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
week1/85-maximal-rectangle.py
LionTao/algo_weekend
d25756761d47491b8c78ecf8a857080497910c76
[ "Unlicense" ]
null
null
null
""" leetcode-85 0 1 , rows x cols , 1 , """ from typing import List
32.25641
81
0.509539
86b35f885b38c215bfc2684f695ba3ae9b742e9a
9,347
py
Python
pandapower/test/opf/test_costs_pwl.py
mathildebadoual/pandapower
9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc
[ "BSD-3-Clause" ]
1
2020-10-19T06:39:15.000Z
2020-10-19T06:39:15.000Z
pandapower/test/opf/test_costs_pwl.py
miek770/pandapower
de004efc1b7432a633792af4f551f7635a02db47
[ "BSD-3-Clause" ]
null
null
null
pandapower/test/opf/test_costs_pwl.py
miek770/pandapower
de004efc1b7432a633792af4f551f7635a02db47
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel. All rights reserved. import numpy as np import pytest from pandapower.optimal_powerflow import OPFNotConverged import pandapower as pp try: import pplog as logging except ImportError: import logging logger = logging.getLogger(__name__) logger.setLevel("DEBUG") def test_cost_piecewise_linear_gen(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -100], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_gen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_eg(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10) pp.create_ext_grid(net, 0, max_p_kw=0, min_p_kw=-50) pp.create_gen(net, 1, p_kw=-10, max_p_kw=0, min_p_kw=-50, controllable=True) # pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "ext_grid", np.array([[-50, -500], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - - net.res_ext_grid.p_kw.values * 10 < 1e-3 # check and assert result def test_get_costs(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_gen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "gen", np.array([[-150, -300], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost == 2 * net.res_gen.p_kw.values # check and assert result def test_cost_piecewise_linear_sgen(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) 
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -100], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_load(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0, min_q_kvar=0) pp.create_ext_grid(net, 0) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 50], [150, 100]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3 def test_cost_piecewise_linear_sgen_uneven_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-100, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-150, -200], [-75, -50], [0, 0]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 def test_cost_piecewise_linear_load_uneven_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.05 vm_min = 0.95 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) 
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_load(net, 1, p_kw=100, controllable=True, max_p_kw=150, min_p_kw=50, max_q_kvar=0, min_q_kvar=0) pp.create_ext_grid(net, 0) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "load", np.array([[0, 0], [75, 51], [150, 101]])) # run OPF with pytest.raises(OPFNotConverged): pp.runopp(net, verbose=False) assert net["OPF_converged"] assert abs(net.res_cost - net.res_load.p_kw.values / 1.5) < 1e-3 def test_cost_piecewise_linear_sgen_very_unsteady_slopes(): """ Testing a very simple network for the resulting cost value constraints with OPF """ # boundaries: vm_max = 1.5 vm_min = 0.5 # create net net = pp.create_empty_network() pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.) pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4) pp.create_sgen(net, 1, p_kw=-1000, controllable=True, max_p_kw=0, min_p_kw=-1500, max_q_kvar=50, min_q_kvar=-50) pp.create_ext_grid(net, 0) pp.create_load(net, 1, p_kw=20, controllable=False) pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876, max_loading_percent=100 * 690) pp.create_piecewise_linear_cost(net, 0, "sgen", np.array([[-1500, 2],[-750,1 ], [0,2]])) # run OPF pp.runopp(net, verbose=False) assert net["OPF_converged"] # assert net.res_cost - net.res_sgen.p_kw.values / 1.5 < 1e-3 if __name__ == "__main__": # test_cost_piecewise_linear_sgen_very_unsteady_slopes() pytest.main(["test_costs_pwl.py", "-s"])
37.09127
99
0.644378
86b3d8112beb6b385c29392912e1d48581db14c2
680
py
Python
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
cookie_refresh.py
guoxianru/cookie_pool_lite
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # @Author: GXR # @CreateTime: 2022-01-20 # @UpdateTime: 2022-01-20 import redis import config import cookie_login from cookie_api import app red = redis.Redis( host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB, decode_responses=True, ) # cookie if __name__ == "__main__": run_cookie_refresh()
18.888889
59
0.679412
86b46d24f10eba79c88afa632d31ceb83f18b3b1
292
py
Python
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
12
2015-08-12T03:00:59.000Z
2017-06-08T11:01:09.000Z
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
6
2015-11-10T04:02:25.000Z
2016-03-16T02:57:14.000Z
feemodel/app/__init__.py
bitcoinfees/feemodel
5d582d87eca6e54eb20b81f4e21c81273a59b468
[ "MIT" ]
3
2016-03-10T17:08:41.000Z
2022-02-24T18:51:21.000Z
from feemodel.app.transient import TransientOnline from feemodel.app.pools import PoolsOnlineEstimator from feemodel.app.predict import Prediction from feemodel.app.simonline import SimOnline __all__ = [ 'TransientOnline', 'PoolsOnlineEstimator', 'Prediction', 'SimOnline' ]
24.333333
51
0.784247
86b4af0033c71e00f4e30f0ac3bfd045c1932aa8
760
py
Python
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
941
2015-01-01T18:17:43.000Z
2022-02-26T07:45:40.000Z
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
228
2015-01-11T16:36:34.000Z
2022-03-11T23:17:15.000Z
examples/server/models/image_file_upload.py
ParikhKadam/django-angular
1fdd2ab3211ed1655acc2d172d826ed7f3ad0574
[ "MIT" ]
294
2015-01-04T09:01:33.000Z
2022-02-26T07:45:41.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals # start tutorial from django.db import models from djng.forms import NgModelFormMixin, NgFormValidationMixin from djng.styling.bootstrap3.forms import Bootstrap3ModelForm
28.148148
82
0.728947
86b6adb997cbd21ec9e8e9a5843dcd2235408ae3
2,997
py
Python
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
yangulei/tvm
d2cbdf381b68134951bfd7525c6a3a67838e5bdf
[ "Apache-2.0" ]
4,640
2017-08-17T19:22:15.000Z
2019-11-04T15:29:46.000Z
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
dmlc/tvm
1e0e9548a6875241267481a4223b4dbf29fa1641
[ "Apache-2.0" ]
2,863
2017-08-17T19:55:50.000Z
2019-11-04T17:18:41.000Z
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
yelite/tvm
7ae919292d42f5858d4db04533bca67b4b5bb44f
[ "Apache-2.0" ]
1,352
2017-08-17T19:30:38.000Z
2019-11-04T16:09:29.000Z
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name """Compute and schedule for add, multiply, subtract slice op Please note the following assumptions made by the implementation: 1) The inputs will be multiple of crouton layout except for the axis that needs broadcasting.""" from tvm import te from tvm import tir from tvm import topi from ..utils import get_layout_transform_fn def add_broadcast_compute(input_a, input_b): """Call the add op from topi""" return topi.add(input_a, input_b) def subtract_broadcast_compute(input_a, input_b): """Call the subtract op from topi""" return topi.subtract(input_a, input_b) def multiply_broadcast_compute(input_a, input_b): """Call the multiply op from topi""" return topi.multiply(input_a, input_b) def tir_broadcast_schedule( out_m, input_a, input_b, output_layout: str, input_a_layout: str, input_b_layout: str, op_name: str, ): """Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast""" func = te.create_prim_func([input_a, input_b, out_m]) s = tir.Schedule(func) block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"} block = s.get_block(block_dict[op_name]) if input_a_layout == "nhwc-8h2w32c2w-2d": input_a_transformed_layout = get_layout_transform_fn(input_a_layout) s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout) if input_b_layout == "nhwc-8h2w32c2w-2d": input_b_transformed_layout = get_layout_transform_fn(input_b_layout) s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout) output_transformed_layout = get_layout_transform_fn(output_layout) s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout) n, h, w, c = s.get_loops(block) h_o, h_i = s.split(h, [None, 8]) w_o, w_i = s.split(w, [None, 4]) c_o, c_i = s.split(c, [None, 32]) wio, wii = s.split(w_i, [None, 2]) s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii) fused = s.fuse(c_i, wii) s.vectorize(fused) return s
34.056818
97
0.703704
86b7ef11958dc926cec50bcec5a016a3d479c413
6,634
py
Python
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
null
null
null
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
1
2021-06-21T18:30:02.000Z
2021-06-25T21:18:39.000Z
python_modules/automation/automation/docker/dagster_docker.py
jrouly/dagster
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
[ "Apache-2.0" ]
null
null
null
import contextlib import os from collections import namedtuple import yaml from dagster import __version__ as current_dagster_version from dagster import check from .ecr import ecr_image, get_aws_account_id, get_aws_region from .utils import ( execute_docker_build, execute_docker_push, execute_docker_tag, python_version_image_tag, ) # Default repository prefix used for local images DEFAULT_LOCAL_PREFIX = "dagster" # Location of the template assets used here IMAGES_PATH = os.path.join(os.path.dirname(__file__), "images")
38.569767
100
0.655412
86b8aba13af33d7534f429cc7d5eda4e95f58299
13,716
py
Python
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
231
2015-01-08T09:04:44.000Z
2021-12-30T03:03:10.000Z
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2018-02-10T21:00:08.000Z
2018-03-20T05:09:50.000Z
chrome/test/telemetry/chromeos/login_unittest.py
Fusion-Rom/android_external_chromium_org
d8b126911c6ea9753e9f526bee5654419e1d0ebd
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
268
2015-01-21T05:53:28.000Z
2022-03-25T22:09:01.000Z
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import logging import os import unittest from telemetry.core import browser_finder from telemetry.core import exceptions from telemetry.core import extension_to_load from telemetry.core import util from telemetry.core.backends.chrome import cros_interface from telemetry.unittest import options_for_unittests
42.203077
80
0.646544
86b8d88ae37a5647339fb11a5a98693e6a0c570d
790
py
Python
generator/database.py
Neotrinost/Neotrinost.ir
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
4
2021-05-02T17:35:30.000Z
2021-11-08T12:55:14.000Z
generator/database.py
Neotrinost/Flask_Neotrinost
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
4
2021-07-12T19:08:01.000Z
2021-08-13T19:37:50.000Z
generator/database.py
Neotrinost/Neotrinost.ir
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
[ "MIT" ]
2
2021-08-08T15:10:07.000Z
2021-11-15T08:59:22.000Z
import sqlite3
35.909091
106
0.596203
86babfbac8b5c2af0dd5e02e52be427fd0ffce35
3,688
py
Python
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
22
2015-01-14T19:49:32.000Z
2022-01-26T12:18:52.000Z
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
null
null
null
crits/backdoors/forms.py
frbapolkosnik/crits
1278c034f2238e2fe34e65e32ce241128a014df2
[ "MIT" ]
6
2015-01-22T21:25:52.000Z
2021-04-12T23:24:14.000Z
from django import forms from django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import get_user_organization from crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
44.97561
117
0.629067
86bb18dffc0306993885a2bc13f98c2bb5b4a5b0
7,471
py
Python
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
211
2019-02-22T08:07:25.000Z
2022-03-14T10:44:20.000Z
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
51
2019-02-08T01:39:49.000Z
2022-02-15T21:21:46.000Z
src/aprl/agents/monte_carlo.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
41
2019-04-23T05:01:49.000Z
2022-03-16T06:51:19.000Z
"""Monte Carlo receding horizon control.""" from abc import ABC, abstractmethod from multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x = remote.recv() if cmd == "seed": mc.seed(x) elif cmd == "search": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd == "close": remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print("MonteCarloParallel worker: got KeyboardInterrupt") finally: dynamics.close()
37.355
99
0.63191
86bb2ac534bb948d97b846d6681e205945c4c9dd
2,063
py
Python
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
machineLearnInAction/bayes.py
xuwening/tensorflowDemo
65687a61e16f947b7ec8a85d12213f954a71542b
[ "MIT" ]
null
null
null
import numpy as np if __name__ == '__main__': postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec) print(p0V, p1V, pAb)
31.738462
97
0.573921
86bbd227d8b7715b6a7438754f63aeb34b54d300
169
py
Python
py/debug/__init__.py
segrids/arduino_due
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
3
2021-08-20T16:03:37.000Z
2022-03-23T20:23:30.000Z
py/debug/__init__.py
segrids/testbench
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
null
null
null
py/debug/__init__.py
segrids/testbench
f375020b81459eae9b325aa3646ff84efc2853e8
[ "MIT" ]
null
null
null
from .swd import SWD from .ahb import AHB from .debugger import Debugger, HaltError, NotHaltedError try: from .dwarf import ELFDebugger except ImportError: pass
21.125
57
0.775148
86bc2f5f9e49100c67489c79936cc4b670708f66
72
py
Python
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
HAP-NodeJS/Switch3_1.py
cbdunc2/pi-kit
bf7e9e118af7853d509e0a10c95ba5d8564bb157
[ "MIT" ]
null
null
null
import subprocess subprocess.Popen(['sh', '../Switches/Switch3_On.sh'])
24
53
0.736111