max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
HW 1/P1.py | minotaur487/MIT-OCW-Problem-Sets | 0 | 12793151 | # Problem 1
def remaining_balance(min_monthly_payment_rate, balance, ann):
payment = round(min_monthly_payment_rate*balance, 2)
interest_paid = round(ann / 12 * balance, 2)
principal_paid = payment - interest_paid
return balance - principal_paid, payment, principal_paid
bal = float(input("Enter the outstanding balance on your credit card: "))
annual_interest_rate = float(input("Enter the annual credit card interest rate as a decimal: "))
monthly_payment_rate = float(input("Enter the minimum monthly payment rate as a decimal: "))
total_amount_paid = 0
for x in range(12):
remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate)
remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2)
print(f'Month: {x+1}')
print(f'Minimum monthly payment: ${min_monthly}')
    print(f'Principal paid: ${prin}')
print(f'Remaining balance: ${remaining_bal}')
bal = remaining_bal
total_amount_paid += min_monthly
print("RESULT")
print(f'Total amount paid: ${round(total_amount_paid, 2)}')
print(f'Remaining balance: ${bal}') | 4.0625 | 4 |
pypykatz/kerberos/functiondefs/asn1structs.py | wisdark/pypykatz | 1,861 | 12793152 |
from asn1crypto import core
from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ
UNIVERSAL = 0
APPLICATION = 1
CONTEXT = 2
TAG = 'explicit'
class MechType(core.ObjectIdentifier):
_map = {
#'': 'SNMPv2-SMI::enterprises.311.2.2.30',
'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',
'1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5',
'1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5',
'1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User',
'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',
}
class InitialContextToken(core.Sequence):
class_ = 1
tag = 0
_fields = [
('thisMech', MechType, {'optional': False}),
('unk_bool', core.Boolean, {'optional': False}),
('innerContextToken', core.Any, {'optional': False}),
]
_oid_pair = ('thisMech', 'innerContextToken')
_oid_specs = {
'KRB5 - Kerberos 5': AP_REQ,
} | 1.773438 | 2 |
KitchenMLTest.py | shyampurk/kitchenML | 0 | 12793153 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 19 12:14:06 2018
@author: Admin
"""
#from pandas import Series
#from statsmodels.graphics.tsaplots import plot_acf
#from statsmodels.graphics.tsaplots import plot_pacf
#from matplotlib import pyplot
#from pandas import DataFrame
#from pandas import read_csv
#from pandas import datetime
#
#def parser(x):
# return datetime.strptime(x, '%Y-%m-%d')
#
#series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
##print(series.head())
#
#pyplot.figure(figsize=(30,10))
#pyplot.subplot(211)
#plot_acf(series, ax=pyplot.gca())
#pyplot.subplot(212)
#plot_pacf(series, ax=pyplot.gca())
#pyplot.show()
import warnings
#from pandas import Series
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
from pandas import datetime
from pandas import read_csv
# evaluate an ARIMA model for a given order (p,d,q) and return RMSE
def evaluate_arima_model(X, arima_order):
# prepare training dataset
X = X.astype('float32')
train_size = int(len(X) * 0.50)
train, test = X[0:train_size], X[train_size:]
history = [x for x in train]
# make predictions
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=arima_order)
model_fit = model.fit(disp=0)
yhat = model_fit.forecast()[0]
predictions.append(yhat)
history.append(test[t])
# calculate out of sample error
mse = mean_squared_error(test, predictions)
rmse = sqrt(mse)
return rmse
# evaluate combinations of p, d and q values for an ARIMA model
def evaluate_models(dataset, p_values, d_values, q_values):
dataset = dataset.astype('float32')
best_score, best_cfg = float("inf"), None
for p in p_values:
for d in d_values:
for q in q_values:
order = (p,d,q)
try:
mse = evaluate_arima_model(dataset, order)
if mse < best_score:
best_score, best_cfg = mse, order
print('ARIMA%s MSE=%.3f' % (order,mse))
except:
continue
print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))
# load dataset
def parser(x):
return datetime.strptime(x, '%Y-%m-%d')
series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# evaluate parameters
p_values = range(0,13)
d_values = range(0, 4)
q_values = range(0, 13)
warnings.filterwarnings("ignore")
evaluate_models(series.values, p_values, d_values, q_values) | 2.765625 | 3 |
tf1x/tests/analysis/atas_test.py | dpaiton/DeepSparseCoding | 12 | 12793154 | import os
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer
"""
Test for activity triggered analysis
NOTE: Should be executed from the repository's root directory
"""
class ActivityTriggeredAverageTest(tf.test.TestCase):
def testBasic(self):
rand_state = np.random.RandomState(1234)
rand_mean = 2.0
rand_var = 10
num_images = 50
num_pixels = 12
num_neurons = 24
base_analyzer = Analyzer()
model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons))
images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels])
# Batch size is greater than num images (shouldn't use batches)
batch_size = 100
atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
# Batch size is less than num images, but divides evenly
batch_size = 10
atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
# Batch size is less than num_images, but does not divide evenly
batch_size = 13
atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)
self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06)
self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06)
if __name__ == "__main__":
tf.test.main()
| 2.390625 | 2 |
trainer/train_forward.py | deanzadok/finemotions | 0 | 12793155 | <gh_stars>0
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from tensorflow.python.framework.ops import prepend_name_scope
sys.path.append('.')
sys.path.append('..')
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from data.load import DataManagement
from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError
from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed
import time
import subprocess as sp
parser = argparse.ArgumentParser()
parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str)
args = parser.parse_args()
# tf function to train
@tf.function
def train(timestamps, images, conf_y, dev_x, dev_y, cfg):
with tf.GradientTape() as tape:
# get predictions
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
pred_confs, pred_devs = model(images)
elif cfg.mode == 'us2conf':
pred_confs = model(images)
else:
pred_devs = model(images)
# compute task losses
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
q_loss = mse(conf_y, pred_confs)
dev_loss = bce(dev_y, pred_devs)
loss = 4*q_loss + dev_loss
elif cfg.mode == 'us2conf':
q_loss = mse(conf_y, pred_confs)
loss = q_loss
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
dev_loss = bce(dev_y, pred_devs)
loss = dev_loss
# add FK loss
if cfg.data.use_fk:
fk_loss = fke(conf_y, pred_confs)
loss += cfg.data.fk * fk_loss
train_fk_loss(fk_loss)
# add CD loss
if cfg.data.use_cd or cfg.data.use_fd:
cde.set_time(timestamps)
cd_loss = cde(conf_y, pred_confs)
if cfg.data.use_cd:
loss += cfg.data.cd * cd_loss
train_cd_loss(cd_loss)
# add FD loss
if cfg.data.use_fd:
fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad)
fd_loss = fde(conf_y, pred_confs)
loss += cfg.data.fd * fd_loss
train_fd_loss(fd_loss)
# perform optimization step
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
train_c_loss(q_loss)
train_dev_loss(dev_loss)
elif cfg.mode == 'us2conf':
train_c_loss(q_loss)
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
train_dev_loss(dev_loss)
# tf function to test
@tf.function
def test(timestamps, images, conf_y, dev_x, dev_y, cfg):
# get predictions
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
pred_confs, pred_devs = model(images)
elif cfg.mode == 'us2conf':
pred_confs = model(images)
else:
pred_devs = model(images)
# compute task losses
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
q_loss = mse(conf_y, pred_confs)
dev_loss = bce(dev_y, pred_devs)
elif cfg.mode == 'us2conf':
q_loss = mse(conf_y, pred_confs)
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
dev_loss = bce(dev_y, pred_devs)
# log tasks losses
if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
test_c_loss(q_loss)
test_dev_loss(dev_loss)
elif cfg.mode == 'us2conf':
test_c_loss(q_loss)
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
test_dev_loss(dev_loss)
# log FK loss
if cfg.data.use_fk:
fk_loss = fke(conf_y, pred_confs)
test_fk_loss(fk_loss)
# log CD loss
if cfg.data.use_cd or cfg.data.use_fd:
cde.set_time(timestamps)
cd_loss = cde(conf_y, pred_confs)
if cfg.data.use_cd:
test_cd_loss(tf.cast(cd_loss, q_loss.dtype))
# log FD loss
if cfg.data.use_fd:
fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad)
fd_loss = fde(conf_y, pred_confs)
test_fd_loss(tf.cast(fd_loss, q_loss.dtype))
if __name__ == "__main__":
# load config file
cfg = ConfigManager(json_name=args.json, retrain=True)
# set random seed (do nothing for no random seed)
set_random_seed(cfg)
# list visible devices and use allow growth - updated for TF 2.7 (CUDA 11 + CUDNN 8.2)
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU')
tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True)
# check if output folder exists
if not os.path.isdir(cfg.output_dir):
os.makedirs(cfg.output_dir)
# initiate csv of training if asked
if cfg.store_csv:
metric_df_columns = []
if cfg.use_conf:
metric_df_columns += ['train_c_loss','test_c_loss']
if cfg.data.use_fk:
metric_df_columns += ['train_fk_loss','test_fk_loss']
if cfg.data.use_fd:
metric_df_columns += ['train_fd_loss','test_fd_loss']
if cfg.data.use_cd:
metric_df_columns += ['train_cd_loss','test_cd_loss']
if cfg.use_dev:
metric_df_columns += ['train_dev_loss','test_dev_loss']
metric_df = pd.DataFrame(columns=metric_df_columns)
# load train and test datasets
data_mng = DataManagement(cfg=cfg)
# wait for gpu if asked
wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req)
# create model, loss and optimizer
model = initiate_model(cfg=cfg)
if cfg.use_conf:
mse = tf.keras.losses.MeanSquaredError()
if cfg.use_dev:
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
if cfg.data.use_fk:
fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths)
if cfg.data.use_cd or cfg.data.use_fd:
cde = ConfigurationDynamicsError(cfg=cfg)
if cfg.data.use_fd:
fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths)
optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate)
# load weights
if cfg.model.weights != "":
model.load_weights(cfg.model.weights)
# define metrics
if cfg.use_conf:
train_c_loss = tf.keras.metrics.Mean(name='train_c_loss')
test_c_loss = tf.keras.metrics.Mean(name='test_c_loss')
if cfg.data.use_fk:
train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss')
test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss')
if cfg.data.use_cd:
train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss')
test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss')
if cfg.data.use_fd:
train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss')
test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss')
if cfg.use_dev:
train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss')
test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss')
metrics_writer = tf.summary.create_file_writer(cfg.output_dir)
# train
train_counter = 0
test_counter = 0
print('Start training...')
for epoch in range(cfg.training.epochs):
for batch in data_mng.train_gen:
if cfg.mode == 'us2conf':
_, images, conf_y = batch
train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg)
elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
timestamps, images, conf_y, dev_y = batch
train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg)
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
images, dev_y = batch
train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg)
train_counter += 1
with metrics_writer.as_default():
if cfg.use_conf:
tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter)
if cfg.data.use_fk:
tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter)
if cfg.data.use_cd:
tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter)
if cfg.data.use_fd:
tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter)
if cfg.use_dev:
tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter)
for test_batch in data_mng.test_gen:
if cfg.mode == 'us2conf':
_, test_images, test_conf_y = test_batch
test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg)
elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':
test_timestamps, test_images, test_conf_y, test_dev_y = test_batch
test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg)
else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'
test_images, test_dev_y = test_batch
test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg)
test_counter += 1
with metrics_writer.as_default():
if cfg.use_conf:
tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter)
if cfg.data.use_fk:
tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter)
if cfg.data.use_cd:
tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter)
if cfg.data.use_fd:
tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter)
if cfg.use_dev:
tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter)
if cfg.store_csv:
row_dict = {}
if cfg.use_conf:
row_dict['train_c_loss'] = train_c_loss.result().numpy()
row_dict['test_c_loss'] = test_c_loss.result().numpy()
if cfg.data.use_fk:
row_dict['train_fk_loss'] = train_fk_loss.result().numpy()
row_dict['test_fk_loss'] = test_fk_loss.result().numpy()
if cfg.data.use_fd:
row_dict['train_fd_loss'] = train_fd_loss.result().numpy()
row_dict['test_fd_loss'] = test_fd_loss.result().numpy()
if cfg.data.use_cd:
row_dict['train_cd_loss'] = train_cd_loss.result().numpy()
row_dict['test_cd_loss'] = test_cd_loss.result().numpy()
if cfg.use_dev:
row_dict['train_dev_loss'] = train_dev_loss.result().numpy()
row_dict['test_dev_loss'] = test_dev_loss.result().numpy()
metric_df = metric_df.append(row_dict, ignore_index=True)
metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False)
# printing
print('Epoch {},'.format(epoch+1), end=" ")
if cfg.use_conf:
print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=" ")
if cfg.data.use_fk:
print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=" ")
if cfg.data.use_cd:
print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=" ")
if cfg.data.use_fd:
print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=" ")
if cfg.use_dev:
print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=" ")
print(" ")
if (epoch+1) % 10 == 0 or epoch == 0:
print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir))
# save model
if (epoch+1) % cfg.training.cp_interval == 0 and epoch > 0:
print('Saving weights to {}'.format(cfg.output_dir))
model.save_weights(os.path.join(cfg.output_dir, "model{}.ckpt".format(epoch+1))) | 1.9375 | 2 |
adept_envs/utils/registration.py | isabella232/DBAP-simulation | 2 | 12793156 | """
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Helper methods for Gym environment registration."""
import logging
from gym.envs import registration as gym_reg
def register(env_id: str, class_path: str, **kwargs):
"""Registers the given class path as a Gym environment.
Args:
env_id: The ID to register the environment as.
class_path: The fully-qualified class path of the environment.
**kwargs: Key-word arguments to pass to gym's register function.
"""
if env_id in gym_reg.registry.env_specs:
# This may happen during test discovery.
logging.warning('Re-registering environment %s', env_id)
del gym_reg.registry.env_specs[env_id]
gym_reg.register(env_id, entry_point=class_path, **kwargs)
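# A hedged usage sketch (not part of the original module): "AdeptExample-v0" and
# the class path below are made-up placeholders; only the register() helper above
# and gym's standard keyword arguments (e.g. max_episode_steps) are assumed.
if __name__ == '__main__':
    register(
        env_id='AdeptExample-v0',
        class_path='adept_envs.example:ExampleEnv',  # hypothetical module:Class path
        max_episode_steps=200,
    )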
| 2.234375 | 2 |
moztrap/settings/default.py | yifanjiang/moztrap | 1 | 12793157 | <filename>moztrap/settings/default.py
from .base import *
try:
from .local import *
except ImportError:
pass
CACHES["default"]["VERSION"] = 1
if DEBUG:
MIDDLEWARE_CLASSES.insert(
0, "moztrap.debug.middleware.AjaxTracebackMiddleware")
try:
HMAC_KEYS
except NameError:
HMAC_KEYS = {"default": SECRET_KEY}
LOGGING["handlers"]["null"] = {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
}
LOGGING["loggers"]["moztrap"] = {
"handlers": ["null"], # replace this in local.py if you want logging
"level": "ERROR",
"propagate": True,
}
| 1.585938 | 2 |
Analysis Scripts/supply_demand_mismatch.py | bonesbb/HASPR | 1 | 12793158 | <reponame>bonesbb/HASPR<filename>Analysis Scripts/supply_demand_mismatch.py
# HASPR - High-Altitude Solar Power Research
# Script to get alleviation of supply/demand mismatch given generation profiles
# Version 0.1
# Author: neyring
from os import walk
import haspr
from haspr import Result
from haspr import Dataset
from numpy import genfromtxt
# PARAMETERS #
# path to .csv file of supply/demand mismatch data (Wh, UTC, 30min res, no leap days):
mismatchPath = "D:\\00_Results\\03_Supply Demand Mismatch\\5_2018 Mismatch - 30min res - UTC time.csv"
# directory containing generation profiles (30min res, Wh) to run our analyses on (without leap days):
inputDirectory = "D:\\00_Results\\03_Supply Demand Mismatch\\In"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\03_Supply Demand Mismatch\\Case 5 - 30 to 65 deg winter opt"
# OS path delimiter ("\\" for windows, "/" for unix)"
haspr.osPathDelimiter = "\\"
# extract mismatch data:
mismatch = Dataset("mismatch")
haspr.get_csv_data(mismatchPath, mismatch)
timestamps = []
mismatch_values = []
for p in mismatch.payload:
timestamps.append(str(p[0]))
mismatch_values.append(float(p[1]))
# get all file names in inputDirectory:
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
file_names.extend(filenames)
# cycle through files and build result objects:
results = []
for f in file_names:
file_path = inputDirectory + haspr.osPathDelimiter + f
# get generation profile:
extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
gen_values = extracted_array[:, 1] # we only want generation values
# calculate import offset:
current_import_offset = []
for i in range(17520):
total_import = (-1) * mismatch_values[i] # -ve value for mismatch => import
generation = gen_values[i]
import_offset = 0.0
if total_import > 0:
import_offset = min(generation, total_import) # can't offset more than total imports
current_import_offset.append(import_offset)
# build current result object:
result_title = f[0:len(f) - 4] + " - import offset"
current_result = Result(result_title)
current_result.payload.append("Time (UTC), Reduction in Imports [Wh]")
for j in range(17520):
str_to_append = str(timestamps[j]) + ", " + str(current_import_offset[j])
current_result.payload.append(str_to_append)
results.append(current_result)
# dump all results:
for r in results:
r.dump()
| 2.734375 | 3 |
lambda/exercices/PhotoCollector/getAllSubjectLambda.py | Mythridor/aws-scripting | 0 | 12793159 | <gh_stars>0
import boto3
def lambda_handler(event, context):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('<DBName>')
result = table.scan(
ProjectionExpression='subject',
)
return (result['Items'])
| 2.21875 | 2 |
network/tests/test_integration.py | glasser/integrations-core | 2 | 12793160 | <reponame>glasser/integrations-core<gh_stars>1-10
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import platform
import pytest
from . import common
pytestmark = pytest.mark.integration
@pytest.mark.usefixtures("dd_environment")
def test_check(aggregator, check, instance):
check.check(instance)
for metric in common.EXPECTED_METRICS:
aggregator.assert_metric(metric)
@pytest.mark.skipif(platform.system() != 'Linux', reason="Only runs on Unix systems")
@pytest.mark.usefixtures("dd_environment")
def test_check_linux(aggregator, check, instance_blacklist):
check.check(instance_blacklist)
for metric in common.CONNTRACK_METRICS:
aggregator.assert_metric(metric)
| 2.015625 | 2 |
counternet/dataset.py | BirkhoffG/counternet | 0 | 12793161 | <gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01b_data.ipynb (unless otherwise specified).
__all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4', 'bn_gen',
'load_adult_income_dataset', 'load_learning_analytic_data', 'describe']
# Cell
from .import_essentials import *
# Cell
def dict2json(dictionary: Dict[str, Any], file_name: str):
with open(file_name, "w") as outfile:
json.dump(dictionary, outfile, indent=4)
def load_configs(file_name: Path):
# if os.path.exists(file_name):
# raise FileNotFoundError(f"{file_name} is not found.")
with open(file_name) as json_file:
return json.load(json_file)
def update_json_file(param: dict, file_name: str):
if os.path.exists(file_name):
old_param = load_configs(file_name)
else:
old_param = {}
# copy to old_param
for k in param.keys():
old_param[k] = param[k]
dict2json(old_param, file_name)
return old_param
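# Hedged usage sketch (illustrative only): the file name and config keys below are
# made up; load_configs() is annotated with Path, but open() also accepts a str.
def _example_config_roundtrip():
    update_json_file({'lr': 1e-3, 'epochs': 10}, 'example_config.json')
    cfg = load_configs('example_config.json')
    return cfg['lr'], cfg['epochs']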
# Cell
def bn_func(x1, x2, x3, x4):
def sigmoid(x):
return 1 / (1 + np.exp(-x))
return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4)
def x1_to_x3(x1):
return 1/3 * x1 - 5
def x1x2_to_x4(x1, x2):
return x1 * np.log(x2 ** 2) / 10 - 10
def bn_gen():
"""
    modified from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py
"""
x1 = np.random.normal(50, 15, 10000)
x2 = np.random.normal(35, 17, 10000)
x3 = x1_to_x3(x1) + np.random.normal(0, 1, 10000)
x4 = x1x2_to_x4(x1, x2) + np.random.normal(0, 1, 10000)
y = bn_func(x1, x2, x3, x4)
data = np.zeros((x1.shape[0], 5))
data[:, 0] = x1
data[:, 1] = x2
data[:, 2] = x3
data[:, 3] = x4
data[:, 4] = np.array(y > .5, dtype=np.int)
return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y'])
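# Hedged usage sketch (illustrative only): draw the synthetic dataset defined above
# and summarise it; nothing here is part of the exported notebook API.
def _example_bn_dataset_summary():
    df = bn_gen()
    print(df.shape)         # expected (10000, 5)
    print(df['y'].mean())   # fraction of positive labels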
# Cell
def load_adult_income_dataset(path=None):
"""Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617
:return adult_data: returns preprocessed adult income dataset.
copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py
"""
if path is None:
raw_data = np.genfromtxt(
'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
delimiter=', ',
dtype=str
)
else:
raw_data = np.genfromtxt(
path,
delimiter=', ',
dtype=str
)
# column names from "https://archive.ics.uci.edu/ml/datasets/Adult"
column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num',
'marital-status', 'occupation', 'relationship', 'race', 'gender',
'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']
adult_data = pd.DataFrame(raw_data, columns=column_names)
# For more details on how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617
adult_data = adult_data.astype(
{"age": np.int64, "educational-num": np.int64, "hours-per-week": np.int64})
adult_data = adult_data.replace(
{'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}})
adult_data = adult_data.replace({'workclass': {
'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}})
adult_data = adult_data.replace(
{'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}})
adult_data = adult_data.replace(
{'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}})
adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}})
adult_data = adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar',
'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar',
'Handlers-cleaners': 'Blue-Collar',
'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service',
'Priv-house-serv': 'Service',
'Prof-specialty': 'Professional', 'Protective-serv': 'Service',
'Tech-support': 'Service',
'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown',
'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}})
adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married',
'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}})
adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other',
'Amer-Indian-Eskimo': 'Other'}})
adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status',
'occupation', 'race', 'gender', 'income']]
adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}})
adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc',
'11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School',
'12th': 'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}})
adult_data = adult_data.rename(
columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'})
return adult_data
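# Hedged usage sketch (illustrative only): calling the loader without a path downloads
# the UCI file, so this is just a quick sanity check, not part of the library.
def _example_adult_income_summary():
    adult = load_adult_income_dataset()
    print(adult.shape)
    print(adult['income'].value_counts())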
# Cell
def load_learning_analytic_data(path='assets/data/oulad'):
def weighted_score(x):
d = {}
total_weight = sum(x['weight'])
d['weight'] = total_weight
if sum(x['weight']) == 0:
d['weighted_score'] = sum(x['score']) / len(x['score'])
else:
d['weighted_score'] = sum(
x['score'] * x['weight']) / sum(x['weight'])
return pd.DataFrame(d, index=[0])
def clicks(x):
types = x['activity_type']
sum_clicks = x['sum_click']
# for t, c in zip(types, sum_clicks):
# x[f"{t}_click"] = c
return pd.DataFrame({f"{t}_click": c for t, c in zip(types, sum_clicks)}, index=[0])
print('loading pandas dataframes...')
assessment = pd.read_csv(f'{path}/assessments.csv')
courses = pd.read_csv(f'{path}/courses.csv')
student_assessment = pd.read_csv(f'{path}/studentAssessment.csv')
student_info = pd.read_csv(f'{path}/studentInfo.csv')
student_regist = pd.read_csv(f'{path}/studentRegistration.csv')
student_vle = pd.read_csv(f'{path}/studentVle.csv')
vle = pd.read_csv(f'{path}/vle.csv')
print('preprocessing assessment...')
# note: only count for submitted assessment, not weighted for unsubmitted ones
assessment_merged = student_assessment.merge(assessment)
assessment_grouped = assessment_merged.groupby(
['code_module', 'code_presentation', 'id_student']).apply(weighted_score)
assessment_df = assessment_grouped.reset_index(
None).drop(['level_3'], axis=1)
print('preprocessing vle...')
# vle
grouped_vle = student_vle.merge(vle).groupby(
['activity_type', 'code_module', 'code_presentation', 'id_student'])
sumed_vle = grouped_vle.sum().drop(
['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index()
grouped_vle = sumed_vle.groupby(
['code_module', 'code_presentation', 'id_student']).apply(clicks)
vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1)
student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\
.merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], how='left')
return student_df[['num_of_prev_attempts', 'weight', 'weighted_score',
'forumng_click', 'homepage_click', 'oucontent_click',
'resource_click', 'subpage_click', 'url_click', 'dataplus_click',
'glossary_click', 'oucollaborate_click', 'quiz_click',
'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click',
'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click',
'folder_click', 'repeatactivity_click', 'htmlactivity_click',
'code_module', 'gender', 'region',
'highest_education', 'imd_band', 'age_band', 'studied_credits',
'disability', 'final_result']]
# Cell
def describe(configs: List[Dict[str, Dict[str, Any]]]):
r = {"size": {}, "# of Cont": {}, "# of Cat": {}}
for data_name, config in configs:
data = pd.read_csv(f"{config['data_dir']}")
data_size = len(data)
cat_len = len(config['discret_cols'])
cont_len = len(config['continous_cols'])
r['size'][data_name] = data_size
r['# of Cont'][data_name] = cont_len
r['# of Cat'][data_name] = cat_len
# pd.DataFrame.from_dict(r).to_csv("../results/data_describe.csv")
return r | 2.296875 | 2 |
v1/cs/download.py | imhele/django-example | 1 | 12793162 | # -*- coding: utf-8 -*-
# API - cs
# FileName: download.py
# Version: 1.0.0
# Create: 2018-10-27
# Modify: 2018-11-07
import mimetypes
from .auth import OSS
from .util import Check
from act import StoreData
from .upload import FolderFile, Source
from .exception import CSCommonErr, CSDownloadErr
class Download(object):
def __init__(self, act=None, app=None):
"""
:param StoreData act:
:param StoreData app:
"""
self.act = act
self.app = app
def normal(self, content_type, expires, folder_file, intranet, source_file):
"""
:param str or None content_type: Content type in headers
:param int or None expires: Url expires
:param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId}
:param bool intranet: Return intranet url
:param str source_file: Eg: source/${FileId}.source.cs
:return:
"""
headers = None
if content_type is not None:
if mimetypes.guess_extension(content_type) is not None:
headers = {'Content-Type': content_type}
if not Check.download_expires(expires):
return CSDownloadErr.EXPIRES_LIMIT
if not folder_file.startswith('folder/'):
return CSCommonErr.INVALID_FOLDER
if not source_file.startswith('source/'):
return CSCommonErr.INVALID_SOURCE
appid = self.act.dict['PassiveParty']
if source_file is not None:
source = Source(appid, suffix=source_file)
else:
source = FolderFile(appid, suffix=folder_file).source
oss = OSS(intranet=intranet, extranet=(not intranet))
url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet)
return {
'errcode': 0,
'url': url,
'headers': headers,
'source': source.suffix,
}
| 2.40625 | 2 |
awspider/servers/data.py | wehriam/awspider | 2 | 12793163 | from twisted.internet.defer import Deferred, DeferredList
from twisted.web import server
from twisted.internet import reactor
import cPickle  # used by _getCallback to deserialize pickled S3 responses
from .base import BaseServer, LOGGER
from ..resources import DataResource
class DataServer(BaseServer):
def __init__(self,
aws_access_key_id,
aws_secret_access_key,
aws_s3_storage_bucket,
aws_sdb_reservation_domain,
port=5002,
log_file='dataserver.log',
log_directory=None,
log_level="debug",
name=None,
max_simultaneous_requests=50):
if name == None:
name = "AWSpider Data Server UUID: %s" % self.uuid
resource = DataResource(self)
self.site_port = reactor.listenTCP(port, server.Site(resource))
BaseServer.__init__(
self,
aws_access_key_id,
aws_secret_access_key,
aws_s3_storage_bucket=aws_s3_storage_bucket,
aws_sdb_reservation_domain=aws_sdb_reservation_domain,
log_file=log_file,
log_directory=log_directory,
log_level=log_level,
name=name,
max_simultaneous_requests=max_simultaneous_requests,
port=port)
def clearStorage(self):
return self.s3.emptyBucket(self.aws_s3_storage_bucket)
def getData(self, uuid):
LOGGER.debug("Getting %s from S3." % uuid)
d = self.s3.getObject(self.aws_s3_storage_bucket, uuid)
d.addCallback(self._getCallback, uuid)
d.addErrback(self._getErrback, uuid)
return d
def _getCallback(self, data, uuid):
LOGGER.debug("Got %s from S3." % (uuid))
return cPickle.loads(data["response"])
def _getErrback(self, error, uuid):
LOGGER.error("Could not get %s from S3.\n%s" % (uuid, error))
return error
def shutdown(self):
deferreds = []
LOGGER.debug("%s stopping on main HTTP interface." % self.name)
d = self.site_port.stopListening()
if isinstance(d, Deferred):
deferreds.append(d)
if len(deferreds) > 0:
d = DeferredList(deferreds)
d.addCallback(self._shutdownCallback)
return d
else:
return self._shutdownCallback(None)
def _shutdownCallback(self, data):
return BaseServer.shutdown(self) | 1.9375 | 2 |
app/tests/utils/test_utils.py | onap/sdc-dcae-d-tosca-lab | 1 | 12793164 | <filename>app/tests/utils/test_utils.py
from toscalib.templates.topology import ToscaTopology
from toscalib.templates.database import ToscaDB
from toscalib.types.node import NodeType
from toscalib.types.capability import CapabilityType
from toscalib.tosca_builder import ToscaBuilder
import os
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml')
policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml')
def init_template():
db = ToscaDB()
capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}})
sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}})
capability_type._parse_content(db)
sub_capability._parse_content(db)
db._import_capability_type(capability_type)
db._import_capability_type(sub_capability)
node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]})
sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]})
node_type._parse_content(db)
sub_node._parse_content(db)
db._import_node_type(node_type)
db._import_node_type(sub_node)
template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}})
template._parse_content(db)
return template
def init_sub_template():
db = ToscaDB()
sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}})
sub_capability._parse_content(db)
db._import_capability_type(sub_capability)
sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]})
sub_node._parse_content(db)
db._import_node_type(sub_node)
template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}})
template._parse_content(db)
return template
def init_tosca_builder_with_schema_and_spec():
spec = {"self": {
"version": "1.1.0",
"name": "test_spec_ss",
"description": "Collector for receiving VES events through restful interface",
"component_type": "docker"},
"streams": {
"subscribes": [],
"publishes": [{
"format": "VES_specification",
"version": "5.28.4",
"type": "message router",
"config_key": "ves_sipsignaling"}]},
"services": {
"provides": [{
"route": "/eventListener/v5",
"verb": "POST",
"request": {
"format": "VES_specification",
"version": "5.28.4"},
"response": {
"format": "ves.coll.response",
"version": "1.0.0"}}]},
"parameters": [{
"name": "collector.service.port",
"value": 8080,
"description": "standard http port"},
{"name": "collector.service.secure.port",
"value": 8443,
"description": "secure port "},
{"name": "collector.keystore.file.location",
"value": "/opt/app/dcae-certificate/keystore.jks",
"description": "fs location of keystore in vm"}],
"auxilary": {
"healthcheck": {
"type": "http",
"interval": "15s",
"timeout": "1s",
"endpoint": "/healthcheck"}}}
builder = ToscaBuilder()
builder.import_schema(meta_model)
builder.import_spec_str(spec)
return builder
def init_tosca_builder_with_policy_schema_and_spec():
spec = {"self": {
"version": "0.1.6",
"name": "DcaeSamCollector",
"component_type": "docker"},
"parameters": [{
"name": "clliLocationMappingClliFutureUse3",
"description": "SAM Collector clli=location ID set",
"value": "",
"type": "string"},
{"name": "vnfFaultMonProvisionPolicy",
"policy_editable": True,
"policy_group": "DcaeSamCollector_vnfFaultMonProvisionPolicy",
"type": "string",
"policy_schema": [{
"name": "vnfTypeSpecificData",
"description": "List of objects for vnf type monitorng",
"type": "list",
"entry_schema": [{
"name": "elementType",
"value": ""},
{"name": "monitoringTasks",
"type": "list",
"entry_schema": [{
"name": "HostGroupSetCommonLinuxSNMP",
"type": "boolean",
"value": "false"},
{"name": "HostGroupSetNagent_Common_Linux",
"type": "boolean",
"value": "false"}]
}
]
}]}
]}
builder = ToscaBuilder()
builder.import_schema(policy_model)
builder.import_spec_str(spec)
return builder
def init_tosca_builder_with_hello_world_spec_k8():
spec = {"self": {"component_type": "docker", "description": "Hello World mS for subscribing the data from local DMaaP, DR or MR, processing them and publishing them as PM files to local DMaaP DR",
"name": "dcae.collectors.vcc.helloworld.pm", "version": "1.0.1"}, "services": {"calls": [], "provides": []},
"streams": {"publishes": [], "subscribes": []},
"parameters": [{"name": "vcc_hello_name", "value": "", "description": "the name entered for specific person","sourced_at_deployment": True, "designer_editable": True, "policy_editable": False},
{"name": "useDtiConfig", "value": False, "description": "component depends on configuration from dti.", "sourced_at_deployment": "false", "designer_editable": "false", "policy_editable": False, "required": True},
{"name": "isSelfServeComponent", "value": "false", "description": "Is this used as self serve component.", "sourced_at_deployment": False, "designer_editable": False, "policy_editable": False, "required": "true"}],
"auxilary": {"healthcheck": {"interval": "60s", "timeout": "20s", "script": "/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh", "type": "docker"},
"volumes": [{"container": {"bind": "/opt/app/dcae-certificate"}, "host": {"path": "/opt/app/dcae-certificate"}},
{"container": {"bind": "/opt/logs/DCAE/dmd/AGENT"}, "host": {"path": "/opt/logs/DCAE/helloworldpm/dmd/AGENT"}},
{"container": {"bind": "/opt/logs/DCAE/dmd/WATCHER"}, "host": {"path": "/opt/logs/DCAE/helloworldpm/dmd/WATCHER"}},
{"container": {"bind": "/opt/app/vcc/logs/DCAE"}, "host": {"path": "/opt/logs/DCAE/helloworldpm/vcc-logs"}},
{"container": {"bind": "/opt/app/vcc/archive/data"}, "host": {"path": "/opt/data/DCAE/helloworldpm/vcc-archive"}}]},
"artifacts": [{"type": "docker image", "uri": "dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001"}]}
builder = ToscaBuilder()
builder.import_schema(meta_model)
builder.import_spec_str(spec)
return builder
| 2.03125 | 2 |
lmp/util/validate.py | ProFatXuanAll/char-RNN | 0 | 12793165 | """Checking types and values."""
import os
from typing import Any, List, Type, Union
def raise_if_empty_str(*, val: str, val_name: str) -> None:
"""Raise if ``val`` is an empty :py:class:`str`.
Parameters
----------
val: str
Test target.
val_name: str
Test target name. Mainly used to create error message.
Raises
------
ValueError
When ``val`` is an empty :py:class:`str`.
"""
if not val:
raise ValueError(f'`{val_name}` must be non-empty `str`.')
def raise_if_is_directory(*, path: str) -> None:
"""Raise if ``path`` exists and is a directory.
Parameters
----------
path: str
Test path.
Raises
------
FileExistsError
When ``path`` exists and is a directory.
"""
if os.path.exists(path) and os.path.isdir(path):
raise FileExistsError(f'{path} is a directory.')
def raise_if_is_file(*, path: str) -> None:
"""Raise if ``path`` exists and is a file.
Parameters
----------
path: str
Test path.
Raises
------
FileExistsError
When ``path`` exists and is a file.
"""
if os.path.exists(path) and os.path.isfile(path):
raise FileExistsError(f'{path} is a file.')
def raise_if_not_in(*, val: Any, val_name: str, val_range: List) -> None:
"""Raise if ``val`` is not in ``val_range``.
Parameters
----------
val: Any
Test target.
val_name: str
Test target name. Mainly used to create error message.
val_range: list
Expected value range.
Raises
------
ValueError
When ``val`` is not in ``val_range``.
"""
if val not in val_range:
raise ValueError(
f'`{val_name}` must be one of the following values:' + ''.join(map(lambda v: f'\n- {v}', val_range))
)
def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None:
"""Raise if ``val`` is not an instance of ``val_type``.
Parameters
----------
val: Any
Test target.
val_name: str
Test target name. Mainly used to create error message.
val_type: Type
Expected target type.
Raises
------
TypeError
When ``val`` is not an instance of ``val_type``.
"""
if not isinstance(val, val_type):
raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.')
def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None:
"""Raise if there exist some ``i < j`` such that ``vals[i] > vals[j]``.
Parameters
----------
vals: list[Union[float, int]]
Test targets.
val_names: list[str]
Test targets' names. Mainly used to create error message.
Raises
------
ValueError
When there exist some ``i < j`` such that ``vals[i] > vals[j]``.
"""
for i in range(len(vals) - 1):
if vals[i] > vals[i + 1]:
raise ValueError(f'Must have `{" <= ".join(val_names)}`.')
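# Hedged usage sketch (illustrative only): how the helpers above might be combined to
# validate a hypothetical "batch_size" argument; the names are made up for the example.
def _example_validate_batch_size(batch_size: int, n_samples: int) -> None:
    raise_if_not_instance(val=batch_size, val_name='batch_size', val_type=int)
    raise_if_wrong_ordered(vals=[1, batch_size, n_samples], val_names=['1', 'batch_size', 'n_samples'])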
| 3.6875 | 4 |
part2.py | AybukeYALCINER/image_classification | 9 | 12793166 | <reponame>AybukeYALCINER/image_classification<filename>part2.py
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import tensorflow as tf
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
import time
import os
import copy
import tensorflow as tf
# plot the train and validation loss/accuracy plots and save it.
# takes array of validation losses, validation accuracy, train losses, train accuracy and number of epochs respectively.
def plot_graph(val_loss, val_acc, tr_loss, tr_acc, num_epochs):
plt.subplot(211)
plt.title("Loss plots vs. Number of Training Epochs")
plt.plot(range(1,num_epochs+1),val_loss,label="validation")
plt.plot(range(1,num_epochs+1),tr_loss,label="train")
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.subplot(212)
plt.title("Accuracy plots vs. Number of Training Epochs")
plt.plot(range(1,num_epochs+1),val_acc,label="validation")
plt.plot(range(1,num_epochs+1),tr_acc,label="train")
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.tight_layout()
plt.savefig("plot.png")
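# Hedged usage sketch (illustrative only): made-up loss/accuracy values for 3 epochs,
# showing the argument shapes plot_graph expects.
def _example_plot_graph():
    plot_graph(val_loss=[0.9, 0.7, 0.6], val_acc=[0.50, 0.60, 0.70],
               tr_loss=[1.0, 0.8, 0.6], tr_acc=[0.55, 0.65, 0.75], num_epochs=3)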
# train the model
# takes the model, dataloaders, criterion, optimizer, device(GPU or CPU) and number of epochs respectively as parameters
# returns model, array of validation accuracy, validation loss, train accuracy, train loss respectively
def train_model(model, dataloaders, criterion, optimizer, device,num_epochs=25):
since = time.time()
val_acc_history = []
val_loss_history = []
tr_acc_history = []
tr_loss_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
n_epochs_stop = 5
min_val_loss = np.Inf
epochs_no_improve = 0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
#scheduler.step(epoch) #for lr_scheduler
# Each epoch has a training and validation phase
for phase in ['train', 'validation']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
loader = dataloaders[phase]
# Iterate over data.
for inputs, labels in loader:
inputs = inputs.to(device)
labels = labels.to(device)
# print("in dataloaders", end=" ")
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
# print("x")
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'validation' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'validation':
val_acc_history.append(epoch_acc)
val_loss_history.append(epoch_loss)
if phase == 'train':
tr_acc_history.append(epoch_acc)
tr_loss_history.append(epoch_loss)
#early stopping
if phase == 'validation':
if epoch_loss < min_val_loss:
epochs_no_improve = 0
                    min_val_loss = epoch_loss
else:
epochs_no_improve += 1
# Check early stopping condition
if epochs_no_improve == n_epochs_stop:
print('Early stopping!')
return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history
# data augmentation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'validation': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = "dataset"
num_classes = 10
batch_size = 32
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'validation', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
shuffle=True, num_workers=2)
for x in ['train', 'validation', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation', 'test']}
class_names = image_datasets['train'].classes
model_ft = models.vgg16(pretrained=True)
# freeze layers before classifiers
for param in model_ft.features.parameters():
# print(param)
param.requires_grad = False
#different number of layer freeze
#model_ft.features[-1].requires_grad = True
#model_ft.features[-2].requires_grad = True
#model_ft.features[-3].requires_grad = True
model_ft.classifier[6] = nn.Linear(4096,10) #modify the last layer
# specify loss function
criterion = nn.CrossEntropyLoss()
# specify optimizer
optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001)
#lr_scheduler
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
#different optimizer
#optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001)
#weight_decay
#optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.1,weight_decay= 0.001)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = model_ft.to(device) #send the model to the gpu
model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft, dataloaders, criterion, optimizer,device, num_epochs=30) #train model
#test the model
correct = 0
topk = 0
total = 0
testloader = dataloaders['test']
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.to(device)
labels = labels.to(device)
outputs = model_ft(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
probs, classes = outputs.topk(5, dim=1)
labels_size = labels.size(0)
for i in range(labels_size):
if(labels[i] in classes[i]):
topk += 1
print('Accuracy of the model on the test images: %d %%' % (100 * correct / total))
print('Accuracy of the top 5 on the test images: %d %%' % (100 * topk / total))
# val/train loss and accuracy plots
plot_graph(val_loss, val_acc, tr_loss, tr_acc, 30)
| 2.765625 | 3 |
submission_broker/submission/submission.py | ebi-ait/submission-broker | 0 | 12793167 | <reponame>ebi-ait/submission-broker
from enum import Enum
from typing import List, Set, KeysView, Dict, ValuesView
from submission_broker.submission.entity import Entity
class HandleCollision(Enum):
UPDATE = 1
OVERWRITE = 2
ERROR = 3
class Submission:
def __init__(self, collider: HandleCollision = None):
self.__collider = collider if collider else HandleCollision.UPDATE
self.__map: Dict[str, Dict[str, Entity]] = {}
def has_data(self) -> bool:
return len(self.__map) > 0
def map(self, entity_type: str, index: str, attributes: dict) -> Entity:
if entity_type in self.__map and index in self.__map[entity_type]:
entity = self.__handle_collision(entity_type, index, attributes)
else:
entity = Entity(entity_type, index, attributes)
self.__map.setdefault(entity_type, {})[index] = entity
return entity
def get_entity_types(self) -> KeysView[str]:
return self.__map.keys()
def get_entities(self, entity_type: str) -> ValuesView[Entity]:
return self.__map.get(entity_type, {}).values()
def get_entity(self, entity_type: str, index: str) -> Entity:
return self.__map.get(entity_type, {}).get(index, None)
def get_all_entities(self) -> Dict[str, ValuesView[Entity]]:
all_entities = {}
for entity_type in self.get_entity_types():
all_entities[entity_type] = self.get_entities(entity_type)
return all_entities
def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]:
entities = set()
for index in entity.get_linked_indexes(entity_type):
entities.add(self.get_entity(entity_type, index))
return entities
def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]:
accessions: Dict[str, Set[str]] = {}
for entity_type in self.get_entity_types():
for linked_entity in self.get_linked_entities(entity, entity_type):
for service, accession in linked_entity.get_accessions():
accessions.setdefault(service, set()).add(accession)
return accessions
def get_all_accessions(self) -> Dict[str, Set[str]]:
all_accessions: Dict[str, Set[str]] = {}
for entities in self.get_all_entities().values():
for entity in entities:
for service, accession in entity.get_accessions():
all_accessions.setdefault(service, set()).add(accession)
return all_accessions
def has_errors(self) -> bool:
for entities in self.get_all_entities().values():
for entity in entities:
if entity.has_errors():
return True
return False
def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]:
type_errors: Dict[str, Dict[str, List[str]]] = {}
for index, entity in self.__map[entity_type].items():
if entity.has_errors():
type_errors[index] = entity.get_errors()
return type_errors
def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]:
errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {}
for entity_type in self.get_entity_types():
type_errors = self.get_errors(entity_type)
if type_errors:
errors[entity_type] = type_errors
return errors
def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]:
view = {}
for entity_type, indexed_entities in self.__map.items():
for index, entity in indexed_entities.items():
view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists)
return view
@staticmethod
def link_entities(entity_a: Entity, entity_b: Entity):
entity_a.add_link_id(entity_b.identifier)
entity_b.add_link_id(entity_a.identifier)
def __handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity:
if self.__collider == HandleCollision.ERROR:
raise IndexError(f'Index {index} already exists.')
existing_entity: Entity = self.__map[entity_type][index]
if self.__collider == HandleCollision.OVERWRITE:
existing_entity.attributes = attributes
else: # Default is UPDATE
existing_entity.attributes.update(attributes)
return existing_entity
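# Hedged usage sketch (illustrative only): the entity types, indexes and attributes
# below are invented; only the Submission/Entity methods used above are assumed.
def _example_submission_usage():
    submission = Submission()
    study = submission.map('study', 'study-1', {'title': 'Example study'})
    sample = submission.map('sample', 'sample-1', {'taxon': 'Homo sapiens'})
    Submission.link_entities(study, sample)
    return submission.get_linked_entities(study, 'sample')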
| 2.109375 | 2 |
PythonGlobalOptimizationLib/PythonGlobalOptimizationLib/DataDownload/GetYahooFinanceTimeSeriesData.py | zhenshaoaixixi0507/PythonGlobalOptimizationLib | 0 | 12793168 | <gh_stars>0
from yahoofinancials import YahooFinancials
import pandas as pd
import numpy as np
def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray:
yahoo_financials = YahooFinancials(ticker)
data = yahoo_financials.get_historical_price_data(start_date=startdate,
end_date=enddate,
time_interval=timeinterval)
df = pd.DataFrame(data[ticker]['prices'])
return df[datatype].to_numpy()
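# Hedged usage sketch (illustrative only): the ticker, date range, interval and the
# 'close' price column are assumptions chosen for the example.
if __name__ == '__main__':
    closes = GetYahooFinanceData('AAPL', '2020-01-01', '2020-06-30', 'daily', 'close')
    print(closes.shape, closes[:5])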
| 2.96875 | 3 |
leetcode/RansomNote.py | mahendra-rk/coding-interview-prep | 0 | 12793169 | <filename>leetcode/RansomNote.py
# Leetcode
# 383. Ransom Note
import collections


class Solution:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
if len(magazine) < len(ransomNote):
return False
mag_counter = collections.Counter(magazine)
ransom_counter = collections.Counter(ransomNote)
for alpha in ransom_counter:
if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0):
return False
return True
####################################################################
# #Alternate solution
# counter = collections.Counter(magazine)
# ransomNote = list(ransomNote)
# for alpha in list(ransomNote):
# ransomNote.remove(alpha)
# alpha_count = counter.get(alpha, -1)
# if alpha_count > 1:
# counter[alpha] = counter.get(alpha) - 1
# elif alpha_count == 1:
# counter.pop(alpha)
# else:
# return False
# if not ransomNote:
# return True
# else:
# return False
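# Hedged usage sketch (not part of the LeetCode submission): a quick local check of
# the solution above with made-up inputs.
if __name__ == "__main__":
    s = Solution()
    print(s.canConstruct("aa", "aab"))  # expected: True
    print(s.canConstruct("aa", "ab"))   # expected: False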
| 3.390625 | 3 |
Grammys/src/data/run_crawlers.py | kelvingakuo/INCITEFUL-DATA | 1 | 12793170 | <reponame>kelvingakuo/INCITEFUL-DATA
import subprocess
def runCrawler(what):
if(what == 'lyrics'):
command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json'
elif(what == 'artistData'):
command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json'
process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True)
output, error = process.communicate()
| 2.265625 | 2 |
example/benchmark/counter.py | dendisuhubdy/ijson | 0 | 12793171 | <filename>example/benchmark/counter.py
from bottle import post, run, request
import threading
import time
count = 0
@post('/')
def index():
global count
count += int(request.body.read())
return b''
def show():
prev = 0
while True:
start = time.time()
time.sleep(1)
now = time.time()
dur = now - start
print(int((count - prev) / dur), 'ops')
start = now
prev = count
threading.Thread(target=show).start()
run(host='localhost', port=7000, quiet=True)
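
# Example client (sketch, not part of the original benchmark): each POST body is
# an integer added to the running counter. Assumes the 'requests' package is
# installed and the server above is listening on localhost:7000.
#
#   import requests
#   requests.post('http://localhost:7000/', data=b'1')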
| 2.5625 | 3 |
django_ancestry_relation/managers.py | aaronmarkey/django-ancestry-relation | 2 | 12793172 | <reponame>aaronmarkey/django-ancestry-relation
from django.db import models
class NodeManager(models.Manager):
def create_node(self, *args, **kwargs):
'''
        Create a Node object. Generates level and path automatically if they are
        not supplied in kwargs.
RETURNS
-------
node: Node
The Node object, unsaved.
'''
node = self.model(**kwargs)
# generate level
if 'level' not in kwargs:
if node.parent_node_id:
node.level = node.parent_node.level + 1
else:
node.level = 1
# generate path
if 'path' not in kwargs:
if node.parent_node:
node.path = '{},{}'.format(
node.parent_node.path,
node.id
)
else:
node.path = '{}'.format(
node.id
)
if node.level == 1:
node.root_node = node
return node
def create_tree(self, nodes=[]):
'''
Save a list of nodes to the database.
ARGS
----
nodes: [Node,]
A list of Node objects
RETURNS
-------
Bool
False if nodes is empty or save to DB failed, True if saved to DB
successfully
'''
if nodes:
try:
self.bulk_create(nodes)
return True
except:
return False
return False
def ancestral_nodes(self, node):
'''
        Get a list of all nodes that are ancestors or descendants of the given
node.
ARGS
----
node: Node
The node.
RETURNS
-------
QuerySet:
A QuerySet of Node Objects, ordered by level.
'''
node_ids = node.path.split(',')
return self.filter(id__in=node_ids).order_by('level')
def descendants(self, node):
'''
        Get a complete list of all nodes that descend from the given node.
ARGS
----
node: Node
The node.
RETURNS
-------
QuerySet:
A QuerySet of Node Objects, ordered by level.
'''
return self.filter(
root_node=node.root_node,
path__contains=str(node.id)
).order_by('level')
def children(self, node):
'''
Get the immediate children of the given node.
ARGS
----
node: Node
The node.
RETURNS
-------
QuerySet:
A QuerySet of Node objects.
'''
return self.filter(
parent_node=node
).order_by('level')
def leaves(self, node):
if node.id != node.root_node_id:
raise Exception('node must be a root level node.')
else:
leaves = self.raw(
'''
SELECT * FROM django_ancestry_relation_testnode n1
WHERE (SELECT count(*) FROM django_ancestry_relation_testnode n2
WHERE n2.parent_node_id = n1.id) = 0
AND n1.root_node_id = '{}'
ORDER BY n1.level ASC
'''.format(str(node.id))
)
return leaves
def hierarchical_ordered(self, node):
'''
Get a structured representation of Nodes. Uses the StructuredNode class
found at classes.StructuredNode.
ARGS
----
node: Node
            The root of the tree being requested. This will be the root
            StructuredNode.
RETURNS
-------
tree: StructuredNode
A single StructuredNode object.
NOTE
----
This is slow. Do not use if descendants_ordered() can be used in
any way.
'''
from django_ancestry_relation.classes import StructuredNode
children_count = self.children(node).count()
tree = StructuredNode.StructuredNode(
data=node
)
if children_count > 0:
children = self.children(node)
for child in children:
tree.children.append(
                    self.hierarchical_ordered(
child
)
)
return tree
def descendants_ordered(self, node):
'''
        Retrieve a flat list of node descendants, ordered according to their
placement in the hierarchy.
ARGS
----
node: Node
The root Node of this tree/subtree.
RETURNS
-------
nodes: [Node,]
A QuerySet of Node objects.
'''
nodes = self.descendants(node).order_by('path')
return nodes
def delete_tree(self, node):
'''
        Just a wrapper for the Django Model .delete() method. Will delete a node
        and all of its descendants.
'''
node.delete()
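
# Usage sketch (not part of the original module): assumes a concrete Node model
# that declares `objects = NodeManager()`; the field values below are made up.
#
#   root = Node.objects.create_node(id=uuid.uuid4())
#   child = Node.objects.create_node(id=uuid.uuid4(), parent_node=root)
#   Node.objects.create_tree([root, child])
#   ordered = Node.objects.descendants_ordered(root)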
| 2.6875 | 3 |
download.py | felipefelixarias/a2cat-vn-pytorch | 7 | 12793173 | <filename>download.py
import requests
import io
import zipfile
from collections import namedtuple
import os
def _convert_name(name, acc = ''):
if len(name) == 0:
return acc
if name[0].isupper():
acc += ('-' if len(acc) > 0 else '') + name[0].lower()
elif name[0] == '_':
acc += '-'
else:
acc += name[0]
return _convert_name(name[1:], acc)
DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path'])
class Downloader:
def __init__(self):
self.base_url = 'https://deep-rl.herokuapp.com/resources/'
self.resources = dict()
self._base_path = None
self._all_requirements = []
@property
def base_path(self):
if self._base_path is None:
self._base_path = os.path.expanduser('~/.visual_navigation')
return self._base_path
@property
def resources_path(self):
return os.path.join(self.base_path, 'resources')
def create_context(self, name):
return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name))
def add_resource(self, name, fn):
self.resources[name] = fn
def require(self, name):
self._all_requirements.append(name)
def get(self, name):
return self.resources[name](self.create_context(name))
def download_all(self):
for r in self._all_requirements:
self.get(r)
downloader = Downloader()
def download_resource(name, context):
resource_path = os.path.join(context.resources_path, name)
if os.path.exists(resource_path):
return resource_path
url = context.base_url + '%s.zip' % name
try:
print('Downloading resource %s.' % name)
response = requests.get(url)
with zipfile.ZipFile(io.BytesIO(response.content)) as z:
z.extractall(resource_path)
print('Resource %s downloaded.' %name)
return resource_path
except Exception as e:
if os.path.exists(resource_path):
os.remove(resource_path)
raise e
def register_resource(task):
if isinstance(task, str):
def _thunk(res):
downloader.add_resource(task, res)
return res
return _thunk
name = _convert_name(task.__name__)
downloader.add_resource(name, task)
return task
def require_resource(name):
downloader.require(name)
return lambda x: x
def download_resource_task(name):
def thunk(context):
return download_resource(name, context)
return thunk
def add_resources(downloader_instance):
# Add test resource
downloader_instance.add_resource('test', download_resource_task('test'))
add_resources(downloader)
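
# Usage sketch (not part of the original module): 'test' is the only resource
# registered by add_resources above; fetching it requires network access.
#
#   path = downloader.get('test')   # downloads and unzips on first use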
def resource(name):
return downloader.get(name) | 2.890625 | 3 |
leonardo_form_roudnyresl/models.py | dresl/leonardo-form-roudnyresl | 0 | 12793174 | # encoding: utf-8
from leonardo.module.web.models import Widget
from leonardo.module.media.fields.image import ImageField
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import datetime
from django.utils.encoding import python_2_unicode_compatible
from leonardo.module.media.fields.multistorage_file import MultiStorageFileField
class RoudnyreslOrders(models.Model):
jmeno = models.CharField(
max_length=255, verbose_name=u"Jméno", default='')
slug = models.CharField(
verbose_name=u"URL ID", max_length=150, blank=True, null=True)
prijmeni = models.CharField(
max_length=255, verbose_name=u"Příjmení", default='')
email = models.EmailField(
verbose_name=u"E-mail", default='')
telefon = models.CharField(
verbose_name=u"Telefon (ve tvaru: +420 123 456 789)", max_length=100)
dorucovaci_adresa = models.CharField(
verbose_name=u"Doručovací adresa", help_text="Př.: Pardubice, Benedettiho 709, 530 03", max_length=255)
firma = models.CharField(
max_length=255, verbose_name=u"Název firmy", default='')
ico = models.CharField(
verbose_name=u"IČO", max_length=255, default='')
dic = models.CharField(
verbose_name=u"DIČ", max_length=255, help_text="Vyplňte, jste-li plátce DPH", blank=True, null=True)
doprava = models.CharField(
verbose_name=u"Doprava", max_length=255)
platba = models.CharField(
verbose_name=u"Platba", max_length=255)
zprava = models.TextField(
verbose_name=u"Poznámka", default='', blank=True)
pub_date = models.DateTimeField(u'Datum objednávky', auto_now_add=True)
def get_absolute_url(self):
from leonardo.module.web.widget.application.reverse import app_reverse
return app_reverse(
'created_order',
'leonardo_form_roudnyresl.apps.roudnyresl',
kwargs={'slug': self.slug})
def get_full_name(self):
return str(self.jmeno.encode("utf-8") + " " + self.prijmeni.encode("utf-8"))
def __unicode__(self):
return self.jmeno
class Meta:
ordering = ['jmeno', ]
verbose_name = u'Objednávka'
verbose_name_plural = u'Objednávky'
class RoudnyreslProduct(models.Model):
objednavka = models.ForeignKey(RoudnyreslOrders,
verbose_name=u"Objednávka", related_name="orderproduct_set")
produkt = models.CharField(
verbose_name=u"Vyberte produkt", max_length=255)
tloustka = models.CharField(
verbose_name=u"Výška podstavy", max_length=255)
vyska = models.CharField(
verbose_name=u"Výška reliéfu", max_length=255)
rozmer_motivu = models.CharField(
verbose_name=u"Rozměr raženého motivu", max_length=255)
soubor = models.FileField(
u'Nahrání dat', upload_to='documents/%Y/%m/%d/')
def __unicode__(self):
return self.produkt
class Meta:
ordering = ['produkt', ]
verbose_name = u'Produkt'
verbose_name_plural = u'Produkty'
| 1.710938 | 2 |
packages/w3af/w3af/core/data/parsers/tests/test_parser_cache.py | ZooAtmosphereGroup/HelloPackages | 3 | 12793175 | <reponame>ZooAtmosphereGroup/HelloPackages
"""
test_parser_cache.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from mock import patch, PropertyMock
from w3af.core.data.parsers.doc.html import HTMLParser
from w3af.core.data.parsers.tests.test_document_parser import _build_http_response
from w3af.core.data.parsers.parser_cache import ParserCache
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.url.HTTPResponse import HTTPResponse
from w3af.core.data.dc.headers import Headers
from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser
from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id
from w3af.core.controllers.exceptions import BaseFrameworkException
class TestParserCache(unittest.TestCase):
def setUp(self):
self.url = URL('http://w3af.com')
self.headers = Headers([(u'content-type', u'text/html')])
self.dpc = ParserCache()
def tearDown(self):
self.dpc.clear()
def test_basic(self):
resp1 = HTTPResponse(200, 'abc', self.headers, self.url, self.url)
resp2 = HTTPResponse(200, 'abc', self.headers, self.url, self.url)
parser1 = self.dpc.get_document_parser_for(resp1)
parser2 = self.dpc.get_document_parser_for(resp2)
self.assertEqual(id(parser1), id(parser2))
def test_bug_13_Dec_2012(self):
url1 = URL('http://w3af.com/foo/')
url2 = URL('http://w3af.com/bar/')
body = '<a href="?id=1">1</a>'
resp1 = HTTPResponse(200, body, self.headers, url1, url1)
resp2 = HTTPResponse(200, body, self.headers, url2, url2)
parser1 = self.dpc.get_document_parser_for(resp1)
parser2 = self.dpc.get_document_parser_for(resp2)
self.assertNotEqual(id(parser1), id(parser2))
_, parsed_refs_1 = parser1.get_references()
_, parsed_refs_2 = parser2.get_references()
self.assertEqual(parsed_refs_1, parsed_refs_2)
def test_issue_188_invalid_url(self):
# https://github.com/andresriancho/w3af/issues/188
all_chars = ''.join([chr(i) for i in xrange(0, 255)])
response = HTTPResponse(200, all_chars, self.headers, self.url, self.url)
self.dpc.get_document_parser_for(response)
def test_cache_blacklist_after_timeout(self):
#
# If the cache tries to parse an HTTP response, that process fails, then we blacklist
# the HTTP response so it never gets parsed again.
#
mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
modp = 'w3af.core.data.parsers.document_parser.%s'
with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \
patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock, \
patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:
#
# Trigger the timeout
#
html = '<html>DelayedParser!</html>'
http_resp = _build_http_response(html, u'text/html')
timeout_mock.return_value = 1
max_workers_mock.return_value = 1
parsers_mock.return_value = [DelayedParser, HTMLParser]
try:
self.dpc.get_document_parser_for(http_resp)
except BaseFrameworkException, bfe:
self._is_timeout_exception_message(bfe, http_resp)
else:
self.assertTrue(False)
#
# Make sure it is in the blacklist
#
hash_string = get_response_unique_id(http_resp)
self.assertIn(hash_string, self.dpc._parser_blacklist)
#
# Make sure the blacklist is used
#
try:
self.dpc.get_document_parser_for(http_resp)
except BaseFrameworkException, bfe:
self.assertIn('Exceeded timeout while parsing', str(bfe))
def _is_timeout_exception_message(self, toe, http_resp):
msg = 'Reached timeout parsing "http://w3af.com/".'
self.assertEquals(str(toe), msg)
def test_get_tags_by_filter_simple(self):
html = '<a href="/def">abc</a>'
resp1 = HTTPResponse(200, html, self.headers, self.url, self.url)
resp2 = HTTPResponse(200, html, self.headers, self.url, self.url)
parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',))
parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',))
self.assertEqual(id(parser1), id(parser2))
def test_get_tags_by_filter_different_tags(self):
html = '<a href="/def">abc</a><b>hello</b>'
resp1 = HTTPResponse(200, html, self.headers, self.url, self.url)
resp2 = HTTPResponse(200, html, self.headers, self.url, self.url)
parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',))
parser2 = self.dpc.get_tags_by_filter(resp2, tags=('b',))
self.assertNotEqual(id(parser1), id(parser2))
| 2.046875 | 2 |
xtools/main.py | hxler123/tools | 2 | 12793176 | import sys
import binascii
import hashlib
from PyQt5.QtWidgets import QApplication,QMainWindow,QFileDialog
from xtui import Ui_Form
from xtoolsfunc import XToolsFunc
base64_method = ["encode","decode"]
hash_available = hashlib.algorithms_guaranteed
class MainUi(QMainWindow,QFileDialog,Ui_Form):
def __init__(self,parent=None):
super(MainUi,self).__init__(parent)
self.setupUi(self)
self.type_ComboBox.addItem("")
self.type_ComboBox.addItem("")
self.type_ComboBox.setItemText(0,"base64")
self.type_ComboBox.setItemText(1,"Hash")
self.type_ComboBox.activated.connect(self.enc_type)
self.confirm_Button.clicked.connect(self.confirm)
self.open_Button.clicked.connect(self.openfile)
for i in range(len(base64_method)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,base64_method[i])
def openfile(self):
filedir = self.getOpenFileName(self,"open file","./","All Files (*)")[0]
self.input_TextEdit.setText(filedir)
def enc_type(self):
self.method_ComboBox.clear()
if self.type_ComboBox.currentText() == "Hash":
hash_available_list = list(hash_available)
for i in range(len(hash_available_list)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,hash_available_list[i])
else:
for i in range(len(base64_method)):
self.method_ComboBox.addItem("")
self.method_ComboBox.setItemText(i,base64_method[i])
def confirm(self):
enc_type = self.type_ComboBox.currentText()
method = self.method_ComboBox.currentText()
value = self.input_TextEdit.toPlainText()
if value:
if enc_type == "base64":
result = XToolsFunc.base64_method(method,value)
self.ouput_TextBrowser.setText(result[0])
self.output_label.setText(result[1])
elif enc_type == "Hash":
result = XToolsFunc.hash_method(method,value)
self.ouput_TextBrowser.setText(result[0])
self.output_label.setText(result[1])
else:
self.output_label.setText("无输入")
self.ouput_TextBrowser.clear()
def main():
app = QApplication(sys.argv)
myUi = MainUi()
myUi.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main() | 2.484375 | 2 |
faucet.py | thecaliconoire/aepp-faucet | 0 | 12793177 | <reponame>thecaliconoire/aepp-faucet<filename>faucet.py
#!/usr/bin/env python3
import os
import sys
import logging
import argparse
# flask
from flask import Flask, jsonify, render_template
# aeternity
from aeternity.epoch import EpochClient
from aeternity.signing import KeyPair, is_valid_hash
from aeternity.config import Config
# also log to stdout because docker
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
app = Flask(__name__)
logging.getLogger("aeternity.epoch").setLevel(logging.WARNING)
# logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
# logging.getLogger("engineio").setLevel(logging.ERROR)
@app.after_request
def after_request(response):
"""enable CORS"""
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/')
def hello(name=None):
amount = int(os.environ.get('TOPUP_AMOUNT', 250))
return render_template('index.html', amount=amount)
@app.route('/account/<recipient_address>', methods=['POST'])
def rest_faucet(recipient_address):
"""top up an account"""
# recipient_address = request.form.get("account")
# validate the address
if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'):
return jsonify({"message": "bad request"}), 400
# genesys key
bank_wallet_key = os.environ.get('BANK_WALLET_KEY')
kp = KeyPair.from_private_key_string(bank_wallet_key)
# target node
Config.set_defaults(Config(
external_url=os.environ.get('EPOCH_URL', "https://sdk-testnet.aepp<EMAIL>")
))
# amount
amount = int(os.environ.get('TOPUP_AMOUNT', 250))
ttl = int(os.environ.get('TX_TTL', 100))
client = EpochClient()
tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl)
balance = client.get_balance(account_pubkey=recipient_address)
    logging.info(f"top up account {recipient_address} with {amount} tx_ttl:{ttl} tx_hash: {tx}")
return jsonify({"tx_hash": tx, "balance": balance})
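
# Example request (sketch, not part of the original service): the ak_... address
# below is a placeholder for a real account public key.
#
#   curl -X POST http://localhost:5000/account/ak_<recipient-address>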
# ______ ____ ____ ______ ______
# .' ___ ||_ \ / _||_ _ `. .' ____ \
# / .' \_| | \/ | | | `. \| (___ \_|
# | | | |\ /| | | | | | _.____`.
# \ `.___.'\ _| |_\/_| |_ _| |_.' /| \____) |
# `.____ .'|_____||_____||______.' \______.'
#
def cmd_start(args=None):
root.addHandler(app.logger)
logging.info("faucet service started")
app.run(host='0.0.0.0', port=5000)
if __name__ == '__main__':
cmds = [
{
'name': 'start',
'help': 'start the top up service',
'opts': []
}
]
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
subparsers.required = True
subparsers.dest = 'command'
# register all the commands
for c in cmds:
subp = subparsers.add_parser(c['name'], help=c['help'])
# add the sub arguments
for sa in c.get('opts', []):
subp.add_argument(*sa['names'],
help=sa['help'],
action=sa.get('action'),
default=sa.get('default'))
# parse the arguments
args = parser.parse_args()
# call the command with our args
ret = getattr(sys.modules[__name__], 'cmd_{0}'.format(
args.command.replace('-', '_')))(args)
| 2.15625 | 2 |
src/nirvana/coalesce/coalesce_strategies.py | jimmylchen/nirvana | 0 | 12793178 | from statistics import mean
# Weighted by api name
def coalesce_weighted_mean(api_responses):
weights = {
"api1": 0.5,
"api2": 0.3,
"api3": 0.2,
}
weighted_mean = 0
for api, value in api_responses.items():
weighted_mean += weights[api] * value
return weighted_mean/len(api_responses)
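
# Usage sketch (not in the original module): the dict keys must match the weight
# keys above; the values are made up.
#   coalesce_weighted_mean({"api1": 10.0, "api2": 20.0, "api3": 30.0})
#   # -> (0.5*10 + 0.3*20 + 0.2*30) / 3, per the implementation above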
# Ignoring a particular api response
def coalesce_mean_ignore_api1(api_responses):
filtered = {k: v for k, v in api_responses.items() if k.find("api1") == -1}
return mean(filtered.values()) | 3.453125 | 3 |
src/library/__init__.py | oschusler/libqasm | 0 | 12793179 | # Author <NAME>
# The import syntax changes slightly between python 2 and 3, so we
# need to detect which version is being used:
from sys import version_info
if version_info[0] == 3:
PY3 = True
# elif version_info[0] == 2:
# PY3 = False
else:
raise EnvironmentError("sys.version_info refers to a version of "
"Python is not 3. This is not permitted. "
"sys.version_info = {}".format(version_info))
from .libQasm import libQasm
| 2.46875 | 2 |
kunai/dnsquery.py | vijayanant/kunai | 1 | 12793180 | import re
import socket
pattern = r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\[]?(\.|dot)[ )\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})"
ipv4pattern = re.compile(pattern)
class DNSQuery:
def __init__(self, data):
self.data = data
self.domain = ''
t = (ord(data[2]) >> 3) & 15 # Opcode bits
if t == 0: # Standard query
ini = 12
lon = ord(data[ini])
while lon != 0:
self.domain += data[ini+1:ini+lon+1]+'.'
ini += lon+1
lon = ord(data[ini])
def _get_size_hex(self, nb):
nb = min(nb, 256*256)
d,r = divmod(nb, 256)
s = chr(d)+chr(r)
return s
# We look in the nodes for the good tag
def lookup_for_nodes(self, nodes):
if not self.domain.endswith('.kunai.'):
return []
tag = self.domain[:-len('.kunai.')]
print "DNS lookup for tag", tag
r = []
for n in nodes.values():
if tag in n['tags']:
services = n.get('services', {})
state_id = 0
if tag in services:
service = services[tag]
state_id = service.get('state_id')
print "DNS state_id", state_id
if state_id == 0:
addr = n['addr']
# If already an ip, add it
if ipv4pattern.match(addr):
r.append(addr)
                    else: # else try to resolve it first
try:
addr = socket.gethostbyname(addr)
r.append(addr)
except socket.gaierror: # not found
                            print 'DNS cannot find the hostname ip', addr
# skip this node
print "DNS R:", r
return r
def response(self, r):
packet = ''
print "DOM", self.domain
nb = len(r)
if self.domain:
packet += self.data[:2] + "\x81\x80"
packet += self.data[4:6] + self._get_size_hex(nb) + '\x00\x00\x00\x00' # Questions and Answers Counts
packet += self.data[12:] # Original Domain Name Question
for ip in r:
packet += '\xc0\x0c' # Pointer to domain name
packet += '\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP
return packet
| 2.78125 | 3 |
02-Scripting/05-Twitter_Bot/twitter.py | suzynakayama/udemy-python-dev | 1 | 12793181 | <filename>02-Scripting/05-Twitter_Bot/twitter.py
import tweepy
import os
import time
from dotenv import load_dotenv
load_dotenv()
consumer_key = os.getenv('consumer_key')
consumer_secret = os.getenv('consumer_secret')
access_token = os.getenv('access_token')
access_token_secret = os.getenv('access_token_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
user = api.me()
# print(user.name)
# public_tweets = api.home_timeline()
# for tweet in public_tweets:
# print(tweet.text)
# helper function
def limit_handler(cursor):
try:
while True:
yield cursor.next()
except tweepy.RateLimitError:
        time.sleep(1000) # wait here for 1000 seconds before retrying
except StopIteration:
return
# Generous Bot - follows ppl back
# for follower in limit_handler(tweepy.Cursor(api.followers).items()):
# print(follower.name)
# if follower.name == "<NAME>":
# follower.follow()
# break
# Narcissist Bot
search_str = '<NAME>'
number_of_tweets = 2
for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)):
try:
tweet.favorite()
# tweet.retweet()
print('I liked that tweet')
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| 2.953125 | 3 |
main.py | DeerChen/ip2host | 1 | 12793182 | '''
Description: Reverse lookup of domain names from IP addresses
Author: Senkita
Date: 2020-10-09 10:23:52
LastEditors: Senkita
LastEditTime: 2020-10-09 15:01:39
'''
import os
from utils.Query import batch_query
if __name__ == "__main__":
os.makedirs('./Log', exist_ok=True)
filename = 'public.txt'
save_filename = 'domain_name.txt'
batch_query(filename, save_filename)
| 2.109375 | 2 |
metrics/serializers.py | cybrvybe/AU7OMA7A-BI | 0 | 12793183 | from rest_framework import serializers
from .models import Metric
class MetricSerializer(
serializers.ModelSerializer
):
class Meta:
model = Metric
fields = (
"title",
"created_at",
"metric"
) | 2.0625 | 2 |
fdm-devito-notebooks/02_wave/exer-wave/wave_spectra.py | devitocodes/devito_book | 7 | 12793184 | import numpy as np
import matplotlib.pyplot as plt
def spectrum(f, x):
# Discrete Fourier transform
A = np.fft.rfft(f(x))
A_amplitude = np.abs(A)
# Compute the corresponding frequencies
dx = x[1] - x[0]
freqs = np.linspace(0, np.pi/dx, A_amplitude.size)
    plt.plot(freqs[:len(freqs)//2], A_amplitude[:len(freqs)//2])
# Mesh
L = 10; Nx = 100
x = np.linspace(0, L, Nx+1)
spectrum(lambda x: np.where(x < 5, 1, 0), x)
spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x)
s = 0.5
spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x)
def f(x):
r = np.zeros_like(x)
    r[len(x)//2] = 1
return r
spectrum(f, x)
figfile = 'tmp'
plt.legend(['step', '2sin', 'gauss', 'peak'])
plt.savefig(figfile + '.pdf')
plt.savefig(figfile + '.png')
plt.show()
| 2.875 | 3 |
9term/fipt/P2PLending/reviews/forms.py | nik-sergeson/bsuir-informatics-labs | 0 | 12793185 | <reponame>nik-sergeson/bsuir-informatics-labs<gh_stars>0
from django.forms import ModelForm
from P2PLending.reviews.models import Review
class ReviewForm(ModelForm):
class Meta:
model = Review
fields = ['text']
| 1.609375 | 2 |
cli/src/klio_cli/commands/job/stop.py | gaybro8777/klio | 705 | 12793186 | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import time
import emoji
from googleapiclient import discovery
JOB_STATE_MAP = {"cancel": "JOB_STATE_CANCELLED", "drain": "JOB_STATE_DRAINED"}
class StopJob(object):
def __init__(self, api_version=None):
self._set_dataflow_client(api_version)
def _set_dataflow_client(self, api_version):
if not api_version:
api_version = "v1b3"
self._client = discovery.build("dataflow", api_version)
def _check_job_running(self, job_name, project, region):
request = (
self._client.projects()
.locations()
.jobs()
.list(projectId=project, location=region, filter="ACTIVE",)
)
try:
response = request.execute()
except Exception as e:
logging.warning(
"Could not find running job '{}' in project '{}': {}".format(
job_name, project, e
)
)
logging.warning(
"Continuing to attempt deploying '{}'".format(job_name)
)
return
job_results = response.get("jobs", [])
if job_results:
for result in job_results:
if result["name"] == job_name:
return result
def _update_job_state(self, job, req_state=None, retries=None):
if retries is None:
retries = 0
_req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP["cancel"])
        if job.get("requestedState") != _req_state:
job["requestedState"] = _req_state
request = (
self._client.projects()
.locations()
.jobs()
.update(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
body=job,
)
)
try:
request.execute()
except Exception as e:
# generic catch if 4xx error - probably shouldn't retry
if getattr(e, "resp", None):
if e.resp.status < 500:
msg = "Failed to {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
if retries > 2:
msg = "Max retries reached: could not {} job '{}': {}".format(
req_state, job["name"], e
)
logging.error(msg)
raise SystemExit(1)
logging.info(
"Failed to {} job '{}'. Trying again after 30s...".format(
req_state, job["name"]
)
)
retries += 1
time.sleep(30)
self._update_job_state(job, req_state, retries)
def _watch_job_state(self, job, timeout=600):
timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
request = (
self._client.projects()
.locations()
.jobs()
.get(
jobId=job["id"],
projectId=job["projectId"],
location=job["location"],
)
)
while datetime.datetime.now() < timeout:
try:
resp = request.execute()
except Exception as e:
msg = (
"Failed to get current status for job '{}'. Error: {}.\n"
"Trying again after 5s...".format(job["name"], e)
)
logging.info(msg)
time.sleep(5)
continue
if resp["currentState"] in JOB_STATE_MAP.values():
return
else:
msg = "Waiting for job '{}' to reach terminal state...".format(
job["name"]
)
logging.info(msg)
time.sleep(5)
msg = "Job '{}' did not reach terminal state after '{}' secs.".format(
job["name"], timeout
)
logging.error(msg)
raise SystemExit(1)
def stop(self, job_name, project, region, strategy, api_version=None):
self._set_dataflow_client(api_version)
current_running_job = self._check_job_running(
job_name, project, region
)
if not current_running_job:
return
self._update_job_state(current_running_job, req_state=strategy)
self._watch_job_state(current_running_job)
verb = "cancelled" if strategy == "cancel" else "drained"
msg = "Successfully {} job '{}' :smile_cat:".format(verb, job_name)
logging.info(emoji.emojize(msg, use_aliases=True))
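
# Usage sketch (not part of the original module): the job, project and region
# below are placeholders; requires Google Cloud credentials with Dataflow access.
#
#   StopJob().stop("my-klio-job", "my-gcp-project", "us-central1", "drain")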
| 1.945313 | 2 |
VecMap/vec_map.py | matao1984/vec-map | 0 | 12793187 | # -*- coding: utf-8 -*-
#VecMap0.1
#The first versio of VecMap
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class Ui_VecMap(QtWidgets.QMainWindow):
def __init__(self):
super(Ui_VecMap,self).__init__()
self.setupUi(self)
self.retranslateUi(self)
def setupUi(self, VecMap):
VecMap.setObjectName("VecMap")
VecMap.resize(402, 876)
VecMap.setMinimumSize(QtCore.QSize(402, 836))
VecMap.setMaximumSize(QtCore.QSize(1024, 1024))
self.pushButton = QtWidgets.QPushButton(VecMap)
self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41))
self.pushButton.setObjectName("pushButton")
self.checkBox = QtWidgets.QCheckBox(VecMap)
self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20))
self.checkBox.setObjectName("checkBox")
self.line = QtWidgets.QFrame(VecMap)
self.line.setGeometry(QtCore.QRect(20, 90, 371, 21))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label = QtWidgets.QLabel(VecMap)
self.label.setGeometry(QtCore.QRect(20, 10, 121, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(VecMap)
self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51))
self.label_2.setTextFormat(QtCore.Qt.AutoText)
self.label_2.setScaledContents(False)
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.lineEdit = QtWidgets.QLineEdit(VecMap)
self.lineEdit.setGeometry(QtCore.QRect(130, 130, 30, 20))
self.lineEdit.setObjectName("lineEdit")
self.label_3 = QtWidgets.QLabel(VecMap)
self.label_3.setGeometry(QtCore.QRect(20, 110, 191, 16))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(VecMap)
self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16))
self.label_4.setObjectName("label_4")
self.pushButton_2 = QtWidgets.QPushButton(VecMap)
self.pushButton_2.setGeometry(QtCore.QRect(20, 170, 91, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(VecMap)
self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41))
self.pushButton_3.setObjectName("pushButton_3")
self.label_5 = QtWidgets.QLabel(VecMap)
self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51))
self.label_5.setTextFormat(QtCore.Qt.AutoText)
self.label_5.setScaledContents(False)
self.label_5.setWordWrap(True)
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(VecMap)
self.label_6.setGeometry(QtCore.QRect(130, 230, 251, 51))
self.label_6.setTextFormat(QtCore.Qt.AutoText)
self.label_6.setScaledContents(False)
self.label_6.setWordWrap(True)
self.label_6.setObjectName("label_6")
self.line_2 = QtWidgets.QFrame(VecMap)
self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_9 = QtWidgets.QLabel(VecMap)
self.label_9.setGeometry(QtCore.QRect(20, 300, 191, 16))
self.label_9.setObjectName("label_9")
self.checkBox_2 = QtWidgets.QCheckBox(VecMap)
self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20))
self.checkBox_2.setObjectName("checkBox_2")
self.checkBox_3 = QtWidgets.QCheckBox(VecMap)
self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20))
self.checkBox_3.setObjectName("checkBox_3")
self.pushButton_4 = QtWidgets.QPushButton(VecMap)
self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41))
self.pushButton_4.setObjectName("pushButton_4")
self.label_10 = QtWidgets.QLabel(VecMap)
self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51))
self.label_10.setTextFormat(QtCore.Qt.AutoText)
self.label_10.setScaledContents(False)
self.label_10.setWordWrap(True)
self.label_10.setObjectName("label_10")
self.checkBox_4 = QtWidgets.QCheckBox(VecMap)
self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20))
self.checkBox_4.setObjectName("checkBox_4")
self.line_3 = QtWidgets.QFrame(VecMap)
self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.label_11 = QtWidgets.QLabel(VecMap)
self.label_11.setGeometry(QtCore.QRect(20, 440, 191, 16))
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(VecMap)
self.label_12.setGeometry(QtCore.QRect(170, 130, 191, 16))
self.label_12.setObjectName("label_12")
self.label_14 = QtWidgets.QLabel(VecMap)
self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16))
self.label_14.setObjectName("label_14")
self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_4.setGeometry(QtCore.QRect(20, 550, 251, 22))
self.lineEdit_4.setObjectName("lineEdit_4")
self.label_15 = QtWidgets.QLabel(VecMap)
self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16))
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(VecMap)
self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16))
self.label_16.setObjectName("label_16")
self.label_17 = QtWidgets.QLabel(VecMap)
self.label_17.setGeometry(QtCore.QRect(20, 600, 181, 16))
self.label_17.setObjectName("label_17")
self.lineEdit_5 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22))
self.lineEdit_5.setObjectName("lineEdit_5")
self.pushButton_5 = QtWidgets.QPushButton(VecMap)
self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(VecMap)
self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41))
self.pushButton_6.setObjectName("pushButton_6")
self.label_18 = QtWidgets.QLabel(VecMap)
self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51))
self.label_18.setTextFormat(QtCore.Qt.AutoText)
self.label_18.setScaledContents(False)
self.label_18.setWordWrap(True)
self.label_18.setObjectName("label_18")
self.pushButton_7 = QtWidgets.QPushButton(VecMap)
self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51))
self.pushButton_7.setObjectName("pushButton_7")
self.line_4 = QtWidgets.QFrame(VecMap)
self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.pushButton_8 = QtWidgets.QPushButton(VecMap)
self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28))
self.pushButton_8.setObjectName("pushButton_8")
self.label_19 = QtWidgets.QLabel(VecMap)
self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16))
self.label_19.setObjectName("label_19")
self.label_20 = QtWidgets.QLabel(VecMap)
self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16))
self.label_20.setObjectName("label_20")
self.pushButton_9 = QtWidgets.QPushButton(VecMap)
self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28))
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_10 = QtWidgets.QPushButton(VecMap)
self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120, 28))
self.pushButton_10.setObjectName("pushButton_10")
self.pushButton_11 = QtWidgets.QPushButton(VecMap)
self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28))
self.pushButton_11.setObjectName("pushButton_11")
self.pushButton_12 = QtWidgets.QPushButton(VecMap)
self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.pushButton_12.setFont(font)
self.pushButton_12.setObjectName("pushButton_12")
self.radioButton = QtWidgets.QRadioButton(VecMap)
self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20))
self.radioButton.setChecked(True)
self.radioButton.setObjectName("radioButton")
self.radioButton_2 = QtWidgets.QRadioButton(VecMap)
self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20))
self.radioButton_2.setObjectName("radioButton_2")
self.label_21 = QtWidgets.QLabel(VecMap)
self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16))
self.label_21.setObjectName("label_21")
self.pushButton_13 = QtWidgets.QPushButton(VecMap)
self.pushButton_13.setGeometry(QtCore.QRect(200, 460, 81, 51))
self.pushButton_13.setObjectName("pushButton_13")
self.label_7 = QtWidgets.QLabel(VecMap)
self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16))
self.label_7.setObjectName("label_7")
self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_14 = QtWidgets.QPushButton(VecMap)
self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41))
self.pushButton_14.setObjectName("pushButton_14")
self.lineEdit_3 = QtWidgets.QLineEdit(VecMap)
self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_13 = QtWidgets.QLabel(VecMap)
self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16))
self.label_13.setObjectName("label_13")
self.checkBox_5 = QtWidgets.QCheckBox(VecMap)
self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20))
self.checkBox_5.setChecked(True)
self.checkBox_5.setObjectName("checkBox_5")
self.retranslateUi(VecMap)
QtCore.QMetaObject.connectSlotsByName(VecMap)
#=======Connect all the functions=============================================
self.pushButton.clicked.connect(self.openfile)
self.pushButton_2.clicked.connect(self.ini_atom_position)
self.pushButton_3.clicked.connect(self.find_separation)
self.pushButton_4.clicked.connect(self.refine_atom_position)
self.pushButton_13.clicked.connect(self.cal_disp)
self.pushButton_5.clicked.connect(self.vec_ang_dist)
self.pushButton_6.clicked.connect(self.show_vec_map)
self.pushButton_14.clicked.connect(self.show_O_vec_map)
self.pushButton_7.clicked.connect(self.load_from_csv)
self.pushButton_8.clicked.connect(self.disclaimer)
self.pushButton_9.clicked.connect(self.show_about)
self.pushButton_10.clicked.connect(self.acknowledgments)
self.pushButton_11.clicked.connect(self.show_contact)
self.pushButton_12.clicked.connect(self.donate)
def retranslateUi(self, VecMap):
_translate = QtCore.QCoreApplication.translate
VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
#VecMap.setWindowIcon(QtGui.QIcon('icon.png'))
self.pushButton.setText(_translate("VecMap", "Load Image"))
self.checkBox.setText(_translate("VecMap", "ABF/BF image"))
self.label.setText(_translate("VecMap", "Step 1. Load image"))
        self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load an HR-STEM image with a perovskite structure. Supports [001] and [011] zone axes. A filtered image is preferred.</p><p><br/></p></body></html>"))
self.lineEdit.setText(_translate("VecMap", "8"))
self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions"))
self.label_4.setText(_translate("VecMap", "Separation factor"))
self.pushButton_2.setText(_translate("VecMap", "Initialize"))
self.pushButton_3.setText(_translate("VecMap", "Find \n"
"separation"))
        self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Add or remove atoms by left-clicking.</p></body></html>"))
self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>"))
self.label_9.setText(_translate("VecMap", "Step 3. Refine atom positions"))
self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
self.checkBox_3.setText(_translate("VecMap", "Save result plots"))
self.pushButton_4.setText(_translate("VecMap", "Refine"))
self.label_10.setText(_translate("VecMap", "<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only check Refine Oxygen if O columns are visible.</p></body></html>"))
self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
self.label_11.setText(_translate("VecMap", "Step 4. Generate a vector map"))
self.label_12.setText(_translate("VecMap", "e.g., something around 8-12"))
self.label_14.setText(_translate("VecMap", "List of angles (degrees) of vectors that will be colored differently:"))
self.lineEdit_4.setText(_translate("VecMap", "45"))
self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315"))
self.label_16.setText(_translate("VecMap", "List of colors (should match the angles):"))
self.label_17.setText(_translate("VecMap", "e.g., yellow blue red green"))
self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\n"
"distribution"))
self.pushButton_6.setText(_translate("VecMap", "Show \n"
"map"))
self.label_18.setText(_translate("VecMap", "<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>"))
self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
self.label_19.setText(_translate("VecMap", "VecMap 0.1.1 Released: 06/13/2020 by Dr. <NAME>"))
self.label_20.setText(_translate("VecMap", "Check here for more information!"))
self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
self.pushButton_11.setText(_translate("VecMap", "Contact"))
self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
self.radioButton.setText(_translate("VecMap", "A-site"))
self.radioButton_2.setText(_translate("VecMap", "B-site"))
self.label_21.setText(_translate("VecMap", "Select which site to calculate"))
self.pushButton_13.setText(_translate("VecMap", "Calculate"))
self.label_7.setText(_translate("VecMap", "Scale:"))
self.lineEdit_2.setText(_translate("VecMap", "10"))
self.pushButton_14.setText(_translate("VecMap", "Oxygen\n"
" map"))
self.lineEdit_3.setText(_translate("VecMap", "6"))
self.label_13.setText(_translate("VecMap", "Scale:"))
self.checkBox_5.setText(_translate("VecMap", "Scale bar"))
#===== Open file and set up global variables such as path etc. ======================
#===== Connected to self.pushButton =================================================
def openfile(self):
openfile_name = QFileDialog.getOpenFileName(self,'Select Image','','DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
file = openfile_name[0]
if self.checkBox.isChecked(): #Set ABF toggle from the checkbox
ABF = 1
else:
ABF = 0
if self.checkBox_4.isChecked():
img_110 = 1
else:
img_110 = 0
if file:
print('{} has been loaded!'.format(file))
my_path = getDirectory(file) #Set the working path
file_path = getDirectory(file, '/') #Set the parent path
if not os.path.exists(my_path):
os.makedirs(my_path)
s = readImage(file)
title = s.metadata.General.title
scale = s.axes_manager[0].scale #Read scale data from the image
units = s.axes_manager[0].units #Read units
s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup file in hspy format
image = s.data
if ABF == 1:
s.data = np.divide(1, s.data) #Inverse the ABF contrast to make a ADF-like image
# Draw an image
global f_original_img
f_original_img = PlotCanvas()
f_original_img.setWindowTitle(file)
f_original_img.axes.imshow(image)
f_original_img.axes.set_axis_off()
f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
f_original_img.show()
#==== Initialize atom position module ===============================================
#==== Connected to self.pushButton_2 ================================================
def ini_atom_position(self):
sep = int(self.lineEdit.text())
try:
A_positions_ini = get_atom_positions(s,separation=sep)
global A_positions, f_ini
A_positions = A_positions_ini.tolist()
f_ini = PlotCanvas()
f_ini.setWindowTitle('Initial atom positions for refining')
f_ini.axes.imshow(s.data)
f_ini.axes.set_axis_off()
f_ini.axes.set_title('Left click to add or remove atoms')
f_ini.show()
def onclick(event):
if event.inaxes != f_ini.axes:
return
if event.button == 1: # Left mouse button
x = np.float(event.xdata)
y = np.float(event.ydata)
atom_nearby = closest_node((x,y), A_positions)[0]
if distance.euclidean((x,y), A_positions[atom_nearby]) > 5:
A_positions.append([x, y])
else:
A_positions.pop(atom_nearby)
replot(f_ini)
def get_xy_pos_lists(atom_lst):
return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1]
def replot(f):
x_pos, y_pos = get_xy_pos_lists(A_positions)
dp.set_xdata(x_pos)
dp.set_ydata(y_pos)
f.fig.canvas.draw()
f.fig.canvas.flush_events()
xy_positions = get_xy_pos_lists(A_positions)
dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
except NameError:
#Pop up an error window
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please load the image file first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#==== Find separation module ========================================================
#==== Connected to self.pushButton_3 ================================================
def find_separation(self):
#sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text()))
#s_peaks=am.get_feature_separation(s, separation_range=sep_range) #Range might be changed for different images
#s_peaks.metadata.General.title = 'Use Arrow keys to find an appropriate separation factor'
#s_peaks.plot(colorbar=False,scalebar=False,axes_off=True)
sep = int(self.lineEdit.text())
sep_range = list(range(sep - 4, sep + 5))
# Create canvas for drawing
try:
global f_sep
f_sep = SeparationCanvas()
for i in range(9):
s_factor = sep - 4 + i
f_sep.axes[i].set_aspect('equal')
f_sep.axes[i].set_axis_off()
if s_factor < 1:
continue
ini_position = get_atom_positions(s, separation=s_factor)
f_sep.axes[i].imshow(s.data)
f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
f_sep.show()
except NameError:
#Pop up an error window
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please load the image file first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#==== Refine atom position module ===================================================
#==== Connected to self.pushButton_4 ================================================
def refine_atom_position(self):
#Global variables:
global ap_A, ap_B, ap_O, Ua, Uc, find_O
#Read checkboxes
if self.checkBox_2.isChecked():
find_O = 1
else:
find_O = 0
if self.checkBox_3.isChecked():
plotpos = 1
else:
plotpos = 0
try:
#Refine atom positions
print('='*50)
print('Refining atom positions for A-site atoms...')
print('This may take time...')
sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
print('Refining A-site atoms done!')
ap_A = sublattice_A.atom_positions #Refined atoms positions for A-site. NumPy array.
#lattice_list = []
#lattice_list.append(sublattice_A)
print('='*50)
print('Finding the initial positions for B-site atoms...')
sublattice_A.construct_zone_axes()
#Find the zone axis for the initial position of B: typically 3 for [001] and 1 for [110]
if img_110 == 1:
zone_axis = sublattice_A.zones_axis_average_distances[1]
else:
zone_axis = sublattice_A.zones_axis_average_distances[2]
#Calculate lattice parameter
z0 = sublattice_A.zones_axis_average_distances[0]
z1 = sublattice_A.zones_axis_average_distances[1]
Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
print('='*50)
print('Estimated lattice parameters (average) from the image:')
print('a = {:.3f} {}'.format(Ua, units))
print('c = {:.3f} {}'.format(Uc, units))
B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
#Reomve A-site atoms from the image
print('='*50)
print('Subtracting sublattice A from the image using 2D gaussian fit...')
print('This may take time...')
image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
#Refine B-site atoms
print('='*50)
print('Refining atom positions for sublattice B...')
print('Almost there...')
sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
ap_B = sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array.
print('Refining B-site atoms done!')
#lattice_list.append(sublattice_B)
#Find the position of O atoms
if find_O == 1:
#Find initial positions for O
AB_positions = ap_A.tolist() + ap_B.tolist()
sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B')
sublattice_AB.construct_zone_axes()
zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only work for [001] currently
O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O
print('='*50)
print('Subtracting sublattice A and B from the image using 2D gaussian fit...')
print('This may take time...')
image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from the original image
#Refine O positions
print('='*50)
print('Refining atom positions for sublattice O...')
sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
ap_O = sublattice_O.atom_positions #Refined atoms positions for O. NumPy array.
print('Refining O atoms done!')
#lattice_list.append(sublattice_O)
print('Refining atoms done!')
#Construct atom position results with sublattice A and B.
#atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list)
#Save the refined positions and original image as hdf5 file. This file can be called later.
#atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)
#=======================
#Plot and save figures
#=======================
if plotpos == 1:
print('='*50)
print('Saving result plots...')
global f_A_site, f_B_site, f_AB
#Plot A-site atom positions with the original image overlayed.
f_A_site = PlotCanvas()
f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
f_A_site.axes.imshow(image)
f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
f_A_site.axes.set_axis_off()
f_A_site.show()
f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif',dpi=600,bbox_inches='tight')
#Plot B-site atom positions with the original image overlayed.
f_B_site = PlotCanvas()
f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
f_B_site.axes.imshow(image)
f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
f_B_site.axes.set_axis_off()
f_B_site.show()
f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif',dpi=600,bbox_inches='tight')
#Plot both A-site and B-site on the image
f_AB = PlotCanvas()
f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms')
f_AB.axes.imshow(image)
f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
f_AB.axes.set_axis_off()
f_AB.show()
f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight')
#Plot O atoms if available
if find_O == 1:
global f_O_site, f_all
f_O_site = PlotCanvas()
f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
f_O_site.axes.imshow(image)
f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
f_O_site.axes.set_axis_off()
f_O_site.show()
f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif',dpi=600,bbox_inches='tight')
#Plot all the atoms on the image
f_all = PlotCanvas()
f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
f_all.axes.imshow(image)
f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
f_all.axes.set_axis_off()
f_all.show()
f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight')
if plotpos == 1:
print('All figures have been saved to '+ my_path)
except NameError:
#Pop up an error window
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please initialize the atom positions first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#==================== Calculate displacement module =================================
#==================== Connected to self.pushButton_13 ===============================
def cal_disp(self):
try:
#Global variables
global U_avg, disp, disp_O, disp_atom
# Read cal_site from the radio button
# 0 to calculate A site in relative to B site; 1 to calculate B site in relative to A site
if self.radioButton.isChecked():
cal_site = 0
if self.radioButton_2.isChecked():
cal_site = 1
cal_110 = img_110 #If the input image is [110], turn this on. O map is not supported for [110] yet.
O_map = find_O #If enabled, will calculate the displacement of O atoms in relation to sublattice B.
U_avg = (Ua + Uc)/2 #Unit cell parameter estimated from the image.
#=========================================================================
#The main scripts start from here
if cal_site == 0:#Calculate A site
disp_atom = 'A-site'
rel_atom = 'B-site'
ap_0 = ap_A.tolist()
ap_1 = ap_B.tolist()
else:
disp_atom = 'B-site'
rel_atom = 'A-site'
ap_0 = ap_B.tolist()
ap_1 = ap_A.tolist()
print('='*50)
            print('====Calculate {} relative to {}===='.format(disp_atom, rel_atom))
ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale)
disp = find_displacement(ap_0, ideal_pos, scale)
#Save the displacement data
with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data:
disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
for data in disp:
disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
disp_data.write('\n')
#Save the neigboring atoms as well
with open(my_path + 'neighboring atoms.csv','w') as neighbor_data:
for data in neighbor_pos:
n = len(data)
for idx in range(n):
neighbor_data.write('{0}, {1}, '.format(*data[idx]))
neighbor_data.write('\n')
#Calculate O map and save
if O_map == 1:
ap_2 = ap_O.tolist()
ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
disp_O = find_displacement(ap_2, ideal_O_pos, scale)
with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data:
disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
for data in disp_O:
disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
disp_data.write('\n')
print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.')
except NameError:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please refine the atom positions first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#======== Display angle distribution of the vectors module ===========================
#======== Connected to self.pushButton_5 =============================================
def vec_ang_dist(self):
try:
disp_angles = [lst[5] for lst in disp]
global f_vec_ang_dist
f_vec_ang_dist = PlotCanvas()
f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions')
f_vec_ang_dist.axes.hist(disp_angles, bins=50)
f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)')
f_vec_ang_dist.axes.set_xticks(list(range(0,390,30)))
f_vec_ang_dist.axes.set_ylabel('Frequency')
f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to see the\n displacement directions')
f_vec_ang_dist.show()
except NameError:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please calculate the displacement first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
print('')
#========= Generate vector map module =============================================
#========= Connected to self.pushButton_6 ===========================================
def show_vec_map(self):
a_len = int(self.lineEdit_2.text())
if self.checkBox_5.isChecked():
s_bar = 1
else:
s_bar = 0
try:
# Read from lineEdits:
ang_lst = str(self.lineEdit_4.text()).split() #A list of displacement directions. This is used to determine the coloring pattern. For single color rendering, just leave it as [0].
ang_lst = [int(a) for a in ang_lst]
color_lst = str(self.lineEdit_5.text()).split()
#====Plot====
disp_color = set_arrow_color(disp, ang_lst, color_lst)
global f_vec_map
f_vec_map = PlotCanvas()
f_vec_map.setWindowTitle('VecMap0.1: Vector Map')
f_vec_map.axes.imshow(image)
f_vec_map.axes.set_axis_off()
for vec in disp_color:
f_vec_map.axes.arrow(vec[0],vec[1],vec[2]*a_len,vec[3]*a_len,color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3)
#Add a scale bar
if s_bar == 1:
scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)
f_vec_map.axes.add_artist(scalebar)
f_vec_map.show()
f_vec_map.fig.savefig(my_path + title + "_{}_vec_map.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True)
print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom))
except NameError:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please calculate the displacement first!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
except IndexError:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("The list of colors should match the list of angles!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#========= Generate O vector map module =============================================
#========= Connected to self.pushButton_14 ===========================================
def show_O_vec_map(self):
O_len = int(self.lineEdit_3.text())
if self.checkBox_5.isChecked():
s_bar = 1
else:
s_bar = 0
try:
global f_vec_map_O
f_vec_map_O = PlotCanvas()
f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms')
f_vec_map_O.axes.imshow(image)
f_vec_map_O.axes.set_axis_off()
for vec in disp_O:
f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3)
#Add a scale bar
if s_bar == 1:
scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)
f_vec_map_O.axes.add_artist(scalebar)
f_vec_map_O.show()
f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True)
print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
except NameError:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("No O displacement data exist!")
msg.setWindowTitle("Hey guys")
returnValue = msg.exec()
#============ Load displacement from csv module ====================================
#============ Connected to self.pushButton_7 =======================================
def load_from_csv(self):
# Load displacement data from the csv file saved previously
global s, my_path, title, scale, units, disp, disp_O, image, disp_atom
openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)')
file = openfile_name[0]
if file:
my_path = getDirectory(file,'/')
s = readImage(my_path + 'Original image.hspy')
title = s.metadata.General.title
scale = s.axes_manager[0].scale
units = s.axes_manager[0].units
image = s.data
disp = load_disp_data_from_csv(file)
# Look for the O data
disp_atom = file[-15:-9]
file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv'
if os.path.isfile(file_O_disp):
disp_O = load_disp_data_from_csv(file_O_disp)
find_O = 1
print('Found O displacement data!')
else:
find_O = 0
print('No O displacement data was found! Will do {} atom displacement only!'.format(disp_atom))
#============ Disclaimer button ====================================================
#============ Connected to self.pushButton_8 =======================================
def disclaimer(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("<b>Disclaimer</b><br>" \
"This app was designed by Dr <NAME>. Redistribution and use in source, " \
"with or without modification, are permitted. Any redistribution must remain "\
"the above copyright. When a scientific publication is reached through the "\
"app, please add the following reference: <br>"\
"1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a>"\
"<br>"\
"2. Ma, T. et al. <a href=\"https://doi.org/10.1063/1.5115039\">Appl. Phys. Lett. 115, 122902 (2019).</a>"
"<br>" \
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.<br>")
msg.setWindowTitle("VecMap0.1: Disclaimer")
def disclaimerButtonClick():
msg = QMessageBox()
msg.setText('Thanks for using VecMap')
msg.setWindowTitle('Thank you!')
returnValue = msg.exec()
msg.buttonClicked.connect(disclaimerButtonClick)
returnValue = msg.exec()
#============ About button ====================================================
#============ Connected to self.pushButton_9 =======================================
def show_about(self):
msg = QMessageBox()
# msg.setIcon(QMessageBox.Information)
msg.setText("VecMap v0.1.1"\
"<br>"\
"Designed by Dr. <NAME>"\
"<br>"\
"06/13/2020"\
"<br>"
"First version release!<br>"
"Get more information and<br> source code from my <a href=\"http://www-personal.umich.edu/~taoma/VectorMap.html\">website</a>.")
msg.setWindowTitle("VecMap0.1: About")
returnValue = msg.exec()
#============ Acknowledgments button ====================================================
#============ Connected to self.pushButton_10 =======================================
def acknowledgments(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("This program was written with Python 3. The author " \
"acknowledges the HyperSpy and Atomap packages which "\
"are partially incorporated in the program. Please "\
"consider citing/adding acknowledgement for Hyperspy "\
"and Atomap packages in your publication:"\
"<br>"
"<NAME> la et al. <a href=\"http://doi.org/10.5281/zenodo.3396791\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>" \
"<br>"
"<NAME>. et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>")
msg.setWindowTitle("VecMap0.1: Acknowledgments")
returnValue = msg.exec()
#============ Contact button ====================================================
#============ Connected to self.pushButton_11 =======================================
def show_contact(self):
msg = QMessageBox()
msg.setText("Ask questions and report bugs to:"\
"<br>"
"<a href=\"mailto:<EMAIL>\"><EMAIL></a>")
msg.setWindowTitle("VecMap0.1: Contact")
returnValue = msg.exec()
#============ Donate me button ====================================================
#============ Connected to self.pushButton_12 =======================================
def donate(self):
msg = QMessageBox()
msg.setText("I will make this app freely available for the society.<br>"\
"If you like this app, show your appreciation by <a href=\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ¤cy_code=USD&source=url\">donating me!</a>"\
"<br>"\
"Your support is my motivation!<br>")
msg.setWindowTitle("VecMap0.1: Donate me!")
returnValue = msg.exec()
#=========== Define figure canvas ===================================================
class PlotCanvas(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('VecMap0.1: Plot')
self.create_main_frame()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
vbox = QVBoxLayout()
vbox.addWidget(self.mpl_toolbar)
vbox.addWidget(self.canvas)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
#==================== Find separation canvas =========================================
class SeparationCanvas(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('VecMap0.1: Find separation factors')
self.create_main_frame()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 10x10 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((10.0, 10.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
        # Add a 3x3 grid of axes (9 panels in total)
#
self.axes = [self.fig.add_subplot(3,3,n) for n in range(1,10)]
self.fig.set_tight_layout(True)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
vbox = QVBoxLayout()
vbox.addWidget(self.mpl_toolbar)
vbox.addWidget(self.canvas)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
#==================== Modules and helper functions ===================================
from hyperspy.io import load
from atomap.atom_finding_refining import get_atom_positions
from atomap.sublattice import Sublattice
from atomap.tools import remove_atoms_from_image_using_2d_gaussian
import os
import numpy as np
import matplotlib.pyplot as plt
import math
import copy
from scipy.spatial import distance
from matplotlib_scalebar.scalebar import ScaleBar
#====Helper functions, do not change====
def readImage(file):
#Load raw image file for process.
#Require Hyperspy package
s = load(file)
return s
def getDirectory(file, s='.'):
#Make the working directory and return the path.
for idx in range(-1, -len(file), -1):
if file[idx] == s: #find the file extension and remove it. '/' for parent path
path = file[:idx] + '/'
return path
def find_atom(img, ini_pos, atom_name, atom_color='r'):
#Refine atom positions for a sublattice
#img: an array of image data; ini_pos: initial positions; atom_name: a string for name; atom_color: a string for color
#img_110: For [110] image
sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)
sublattice.find_nearest_neighbors()
sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
return sublattice #Return an atomap sublattice object
def find_neighboring_atoms(P, A, Ua, tol=1.2):
# Define a function to find the neighboring atoms of P(x,y) from a list of atoms A.
    # P: a given atom (x,y); A: a list of atoms; Ua: a threshold in px, 0.707*a for [001] and 0.5*a for [110]
x, y = P
N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua * tol) **2] #A list to store the neighboring atoms
N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) ** 0.5)
return N
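#Example for illustration: with Ua = 2 px and the default tol = 1.2, only atoms within
#2.4 px of P are kept, e.g.
#  find_neighboring_atoms((0, 0), [(1, 0), (3, 0), (0, 2)], 2) -> [(1, 0), (0, 2)]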
def closest_node(node, nodes):
#A function to find the closest node in an array
closest_index = distance.cdist([node], nodes).argmin()
return closest_index,nodes[closest_index]
def line(p1, p2):
#Find a line function from two points
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0]*p2[1] - p2[0]*p1[1])
return A, B, -C
def intersection(L1, L2):
#A function to find the intersection point of two lines
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x,y
else:
return False
def math_center(a, b, c, d):
#Define a function to find the mathematical center of four points, a, b, c, d
#Find the diagonal of a
M = [b,c,d]
diag_idx = distance.cdist([a],M).argmax()
L1 = line(a,M[diag_idx])
del M[diag_idx]
L2 = line(M[0],M[1])
center = intersection(L1, L2)
return center
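#Example for illustration: for the four corners of a unit square the diagonals cross at
#the center, e.g. math_center((0, 0), (1, 0), (0, 1), (1, 1)) -> (0.5, 0.5)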
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #calculate the ideal atomic positions for A in an undistorted perovskite structure
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter in nm; scale is the image pixel size
#return a list of tuples
ideal_positions = []
Neighbor_positions = []
if not img_110: #calculate image [001]
for atom in A:
Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707)
if len(Neighbor) == 4:
ap_center = math_center(*Neighbor)
ideal_positions.append(ap_center)
Neighbor_positions.append(Neighbor) #Save neighbors for plotting
return ideal_positions, Neighbor_positions
for atom in A:
Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.5)
if len(Neighbor) == 2:
ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2)
ideal_positions.append(ap_center)
Neighbor_positions.append(Neighbor)
return ideal_positions, Neighbor_positions
def find_ideal_O_pos(A, B, Ua, scale):
    #calculate the ideal atomic positions for O in an undistorted perovskite structure
#only support [001] images
ideal_O_positions = []
for atom in A:
Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707)
if len(Neighbor) == 4:
n_0 = Neighbor.pop(0)
n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0])
n_3 = Neighbor.pop()
o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
ideal_O_positions.append(o_0)
o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
ideal_O_positions.append(o_1)
o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
ideal_O_positions.append(o_2)
o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
ideal_O_positions.append(o_3)
ideal_O_positions = list(dict.fromkeys(ideal_O_positions))
return ideal_O_positions
def find_displacement(A, A_com, scale):
    #find the atomic displacement of A relative to the ideal positions
    #A is the list of refined atom coordinates; A_com is the list of ideal (computed) positions; scale is the image pixel size
disp = []
for atom in A_com:
arrow_end = closest_node(atom,A)[1]
vec_len = distance.euclidean(arrow_end,atom)
if vec_len > 0.14 / scale:
continue
dx = arrow_end[0]-atom[0]
dy = arrow_end[1]-atom[1]
#calculate the displacement vector angle according to dx, dy.
        #atan2 covers all four quadrants and avoids dividing by zero when dx == 0
        vec_ang = math.degrees(math.atan2(dy, dx)) % 360
disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
return disp
def set_arrow_color(vec_data, ang_lst, color_lst):
color_lst = color_lst
vec_data_color = copy.deepcopy(vec_data) #Make a copy so it does not modify the original list
if len(ang_lst) == 1:
for vec in vec_data_color:
            vec.append(color_lst[0]) #single-color rendering: append the first (only) color provided
return vec_data_color
ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
ang_bond = []
for idx in range(len(ang_lst_mod)-1):
ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx])
ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])
for vec in vec_data_color:
ang = vec[5] - ang_lst[0]
if ang < 0:
ang = ang + 360
for i in range(len(ang_bond)-1):
if round(ang) in range(ang_bond[i], ang_bond[i+1]):
vec.append(color_lst[i+1])
for vec in vec_data_color:
if len(vec) == 6:
vec.append(color_lst[0])
return vec_data_color
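#Example for illustration: with a single angle in ang_lst every vector simply gets the
#first (only) color appended, e.g.
#  set_arrow_color([[10, 20, 1, 0, 0.05, 0.0]], [0], ['yellow'])
#  -> [[10, 20, 1, 0, 0.05, 0.0, 'yellow']]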
def load_disp_data_from_csv(file):
with open(file,'r') as disp:
disp_data = []
lines = disp.readlines()
print('Displacement data:\n')
print(lines[0])
for lin in lines[1:]:
lin_data = lin.strip().split(', ')
disp_data.append([float(data) for data in lin_data])
return disp_data
#====Application entry==================================
def main():
print('='*50)
print('''
Welcome to the first version of VecMap
--- a convenient tool to calculate atomic displacements in perovskite structures
This app was designed by Dr. <NAME>.
Address your questions and suggestions to <EMAIL>.
Please see the "Disclaimer" before use!
Hope you get good results and publications from it!
Version 0.1.1 06/13/2020
''')
print('='*50)
import sys
app = QtWidgets.QApplication(sys.argv)
VecMap = QtWidgets.QWidget()
ui = Ui_VecMap()
ui.setupUi(VecMap)
VecMap.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.234375 | 2 |
count_subset_occurrences.py | will-r-wang/opencv-object-detection | 0 | 12793188 | import numpy as np
def count_subset_occurrences(array, subset_array):
occurrences = 0
for idx in range(len(array) - len(subset_array) + 1):
if np.array_equal(array[idx:(idx + len(subset_array))], subset_array):
occurrences += 1
return occurrences
def test_base_case():
assert count_subset_occurrences(
np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),
np.array([1, 1])
) == 3
test_base_case()
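# Extra illustrative check (this test is an addition, not part of the original file):
# matches may overlap, because the comparison window advances one index at a time.
def test_overlapping_case():
    assert count_subset_occurrences(
        np.array([1, 1, 1]),
        np.array([1, 1])
    ) == 2
test_overlapping_case()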
| 3.421875 | 3 |
output/models/nist_data/list_pkg/name/schema_instance/nistschema_sv_iv_list_name_length_3_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12793189 | <reponame>tefra/xsdata-w3c-tests
from output.models.nist_data.list_pkg.name.schema_instance.nistschema_sv_iv_list_name_length_3_xsd.nistschema_sv_iv_list_name_length_3 import NistschemaSvIvListNameLength3
__all__ = [
"NistschemaSvIvListNameLength3",
]
| 0.925781 | 1 |
cv2/select-pixels-by-RGB/main.py | whitmans-max/python-examples | 140 | 12793190 | <gh_stars>100-1000
#!/usr/bin/env python3
# date: 2019.09.24
# https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/
# replace pixels where `R > G > B`
import cv2
import numpy as np
img = cv2.imread('/home/furas/Obrazy/images/image.png')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0
cv2.imshow('image', img)
cv2.waitKey(0)
| 3.09375 | 3 |
src/gui/actions.py | iosetek/CommandRecognition | 0 | 12793191 | <gh_stars>0
from src.api import Api
from src.gui.appJar.appjar import gui
class ActionsUI:
def __init__(self, app):
self.__app = app
def append_its_content(self):
"""
Creates view designed to create action and attach it to
particular command. Action can be defined as executing
executable file, python file or using list of bash commands.
"""
self.__app.addListBox("ACTIONS_COMMAND_LISTBOX",
row=1, column=1, rowspan=8, colspan=5)
self.__app.addNamedButton("ATTACH COMMAND", "ACTIONS_ATTACH_COMMAND", print("TODO"),
row=10, column=1, rowspan=2, colspan=5)
self.__app.addNamedButton("DETACH COMMAND", "ACTIONS_DETACH_COMMAND", print("TODO"),
row=13, column=1, rowspan=2, colspan=5)
self.__app.addListBox("ACTIONS_ACTION_LISTBOX",
row=1, column=7, rowspan=5, colspan=5)
self.__app.addNamedButton("NEW COMMAND", "ACTIONS_NEW_COMMAND", print("TODO"),
row=7, column=7, rowspan=2, colspan=5)
self.__app.addNamedButton("EDIT COMMAND", "ACTIONS_EDIT_COMMAND", print("TODO"),
row=10, column=7, rowspan=2, colspan=5)
self.__app.addNamedButton("REMOVE COMMAND", "ACTIONS_REMOVE_COMMAND", print("TODO"),
row=13, column=7, rowspan=2, colspan=5)
self.__app.addLabel("ACTIONS_ACTION_NAME_LABEL", "ACTION NAME",
row=2, column=13, rowspan=2, colspan=9)
self.__app.addEntry("ACTIONS_ACTION_NAME_ENTRY",
row=4, column=13, rowspan=2, colspan=9)
self.__app.addNamedButton("SET EXE FILE", "ACTIONS_SET_EXE_FILE_BUTTON", print("TODO"),
row=7, column=13, rowspan=2, colspan=9)
self.__app.addNamedButton("SET PYTHON FILE", "ACTIONS_SET_PY_FILE_BUTTON", print("TODO"),
row=10, column=13, rowspan=2, colspan=9)
self.__app.addNamedButton("SET BASH COMMAND", "ACTIONS_SET_BASH_COMMAND_BUTTON", print("TODO"),
row=13, column=13, rowspan=2, colspan=9)
print("TODO") | 2.765625 | 3 |
PySARibbon/__init__.py | Bllose/SARibbon-pyqt5 | 3 | 12793192 | # -*- coding: utf-8 -*-
"""
@Module __init__.py
@Author ROOT
"""
from .SAFramelessHelper import SAFramelessHelper
from .SARibbonBar import SARibbonBar
from .SARibbonButtonGroupWidget import SARibbonButtonGroupWidget
from .SARibbonCategory import SARibbonCategory
from .SARibbonCategoryLayout import SARibbonCategoryLayout
from .SARibbonContextCategory import SARibbonContextCategory
from .SARibbonGallery import SARibbonGallery
from .SARibbonMainWindow import SARibbonMainWindow
from .SARibbonPannel import SARibbonPannel
from .SARibbonPannelLayout import SARibbonPannelLayout
from .SARibbonQuickAccessBar import SARibbonQuickAccessBar
from .SAWindowButtonGroup import SAWindowButtonGroup
| 0.992188 | 1 |
app/models/user.py | nickobrad/blogs | 0 | 12793193 | from .. import db
from flask_login import UserMixin, login_manager, LoginManager
from werkzeug.security import generate_password_hash, check_password_hash
from .. import login_manager
from datetime import date, datetime
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
firstname = db.Column(db.String(255))
secondname = db.Column(db.String(255))
username = db.Column(db.String(255),unique = True)
email = db.Column(db.String(255), unique = True, index = True)
profile_picture = db.Column(db.String())
profile_bio = db.Column(db.String(255))
secured_password = db.Column(db.String(255))
blog_posts_by_me = db.relationship('Blog', backref = 'myblogposts', lazy = 'dynamic')
blog_comments_by_me = db.relationship('BlogComment', backref = 'myblogcomments', lazy = 'dynamic')
@property
def password(self):
raise AttributeError('You cannot view a users password')
@password.setter
def password(self, password):
self.secured_password = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.secured_password, password)
    def save_user(self):
db.session.add(self)
db.session.commit() | 2.59375 | 3 |
sitykelib/modifyer.py | DistinctWind/SITYKE | 2 | 12793194 | <filename>sitykelib/modifyer.py
from sitykelib.sercher import perfix
import os
def get_pdf_name(original_name):
return perfix(original_name)+'.pdf'
def cut_ppt(pptfiles):
new_pptfiles = list()
arg = ' -fc- -odpi 300 -mode copy -w 1364 -h 1016 -o [Cut]%s '
for pptfile in pptfiles:
pdfname = get_pdf_name(pptfile)
os.system('k2pdfopt'+arg+'"'+pdfname+'"')
new_pptfiles.append('[Cut]'+pdfname)
return new_pptfiles
def reform_doc(docfiles):
new_docfiles = list()
arg = ' -fc- -odpi 300 -o [Reformed]%s '
for docfile in docfiles:
pdfname = get_pdf_name(docfile)
os.system('k2pdfopt'+arg+'"'+pdfname+'"')
new_docfiles.append('[Reformed]'+pdfname)
return new_docfiles
def dark_mode(pdffiles):
new_pdffiles = list()
arg = ' -fc- -odpi 300 -mode copy -neg -o [Dark]%s '
for file in pdffiles:
pdfname = get_pdf_name(file)
os.system('k2pdfopt'+arg+'"'+pdfname+'"')
new_pdffiles.append('[Dark]'+pdfname)
return new_pdffiles | 2.671875 | 3 |
blog/models.py | bibibricodeur/django-bibi | 0 | 12793195 | <gh_stars>0
from django.db import models
# https://docs.djangoproject.com/fr/3.1/intro/tutorial02/
# This function is used to format the URLs
from django.urls import reverse
from django.contrib.auth.models import User
# Create your models here.
class BlogCategory(models.Model):
# Fields
category_name = models.CharField('Category', max_length=255, unique=True, blank=True, null=True)
category_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True)
# https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models
# Metadata
class Meta:
verbose_name = 'Post Category'
verbose_name_plural = 'Posts Categories'
# Methods
def get_absolute_url(self):
"""Cette fonction est requise pas Django, lorsque vous souhaitez détailler le contenu d'un objet."""
return reverse('blog-detail', args=[str(self.id)])
def __str__(self):
"""Fonction requise par Django pour manipuler les objets dans la base de données."""
return self.category_name
class BlogPost(models.Model):
# Fields
options = (
('draft', 'Private'),
('published', 'Public'),
)
blog_title = models.CharField('Title', max_length=255)
blog_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True)
blog_description = models.CharField('Description', max_length=255, blank=True, null=True)
blog_picture = models.ImageField('Head picture', blank=True, null=True, upload_to='blog')
blog_content = models.TextField('Texte')
blog_file = models.FileField('File', blank=True, null=True, upload_to='blog')
blog_created = models.DateTimeField('Created', auto_now_add=True)
blog_updated = models.DateTimeField('Updated', auto_now=True)
seo_title = models.SlugField('Seo title', max_length=60, unique=True, blank=True, null=True)
seo_description = models.SlugField('Seo description', max_length=165, unique=True, blank=True, null=True)
# https://www.youtube.com/watch?v=_ph8GF84fX4
blog_author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Author', blank=True, null=True)
# https://www.youtube.com/watch?v=jFqYuWNyLnI
blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True, null=True)
    blog_favorite = models.BooleanField('Favorite', default=False)
blog_status = models.CharField('Status', max_length=12, choices=options, default='draft')
# https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models
# Metadata
class Meta:
verbose_name = 'Post'
verbose_name_plural = 'Posts'
ordering = ('blog_created', )
# Methods
def get_absolute_url(self):
"""Cette fonction est requise pas Django, lorsque vous souhaitez détailler le contenu d'un objet."""
return reverse('blog-detail', args=[str(self.id)])
def __str__(self):
"""Fonction requise par Django pour manipuler les objets dans la base de données."""
return self.blog_title
### End
| 2.640625 | 3 |
packages/pyright-internal/src/tests/samples/paramSpec19.py | kihoonim/pyright | 1 | 12793196 | <reponame>kihoonim/pyright
# This sample tests the case where a ParamSpec is used within a generic
# type alias with a Callable.
from typing import Any, Callable, Generic, Protocol
from typing_extensions import Concatenate, ParamSpec
P = ParamSpec("P")
# Example 1: Callable generic type alias
CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]]
class Command1(Generic[P]):
def __init__(self, handler: CommandHandler1[P]) -> None:
...
class Application1:
def func1(self, handler: CommandHandler1[P]) -> Command1[P]:
return Command1(handler)
def func2(
self,
handler: CommandHandler1[P],
) -> Callable[[CommandHandler1[P]], Command1[P]]:
def decorator(handler: CommandHandler1[P]) -> Command1[P]:
return self.func1(handler)
return decorator
# Example 2: Callback Protocol
class CommandHandler2(Protocol[P]):
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]:
...
class Command2(Generic[P]):
def __init__(self, handler: CommandHandler2[P]) -> None:
...
class Application2:
def func1(self, handler: CommandHandler2[P]) -> Command2[P]:
return Command2(handler)
def func2(
self,
handler: CommandHandler2[P],
) -> Callable[[CommandHandler2[P]], Command2[P]]:
def decorator(handler: CommandHandler2[P]) -> Command2[P]:
return self.func1(handler)
return decorator
def handler(arg1: int, arg2: str) -> dict[str, Any]:
...
v1: CommandHandler2 = handler
| 2.59375 | 3 |
run.py | Originofamonia/mcan-vqa | 0 | 12793197 | # --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# modify this to our VQA dataset
# --------------------------------------------------------
import os
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib import colors
from cfgs.base_cfgs import Cfgs
from core.exec import Execution
import argparse, yaml
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='MCAN Args')
parser.add_argument('--run', dest='run_mode',
choices=['train', 'val', 'test', 'visualize'],
type=str, default='train')
parser.add_argument('--model', dest='model',
choices=['small', 'large'],
default='small', type=str)
parser.add_argument('--split', dest='train_split',
choices=['train', 'train+val', 'train+val+vg'],
help="set training split, "
"eg.'train', 'train+val+vg'"
"set 'train' can trigger the "
"eval after every epoch",
type=str)
parser.add_argument('--eval_every_epoch', default=False,
help='set True to evaluate the '
'val split when an epoch finished'
"(only work when train with "
"'train' split)",
type=bool)
parser.add_argument('--test_save_pred',
help='set True to save the '
'prediction vectors'
'(only work in testing)',
type=bool)
parser.add_argument('--batch_size', default=1, # was 256
help='batch size during training',
type=int)
parser.add_argument('--max_epoch',
help='max training epoch',
type=int)
parser.add_argument('--preload',
help='pre-load the features into memory'
'to increase the I/O speed',
type=bool)
parser.add_argument('--gpu', default='0,1',
help="gpu select, eg.'0, 1, 2'",
type=str)
parser.add_argument('--seed', default=444,
help='fix random seed',
type=int)
parser.add_argument('--version',
help='version control',
type=str)
parser.add_argument('--resume',
help='resume training',
type=bool)
parser.add_argument('--ckpt_version',
help='checkpoint version',
type=str)
parser.add_argument('--ckpt_epoch',
help='checkpoint epoch',
type=int)
parser.add_argument('--ckpt_path',
help='load checkpoint path, we '
'recommend that you use '
'ckpt_version and ckpt_epoch '
'instead',
type=str)
parser.add_argument('--grad_accu_steps',
help='reduce gpu memory usage',
type=int)
parser.add_argument('--num_workers',
help='multithreaded loading',
type=int)
parser.add_argument('--pin_mem',
help='use pin memory',
type=bool)
parser.add_argument('--verbose',
help='verbose print',
type=bool)
parser.add_argument('--dataset_path',
help='vqav2 dataset root path',
type=str)
parser.add_argument('--feature_path',
help='bottom up features root path',
type=str)
args = parser.parse_args()
return args
def main():
opt = Cfgs()
args = parse_args()
args_dict = opt.parse_to_dict(args)
cfg_file = "cfgs/{}_model.yml".format(args.model)
with open(cfg_file, 'r') as f:
yaml_dict = yaml.load(f, Loader=yaml.FullLoader)
args_dict = {**yaml_dict, **args_dict}
opt.add_args(args_dict)
opt.proc()
print('Hyper Parameters:')
print(opt)
opt.check_path()
execution = Execution(opt)
execution.run(opt.run_mode)
def text_layout():
# compute some interesting data
x0, x1 = -5, 5
y0, y1 = -3, 3
x = np.linspace(x0, x1, 500)
y = np.linspace(y0, y1, 500)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
# Set up a colormap:
# use copy so that we do not mutate the global colormap instance
palette = copy(plt.cm.gray)
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = np.ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
# set up the Axes objects
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4))
# plot using 'continuous' color map
im = ax1.imshow(Zm, interpolation='bilinear',
cmap=palette,
norm=colors.Normalize(vmin=-1.0, vmax=1.0),
aspect='auto',
origin='lower',
extent=[x0, x1, y0, y1])
ax1.set_title('Green=low, Red=high, Blue=masked')
cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1)
cbar.set_label('uniform')
for ticklabel in ax1.xaxis.get_ticklabels():
ticklabel.set_visible(False)
# Plot using a small number of colors, with unevenly spaced boundaries.
im = ax2.imshow(Zm, interpolation='nearest',
cmap=palette,
norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=palette.N),
aspect='auto',
origin='lower',
extent=[x0, x1, y0, y1])
ax2.set_title('With BoundaryNorm')
cbar = fig.colorbar(im, extend='both', spacing='proportional',
shrink=0.9, ax=ax2)
cbar.set_label('proportional')
fig.suptitle('imshow, with out-of-range and masked data')
f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg')
plt.savefig(f1)
plt.close()
if __name__ == '__main__':
main()
# text_layout()
| 2.609375 | 3 |
app/core/serializers.py | jblanquicett92/activo-django-docker-postgres | 0 | 12793198 | <gh_stars>0
from django.contrib.auth import get_user_model
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol
from .models import Departamento_Turno, Turno, Puesto, Usuario
class DepartamentoSerializer(serializers.ModelSerializer):
class Meta:
model = Departamento
fields = '__all__'
class TurnoSerializer(serializers.ModelSerializer):
class Meta:
model = Turno
fields = '__all__'
class Tipo_RolSerializer(serializers.ModelSerializer):
class Meta:
model = Tipo_Rol
fields = '__all__'
class ScopeSerializer(serializers.ModelSerializer):
class Meta:
model = Scope
fields = '__all__'
class EstatusSerializer(serializers.ModelSerializer):
class Meta:
model = EstatusUsuario
fields = '__all__'
class IdiomaSerializer(serializers.ModelSerializer):
class Meta:
model = Idioma
fields = '__all__'
class RolSerializer(serializers.ModelSerializer):
class Meta:
model = Rol
fields = '__all__'
depth = 1
class Departamento_TurnoSerializer(serializers.ModelSerializer):
class Meta:
model = Departamento_Turno
fields = '__all__'
depth = 1
class PuestoSerializer(serializers.ModelSerializer):
class Meta:
model = Puesto
fields = '__all__'
depth = 2
class UsuarioSerializer(serializers.ModelSerializer):
class Meta:
model = Usuario
fields = '__all__'
depth = 2
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('email', 'password', 'username')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
return get_user_model().objects.create_user(**validated_data)
class AuthTokenSerializer(serializers.Serializer):
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
class Historial_TurnoSerializer(serializers.ModelSerializer):
class Meta:
model = Historial_Turno
fields = '__all__'
depth = 4
class Usuario_Lat_Lng_Serializer(serializers.Serializer):
lat = serializers.CharField(max_length=40, allow_blank=False)
lng = serializers.CharField(max_length=40, allow_blank=False)
class Meta:
fields = ('Lat', 'lng')
| 2.15625 | 2 |
examples/plot_transport_laws.py | Cgadal/PyDune | 0 | 12793199 | r"""
==============
Transport laws
==============
Create a plot comparing the different transport laws.
"""
import matplotlib.pyplot as plt
import numpy as np
from PyDune.physics.sedtransport import transport_laws as TL
theta = np.linspace(0, 0.4, 1000)
theta_d = 0.035
omega = 8
plt.figure()
plt.plot(theta, TL.quadratic_transport_law(theta, theta_d, omega), label='quadratic transport law')
plt.plot(theta, TL.cubic_transport_law(theta, theta_d, omega), label='cubic transport law')
plt.plot(theta, TL.quartic_transport_law(theta, theta_d), label='quartic transport law')
plt.xlabel(r'Shield number, $\theta$')
plt.ylabel('Non dimensional saturated flux')
plt.legend()
plt.tight_layout()
plt.show()
| 3.109375 | 3 |
#7 Kelas Dan Object/Decorator/decorator.py | HudaFiqri/belajarPython | 5 | 12793200 | '''
decorator
so, to make wrapping functions easier, Python provides something called a decorator.
reference source: https://www.youtube.com/watch?v=r7Dtus7N4pI
revised on: 26-02-2021
'''
'''
create the function that will be called.
a Python decorator must return a callable; if the function is not returned, Python will show the error
TypeError: 'NoneType' object is not callable | this is because the decorator did not return the function.
'''
def f1(func):
    # any value printed here is output at the moment the decorator is applied
function_data = 'function data'
print(function_data)
return func
# this is the unique part of Python decorators: the @ symbol, which applies f1 to the function below it
@f1
def func_data():
print('hello')
# now call the decorated function and see what output comes out
func_data()
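# Illustrative sketch of the more common wrapping style (log_call and greet are new
# names added for this example, not part of the original tutorial): the decorator
# returns a new wrapper so code can run before and after every call, and
# functools.wraps keeps the wrapped function's name and docstring intact.
import functools
def log_call(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        result = func(*args, **kwargs)
        print('finished', func.__name__)
        return result
    return wrapper
@log_call
def greet():
    print('hello again')
greet()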
| 3.734375 | 4 |
widgets/tests_list.py | alexoff13/QTpost-machine | 1 | 12793201 | <reponame>alexoff13/QTpost-machine
from typing import Union
from time import time
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidget, QAbstractItemView, QListWidgetItem
from widgets.tape import Tape
class Data:
test: QListWidgetItem
state: dict
saved_state: dict
class TestsList(QListWidget):
DEFAULT_NAME = 'test'
def __init__(self, tape: Tape, parent: any = None):
super().__init__(parent)
self.__tape = tape
self.__tests = dict()
self.__set_list()
self.__last_time_dragged = float()
@property
def last_time_dragged(self) -> float:
return self.__last_time_dragged
def __set_list(self) -> None:
        # enables drag-and-drop of items
self.setDragDropMode(QAbstractItemView.DragDrop)
        # enables selecting several items at once
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
def get_test_name(self, name: str = DEFAULT_NAME, ignore_name: str = None) -> str:
if not name:
name = self.DEFAULT_NAME
k = 1
new_name = name
while new_name in self.__tests and new_name != ignore_name:
new_name = f'{name}-{k}'
k += 1
return new_name
def add_test(self, test_name: str = None, state: dict = None, test: QListWidgetItem = None,
reset: bool = True, saved_state: dict = None) -> QListWidgetItem:
if test_name is None:
test_name = self.get_test_name()
if test is None:
test = QListWidgetItem()
test.setText(test_name)
test.last_name = test_name
test.setFlags(test.flags() | Qt.ItemIsEditable)
self.addItem(test)
if reset:
self.__tape.reset()
self.__tests[test_name] = Data()
self.__tests[test_name].test = test
self.__tests[test_name].state = state if state is not None else Tape.get_empty_data()
self.__tests[test_name].saved_state = saved_state
return test
def remove_test(self, test: QListWidgetItem, internal_remove: bool = True) -> None:
self.takeItem(self.row(test))
if internal_remove:
self.__tests.pop(test.text())
def get_last(self) -> Union[QListWidgetItem, None]:
return self.item(self.count() - 1) if self.count() > 0 else None
def get_state(self, test: QListWidgetItem) -> dict:
return self.__tests[test.text()].state
def get_saved_state(self, test: QListWidgetItem) -> dict:
return self.__tests[test.text()].saved_state
def rename(self, test_name: str, new_test_name: str) -> None:
self.__tests[new_test_name] = self.__tests.pop(test_name)
self.__tests[new_test_name].test.last_name = new_test_name
def clear(self) -> None:
for test_name in self.__tests:
self.takeItem(self.row(self.__tests[test_name].test))
self.__tests.clear()
def save_state(self, test: QListWidgetItem, global_: bool = False) -> None:
if test.text() in self.__tests:
if global_:
self.__tests[test.text()].saved_state = self.__tape.get_data()
else:
self.__tests[test.text()].state = self.__tape.get_data()
def get_data(self) -> dict:
data = dict()
for test_name in self.__tests:
data[test_name] = self.__tests[test_name].state
return data
    # TODO: it might be better to offer a choice: load with replacement or append to the end
def set_from_file(self, file: dict) -> None:
self.clear()
for test_name in file:
self.add_test(test_name, file[test_name], saved_state=file[test_name])
def has_unsaved_data(self) -> bool:
return len(self.__tests) > 0
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
super().dragEnterEvent(event)
def dragMoveEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
else:
super().dragMoveEvent(event)
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(Qt.CopyAction)
event.accept()
links = list()
for url in event.mimeData().urls():
links.append(str(url.toLocalFile()))
else:
event.setDropAction(Qt.MoveAction)
super().dropEvent(event)
def startDrag(self, supported_actions: Union[QtCore.Qt.DropActions, QtCore.Qt.DropAction]) -> None:
self.__last_time_dragged = time()
super().startDrag(supported_actions)
| 2.625 | 3 |
CSV Chunker & Sorter/utils/folderUtil.py | momoji123/Tools | 0 | 12793202 | <gh_stars>0
import os, shutil
from datetime import datetime;
SOURCE_FOLDER = "./temp";
TARGET_FOLDER = "./RESULT";
BACKUP_FOLDER = "./STAGING";
def printAction(action, fileNames):
print("\n" + action + " " + str(len(fileNames)) + " files");
def countFiles(pathToFolder):
fileNames = os.listdir(pathToFolder);
return len(fileNames);
def getFileNameOnly(filePath):
splitted = filePath.split("/");
return splitted[len(splitted)-1];
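# Example for illustration: getFileNameOnly("C:/docs/report.pdf") -> "report.pdf"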
def deleteAllFiles(pathToFolder):
fileNames = os.listdir(pathToFolder);
printAction("Deleting", fileNames);
for filename in fileNames:
file_path = os.path.join(pathToFolder, filename);
deleteFile(file_path);
def deleteFile(filePath):
try:
if os.path.isfile(filePath) or os.path.islink(filePath):
os.unlink(filePath);
elif os.path.isdir(filePath):
shutil.rmtree(filePath)
except Exception as e:
raise Exception('Failed to delete %s. Reason: %s' % (filePath, e));
def moveAllFiles(srcFolder, dstFolder):
fileNames = os.listdir(srcFolder);
printAction("Moving", fileNames);
for filename in fileNames:
src = os.path.join(srcFolder, filename);
dst = os.path.join(dstFolder, filename);
moveFile(src, dst);
def moveFile(srcFilePath, dstFilePath):
try:
shutil.move(srcFilePath, dstFilePath);
except Exception as e:
raise Exception('Failed to move from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e));
def copyAllFiles(srcFolder, dstFolder):
fileNames = os.listdir(srcFolder);
printAction("Copying", fileNames);
for filename in fileNames:
src = os.path.join(srcFolder, filename);
dst = os.path.join(dstFolder, filename);
copyFile(src, dst);
def copyFile(srcFilePath, dstFilePath):
try:
shutil.copyfile(srcFilePath, dstFilePath);
except Exception as e:
raise Exception('Failed to copy from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e));
def clearTempFolder(gui):
gui.consoleInsert("Cleaning Temp Folder");
try:
deleteAllFiles(SOURCE_FOLDER);
gui.consoleInsert("Temp Folder Cleaned");
except Exception as e:
gui.consoleInsert(str(e), "red");
raise e;
def clearResultFolder(gui):
gui.consoleInsert("Cleaning Result Folder");
try:
deleteAllFiles(TARGET_FOLDER);
gui.consoleInsert("Result Folder Cleaned");
except Exception as e:
gui.consoleInsert(str(e), "red");
raise e;
def clearStagingFolder(gui):
gui.consoleInsert("Cleaning Staging Folder");
try:
deleteAllFiles(BACKUP_FOLDER);
gui.consoleInsert("Staging Folder Cleaned");
except Exception as e:
gui.consoleInsert(str(e), "red");
raise e;
def backupFiles(gui, filenames=[]):
gui.consoleInsert("Backing up files...");
fname = [];
if(len(filenames)<1):
fname=gui.filenames;
else:
fname=filenames;
for f in fname:
fileNameOnly = getFileNameOnly(f);
gui.consoleInsert("Backing up " + f + " to " + SOURCE_FOLDER);
try:
copyFile(f, SOURCE_FOLDER+"/"+fileNameOnly);
except Exception as e:
gui.consoleInsert(str(e), "red");
raise e;
gui.consoleInsert("Finish backup files");
def stagingResult(gui=None):
if(gui!=None):
gui.consoleInsert("Staging result", "blue");
stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), "stage result");
if not os.path.exists(os.path.dirname(stagingFolder)):
os.makedirs(stagingFolder);
copyAllFiles(TARGET_FOLDER, stagingFolder); | 2.75 | 3 |
lib/file_system.py | dhung09/KiteSublime | 81 | 12793203 | import sys
if sys.platform == 'darwin':
from ..lib.platform.darwin.file_system import *
elif sys.platform == 'win32':
from ..lib.platform.win32.file_system import *
elif sys.platform in ('linux', 'linux2'):
from ..lib.platform.linux.file_system import *
else:
from ..lib.platform.unsupported.file_system import *
def path_for_url(path):
return _path_for_url(path)
| 2.484375 | 2 |
automatization_of_data_mining_project/data_set_loader/exceptions/loader_exceptions.py | Sale1996/automatization_of_data_mining_project | 0 | 12793204 | <reponame>Sale1996/automatization_of_data_mining_project<gh_stars>0
class Error(Exception):
"""Base class for other exceptions"""
pass
class WrongPathNameFormatError(Error):
"""Raised when the path name has wrong format"""
pass
class FileIsNotFoundError(Error):
"""Raised when the input file from the path name was not found by loader"""
pass
class MissingImportantColumnsError(Error):
"""Raised when the loaded data set has missing one or more of the important columns"""
pass
| 2.203125 | 2 |
addons/website_event_meet/models/event_event.py | SHIVJITH/Odoo_Machine_Test | 0 | 12793205 | <filename>addons/website_event_meet/models/event_event.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Event(models.Model):
_inherit = "event.event"
meeting_room_ids = fields.One2many("event.meeting.room", "event_id", string="Meeting rooms")
meeting_room_count = fields.Integer("Room count", compute="_compute_meeting_room_count")
meeting_room_allow_creation = fields.Boolean(
"Allow Room Creation", compute="_compute_meeting_room_allow_creation",
readonly=False, store=True,
help="Let Visitors Create Rooms")
@api.depends("event_type_id", "website_menu", "community_menu")
def _compute_community_menu(self):
""" At type onchange: synchronize. At website_menu update: synchronize. """
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.community_menu = event.event_type_id.community_menu
elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu):
event.community_menu = True
elif not event.website_menu:
event.community_menu = False
@api.depends("meeting_room_ids")
def _compute_meeting_room_count(self):
meeting_room_count = self.env["event.meeting.room"].sudo().read_group(
domain=[("event_id", "in", self.ids)],
fields=["id:count"],
groupby=["event_id"],
)
meeting_room_count = {
result["event_id"][0]: result["event_id_count"]
for result in meeting_room_count
}
for event in self:
event.meeting_room_count = meeting_room_count.get(event.id, 0)
@api.depends("event_type_id", "community_menu", "meeting_room_allow_creation")
def _compute_meeting_room_allow_creation(self):
for event in self:
if event.event_type_id and event.event_type_id != event._origin.event_type_id:
event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation
elif event.community_menu and event.community_menu != event._origin.community_menu:
event.meeting_room_allow_creation = True
elif not event.community_menu or not event.meeting_room_allow_creation:
event.meeting_room_allow_creation = False
| 2.03125 | 2 |
build_tiles.py | anthonyrandell-madetech/tiles-builder | 0 | 12793206 | <reponame>anthonyrandell-madetech/tiles-builder<filename>build_tiles.py
import argparse
import sqlite3
import subprocess
import multiprocessing as mp
from pathlib import Path
from itertools import repeat
def run(command):
proc = subprocess.run(command, capture_output=True, text=True)
try:
        proc.check_returncode() # raise exception on non-zero return code
except subprocess.CalledProcessError as e:
print(f"\n---- STDERR ----\n{proc.stderr}")
print(f"\n---- STDOUT ----\n{proc.stdout}")
raise e
return proc
def get_geography_datasets(entity_model_path):
conn = sqlite3.connect(entity_model_path)
cur = conn.cursor()
cur.execute(
"""
SELECT
DISTINCT dataset
FROM
entity
WHERE geojson != ""
"""
)
geography_datasets = [x[0] for x in cur]
conn.close()
return geography_datasets
def get_dataset_features(entity_model_path, dataset=None):
conn = sqlite3.connect(entity_model_path)
json_properties = [
"'tippecanoe'",
"json_object('layer', entity.dataset)",
"'entity'",
"entity.entity",
"'properties'",
"json_patch(" "json_object(" "'name'",
"entity.name",
"'type'",
"entity.dataset",
"'organisation'",
"oe.name",
"'entity'",
"entity.entity",
"'entry-date'",
"entity.entry_date",
"'start-date'",
"entity.start_date",
"'end-date'",
"entity.end_date" ")",
"IFNULL(entity.json, '{}')" ")",
]
query = """
SELECT
json_patch(entity.geojson,
json_object({properties}))
FROM
entity
LEFT JOIN entity AS oe
ON entity.organisation_entity = oe.entity
WHERE entity.geojson != ''
""".format(
properties=",".join(json_properties)
)
cur = conn.cursor()
if dataset:
query += "AND entity.dataset == ?"
cur.execute(query, (dataset,))
else:
cur.execute(query)
results = ",".join(x[0] for x in cur)
results = results.rstrip(",")
return results
def create_geojson_file(features, output_path, dataset):
geojson = '{"type":"FeatureCollection","features":[' + features + "]}"
with open(f"{output_path}/{dataset}.geojson", "w") as f:
f.write(geojson)
def build_dataset_tiles(output_path, dataset):
build_tiles_cmd = [
"tippecanoe",
"-z15",
"-Z4",
"-r1",
"--no-feature-limit",
"--no-tile-size-limit",
f"--layer={dataset}",
f"--output={output_path}/{dataset}.mbtiles",
f"{output_path}/{dataset}.geojson",
]
run(build_tiles_cmd)
def build_tiles(entity_path, output_path, dataset):
print(dataset)
features = get_dataset_features(entity_path, dataset)
if dataset is None:
dataset = "dataset_tiles"
create_geojson_file(features, output_path, dataset)
build_dataset_tiles(output_path, dataset)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script to build mbtiles databases")
parser.add_argument(
"--entity-path",
type=Path,
nargs=1,
required=False,
default=Path("var/cache/entity.sqlite3"),
help="Path to the entity database",
)
parser.add_argument(
"--output-dir",
type=Path,
nargs=1,
required=False,
default=Path("var/cache/"),
help="The numbers available to use (six must be provided)",
)
cmd_args = parser.parse_args()
entity_path = cmd_args.entity_path[0]
output_path = cmd_args.output_dir[0]
datasets = get_geography_datasets(entity_path)
datasets.append(None)
with mp.Pool(mp.cpu_count()) as pool:
pool.starmap(
build_tiles, zip(repeat(entity_path), repeat(output_path), datasets)
)
| 2.40625 | 2 |
office365/sharepoint/portal/SPSiteManager.py | stardust85/Office365-REST-Python-Client | 0 | 12793207 | from office365.runtime.client_object import ClientObject
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.SPSiteCreationResponse import SPSiteCreationResponse
class SPSiteManager(ClientObject):
def __init__(self, context):
super(SPSiteManager, self).__init__(context, ResourcePath("SPSiteManager"), None)
def create(self, request):
"""Create a modern site"""
response = SPSiteCreationResponse()
qry = ServiceOperationQuery(self, "Create", None, request, "request", response)
self.context.add_query(qry)
return response
def delete(self, site_id):
"""Deletes a SharePoint site"""
payload = {
"siteId": site_id
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
def get_status(self, url):
"""Get the status of a SharePoint site"""
response = SPSiteCreationResponse()
qry = ServiceOperationQuery(self, "Status", None, {'url': url}, None, response)
self.context.add_query(qry)
self.context.get_pending_request().beforeExecute += self._construct_status_request
return response
def _construct_status_request(self, request):
query = self.context.get_pending_request().current_query
request.method = HttpMethod.Get
request.url += "?url='{0}'".format(query.parameter_type['url'])
self.context.get_pending_request().beforeExecute -= self._construct_status_request
| 2.1875 | 2 |
backend/app/datastores.py | ikumen/notas | 0 | 12793208 | <reponame>ikumen/notas
from bson import json_util, objectid
from pymongo import MongoClient, database
from typing import List
from backend.app import settings
class MongoCollection:
"""Generic class wrapper around Mongo DB operations around a collection.
"""
def __init__(self, collection_name: str, db: database.Database = None):
self.init(collection_name, db)
def init(self, collection_name: str, db: database.Database = None):
"""Initialize this store, with the given mongo Database or create one
if necessary, and create a reference to the underlying collection this
store will represent.
"""
if db is None:
mongo_client = MongoClient(settings.MONGO_URI)
db = mongo_client.get_database(settings.MONGO_DB_NAME)
self.collection = db.get_collection(collection_name)
def create(self, **kwargs) -> dict:
"""Add the given attributes to the collection as a new document.
"""
id = self.collection.insert_one(kwargs).inserted_id
kwargs['_id'] = id
return kwargs
def update(self, id, **kwargs) -> bool:
"""Update the document in the collection with the given attributes
and identified by the given id. Return True if document was updated.
"""
rv = self.collection.update_one({'_id': objectid.ObjectId(id)}, {'$set': kwargs})
return rv.matched_count == 1
def get(self, id: str) -> dict:
"""Return the document in the collection identified by the given id.
"""
for rv in self.collection.find({"_id": objectid.ObjectId(id)}).limit(1):
return rv
return None
def all(self) -> List[dict]:
"""Return all documents in collection.
"""
rv = self.collection.find()
return list(rv)
def delete(self, id: str) -> bool:
"""Delete document in collection identified by the given id. Returns
True if the document was deleted.
"""
rv = self.collection.delete_one({'_id': objectid.ObjectId(id)})
return rv.deleted_count == 1
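# Hypothetical usage sketch ('notes' and the field names below are illustrative only,
# not part of this project):
#   notes = MongoCollection('notes') # connects using settings.MONGO_URI
#   doc = notes.create(title='hello', body='world')
#   notes.update(doc['_id'], body='updated') # -> True if one document matched
#   notes.get(doc['_id']) # -> the stored document
#   notes.delete(doc['_id']) # -> True if one document was deleted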
| 2.734375 | 3 |
vision/src/VisionTest/BroadcasterTEST.py | victoriapc/HockusPockus | 0 | 12793209 | <reponame>victoriapc/HockusPockus<gh_stars>0
from VisionInterfaces.Broadcaster import Broadcaster
import cv2
class BroadcasterTEST(Broadcaster) :
def __init__(self):
self.m_currentFrame = 0
def broadcastCoordinatesOfPuck(self,i_xPos,i_Ypos):
"""
This implementation does nothing, as the USB implementation does not need to brodcast this information
Args:
i_xPos: The X position of the puck
i_Ypos: The Y position of the puck
"""
pass
def broadcastVideoOfPuck(self,i_frame):
"""
"Broadcasts" the video feed of the puck (i.e, displays it on the screen)
Args:
i_frame: The altered frame to publish
"""
cv2.imwrite("VisionTest\outputFrames\Outframe%d.jpg" % self.m_currentFrame, i_frame)
self.m_currentFrame += 1 | 2.734375 | 3 |
test.py | matchagreen/fkom-inventory-management-system | 0 | 12793210 | <gh_stars>0
import numpy as np
import pandas as pd
staff_planning = [
[[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [
15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]],
[[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [
15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]],
[[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [
15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]],
[[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [
15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]],
[[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [
15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]]
]
hourlystaff_needed = np.array(
[
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3]
])
hourlystaff_needed = np.array(
[[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10,
12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3]])
"""
hourlystaff_needed = np.array([
[0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6],
[0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6],
[0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6],
[0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6],
[0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6]
])
"""
"""
staff_planning = [
[ [0, 0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ],
[ [0, 0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ],
[ [0, 0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ],
[ [0, 0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ],
[ [0, 0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ],
]
hourlystaff_needed = np.array([
[0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2]
])
"""
"""
staff_planning = [
[ [0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ],
[ [0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ],
[ [0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ],
[ [0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ],
[ [0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21, 0, 8], [22, 0, 8], [23, 0, 8], [24, 0, 8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ]
]
hourlystaff_needed = np.array([
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],
])
"""
"""
Employee present: determine whether the employee is present (yes/no) at a given time,
based on the employee list of three values (id, start time, duration).
"""
def employee_present(employee, time):
employee_start_time = employee[1]
employee_duration = employee[2]
employee_end_time = employee_start_time + employee_duration
if (time >= employee_start_time) and (time < employee_end_time):
return True
return False
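# Illustration (hypothetical shift): employee_present([0, 9, 8], 12) is True,
# because the shift (id=0, start=9, duration=8) covers 9 <= 12 < 17,
# while employee_present([0, 9, 8], 17) is False.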
"""
"""
def staffplanning_to_hourlyplanning(staff_planning):
hourlystaff_week = []
for day in staff_planning:
hourlystaff_day = []
for employee in day:
employee_present_hour = []
for time in range(0, 24):
employee_present_hour.append(employee_present(employee, time))
hourlystaff_day.append(employee_present_hour)
hourlystaff_week.append(hourlystaff_day)
hourlystaff_week = np.array(hourlystaff_week).sum(axis=1)
return hourlystaff_week
"""
cost is calculated as hours understaffed + hours overstaffed
"""
def cost(hourlystaff, hourlystaff_needed):
errors = hourlystaff - hourlystaff_needed
overstaff = abs(errors[errors > 0].sum())
understaff = abs(errors[errors < 0].sum())
overstaff_cost = 1
understaff_cost = 1
cost = overstaff_cost * overstaff + understaff_cost * understaff
return cost
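# Illustration of the cost (hypothetical numbers, unrelated to the data above):
# hourlystaff = np.array([[3, 4, 1]]) versus hourlystaff_needed = np.array([[3, 3, 3]])
# gives errors [0, 1, -2], so cost = 1 (overstaffed) + 2 (understaffed) = 3.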
"""
"""
def generate_random_staff_planning(n_days, n_staff):
period_planning = []
for day in range(n_days):
day_planning = []
for employee_id in range(n_staff):
start_time = np.random.randint(0, 23)
duration = np.random.randint(0, 8) # changehere
employee = [employee_id, start_time, duration]
day_planning.append(employee)
period_planning.append(day_planning)
return period_planning
"""
"""
random_staff_planning = generate_random_staff_planning(n_days=5, n_staff=30)
random_staff_planning
cost(staffplanning_to_hourlyplanning(random_staff_planning), hourlystaff_needed)
"""
create a parent generation of n parent plannings
"""
def create_parent_generation(n_parents, n_days=5, n_staff=30):
parents = []
for i in range(n_parents):
parent = generate_random_staff_planning(n_days=n_days, n_staff=n_staff)
parents.append(parent)
return parents
"""
for each iteration, select randomly two parents and make a random combination of those two parents
by applying a randomly generated yes/no mask to the two selected parents
"""
def random_combine(parents, n_offspring):
n_parents = len(parents)
n_periods = len(parents[0])
n_employees = len(parents[0][0])
offspring = []
for i in range(n_offspring):
        random_dad = parents[np.random.randint(low=0, high=n_parents)]
        random_mom = parents[np.random.randint(low=0, high=n_parents)]
dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape)
mom_mask = np.logical_not(dad_mask)
child = np.add(np.multiply(random_dad, dad_mask),
np.multiply(random_mom, mom_mask))
offspring.append(child)
return offspring
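# Illustration of the mask-based crossover (hypothetical 1-day, 2-employee parents):
# dad = [[[0, 9, 8], [1, 10, 8]]], mom = [[[0, 14, 6], [1, 15, 6]]] and
# dad_mask = [[[1, 0, 1], [0, 1, 0]]] give child = [[[0, 14, 8], [1, 10, 6]]],
# i.e. every (employee, field) entry is taken from exactly one of the two parents.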
"""
mutation
"""
def mutate_parent(parent, n_mutations):
size1 = parent.shape[0]
size2 = parent.shape[1]
for i in range(n_mutations):
rand1 = np.random.randint(0, size1)
rand2 = np.random.randint(0, size2)
rand3 = np.random.randint(1, 2)
parent[rand1, rand2, rand3] = np.random.randint(0, 8) # change here
return parent
def mutate_gen(parent_gen, n_mutations):
mutated_parent_gen = []
for parent in parent_gen:
mutated_parent_gen.append(mutate_parent(parent, n_mutations))
return mutated_parent_gen
"""
selection - feasibility
"""
def is_acceptable(parent):
    # shifts longer than 8 hours are not acceptable
return np.logical_not((np.array(parent)[:, :, 2:] > 8).any())
def select_acceptable(parent_gen):
parent_gen = [parent for parent in parent_gen if is_acceptable(parent)]
return parent_gen
"""
selection - cost (inverse fitness)
"""
def select_best(parent_gen, hourlystaff_needed, n_best):
costs = []
for idx, parent_staff_planning in enumerate(parent_gen):
parent_hourly_planning = staffplanning_to_hourlyplanning(
parent_staff_planning)
parent_cost = cost(parent_hourly_planning, hourlystaff_needed)
costs.append([idx, parent_cost])
print('generation best is: {}, generation worst is: {}'.format(
pd.DataFrame(costs)[1].min(), pd.DataFrame(costs)[1].max()))
costs_tmp = pd.DataFrame(costs).sort_values(
by=1, ascending=True).reset_index(drop=True)
selected_parents_idx = list(costs_tmp.iloc[:n_best, 0])
selected_parents = [parent for idx, parent in enumerate(
parent_gen) if idx in selected_parents_idx]
return selected_parents
"""
Overall function: run the genetic algorithm for n_iterations and return the best planning found.
"""
def gen_algo(hourlystaff_needed, n_iterations):
generation_size = 1000
parent_gen = create_parent_generation(
n_parents=generation_size, n_days=5, n_staff=30)
for it in range(n_iterations):
parent_gen = select_acceptable(parent_gen)
parent_gen = select_best(parent_gen, hourlystaff_needed, n_best=100)
parent_gen = random_combine(parent_gen, n_offspring=generation_size)
parent_gen = mutate_gen(parent_gen, n_mutations=1)
best_child = select_best(parent_gen, hourlystaff_needed, n_best=1)
return best_child
best_planning = gen_algo(hourlystaff_needed, n_iterations=100)
print(best_planning)
print(staffplanning_to_hourlyplanning(best_planning[0]))
print(hourlystaff_needed)
| 1.75 | 2 |
wtss_plugin/help/source/conf.py | AbnerErnaniADSFatec/wtss-qgis | 1 | 12793211 | """
WTSS QGIS Plugin.
Python Client Library for Web Time Series Service.
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/.
-------------------
begin : 2019-05-04
git sha : $Format:%H$
copyright : (C) 2020 by INPE
email : <EMAIL>
This program is free software.
You can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
WTSS QGIS Plugin documentation build configuration file, created by
sphinx-quickstart on Sun Feb 12 17:11:03 2012.
"""
import sphinx_rtd_theme
from wtss_plugin.version import __version__
# -- Project information -----------------------------------------------------
project = 'WTSS-QGIS'
copyright = '2020, INPE.'
author = 'Brazil Data Cube Team'
# The full version, including alpha/beta/rc tags.
release = __version__
# -- General configuration ---------------------------------------------------
# Enabled Sphinx extensions.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx_copybutton',
'sphinx_rtd_theme',
]
# Paths that contain templates, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx.
language = 'en_US'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'analytics_id': 'XXXXXXXXXX',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'both',
'style_external_links': True,
'style_nav_header_background': '#2980B9',
'collapse_navigation': True,
'sticky_navigation': False,
'navigation_depth': 3,
'includehidden': True,
'titles_only': False
}
html_title = 'WTSS-QGIS'
html_baseurl = 'https://brazil-data-cube.github.io/'
html_context = {
'display_github': False,
'github_user': 'brazil-data-cube',
'github_repo': 'wtss-qgis',
'last_updated': False,
}
html_show_sourcelink = False
html_logo = './assets/img/logo-bdc.png'
html_favicon = './assets/img/favicon.ico'
html_static_path = [
'_static',
]
html_css_files = [ ]
html_last_updated_fmt = '%b %d, %Y'
html_show_sphinx = False
html_search_language = 'en'
numfig = True
numfig_format = {
'figure': 'Figure %s -',
'table': 'Table %s -',
'code-block': 'Code snippet %s -',
'section': 'Section %s.'
}
copybutton_prompt_text = r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: '
copybutton_prompt_is_regexp = True
master_doc = 'index' | 1.515625 | 2 |
model/signalgenerator.py | shuaiwng/python | 1 | 12793212 | import random
import numpy as np
import time
class Signalgenerator():
def __init__(self):
self.Fs = 8000
self.f = 2
self.sample = 8000
self.x = np.arange(1, self.sample+1)
self.y = np.empty(self.sample)
self.level = 0
self.filename = ''
def set_filename(self, name):
self.filename = name
def configure_device(self, level):
self.level = level
def measure_signal(self):
for i in range(0, self.sample):
delta = random.randint(1, self.level * 10) / 10 - self.level
self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f * i / self.Fs) + delta
def get_signal(self):
return self.y
def save_signal(self):
with open (self.filename, 'w') as f:
f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\n')
f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\n')
f.write('Spectrum:\n')
for i in range(0, self.sample):
f.write(str(self.x[i]) + '\t' + str(self.y[i]) + '\n') | 2.9375 | 3 |
server/tests/mod_auth/test_auth.py | Synergize-Southwest-Detroit/api | 2 | 12793213 | from server.mod_auth.auth import load_user # , register, login
from server.tests.helpers import FlaskTestCase, fixtures
class TestAuth(FlaskTestCase):
@fixtures('single_user.json')
def test_load_existing_user(self):
"""Test loading a single valid user"""
with self.flaskapp.test_request_context():
user = load_user(1)
assert user is not None
assert user.username == 'ganemone'
@fixtures('base.json')
def test_load_nonexisting_user(self):
"""Test loading a user not in the database"""
with self.flaskapp.test_request_context():
user = load_user(50)
assert user is None
| 2.375 | 2 |
normVirReads.py | gstarrett/oncovirus_tools | 2 | 12793214 | <gh_stars>1-10
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("g", type=str, help="The virus bedgraph file")
parser.add_argument("s", type=str, help="the bamindexstat file")
args = parser.parse_args()
# this can be simplified by just importing the bam file and then running bedtools and bamindexstat then processing those outputs
nums = list(range(1, 23))
chroms = ["chr" + str(num) for num in nums]
chroms.extend(["chrX", "chrY", "chrM", "X", "Y", "M", "MT"])
chroms.extend(str(num) for num in nums)
huReads = 0
path = args.s.split("/")
ext = path[-1].split(".")
lengths = {}
# stat file for human reads
with open(args.s) as statFH:
for line in statFH:
f = line.split()
if str(f[0]) in chroms:
huReads += int(f[4])
elif str(f[0]) != 'NoCoordinateCount=':
lengths.update({str(f[0]): int(f[2])})
# mcpyv.cov file for coverage of virus
virDict = {}
with open(args.g) as covFH:
for line in covFH:
f = line.split()
if str(f[0]) in virDict:
virDict[str(f[0])][0] += int(f[3])
virDict[str(f[0])][1] += int(f[2]) - int(f[1])
else:
virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]})
for key in virDict:
cov = virDict[key][0]
bases = virDict[key][1]
if (cov > 0):
avgCov = cov/bases
else:
avgCov = 0
normCp = (avgCov) / (huReads/1000)
# viral genome coverage per 1000 human reads
print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(key, ext[0], normCp, huReads, bases, avgCov, cov, lengths[key]))
| 2.6875 | 3 |
progress/home/migrations/0001_initial.py | Andrew0701/web-coursework | 4 | 12793215 | <reponame>Andrew0701/web-coursework
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('group', models.CharField(max_length=5)),
('course', models.IntegerField()),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('number', models.IntegerField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mark', models.IntegerField()),
('date', models.DateField()),
('job', models.ForeignKey(to='home.Job')),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('patronymic', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('group', models.ForeignKey(null=True, to='home.Group')),
('jobs', models.ManyToManyField(to='home.Job', through='home.Log')),
],
),
migrations.CreateModel(
name='Student_Subject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('student', models.ForeignKey(to='home.Student')),
],
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('patronymic', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('subjects', models.ManyToManyField(to='home.Subject')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AddField(
model_name='student_subject',
name='subject',
field=models.ForeignKey(to='home.Subject'),
),
migrations.AddField(
model_name='student_subject',
name='teacher',
field=models.ForeignKey(to='home.Teacher'),
),
migrations.AddField(
model_name='student',
name='subjects',
field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'),
),
migrations.AddField(
model_name='student',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='log',
name='student',
field=models.ForeignKey(to='home.Student'),
),
migrations.AddField(
model_name='job',
name='subject',
field=models.ForeignKey(to='home.Subject'),
),
]
| 2.015625 | 2 |
vireoSNP/utils/vireo_model_v02.py | andr-kun/vireo | 0 | 12793216 | # Core functions for Vireo model
# Author: <NAME>
# Date: 30/08/2019
# http://edwardlib.org/tutorials/probabilistic-pca
# https://github.com/allentran/pca-magic
import sys
import itertools
import numpy as np
from scipy.stats import entropy
from scipy.special import digamma
from .vireo_base import normalize, loglik_amplify, beta_entropy
def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True,
theta_prior=None, learn_theta=True, ASE_mode=False,
Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True,
min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2,
random_seed=None, verbose=False):
"""
Vireo core function to cluster the cells into donors.
"""
if random_seed is not None:
np.random.seed(random_seed)
if n_donor is None:
if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2:
print("Error: no n_donor and GT_prior has < 2 donors.")
sys.exit(1)
else:
n_donor = GT_prior.shape[1]
n_var = AD.shape[0] # n_variants
    ## initialize theta
if theta_prior is None:
#theta_prior = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]])
theta_shapes = theta_prior.copy()
if ASE_mode and len(theta_prior.shape) == 2:
theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2)
theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2)
n_gt = theta_shapes.shape[0] # number of genotype categories
## initialize Psi
if Psi is None:
Psi = np.ones(n_donor) / n_donor
else:
Psi = Psi[:n_donor] / np.sum(Psi[:n_donor])
if ID_prob_init is None:
ID_prob = normalize(np.random.rand(AD.shape[1], n_donor))
else:
ID_prob = normalize(ID_prob_init.copy())
## initialize GT
if GT_prior is None:
GT_prior = normalize(np.ones((n_var, n_donor, n_gt)))
GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob,
theta_shapes, GT_prior)
if learn_GT is False:
print("As GT_prior is not given, we change learn_GT to True.")
learn_GT = True
else:
GT_prob = GT_prior.copy()
GT_prior[GT_prior < min_GP] = min_GP
GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP
GT_prior = normalize(GT_prior)
        #TODO: check if there is a better way to deal with GT incompleteness
        if GT_prior.shape[1] < n_donor:
            _add_n = n_donor - GT_prior.shape[1]
            GT_prior = np.append(GT_prior,
                normalize(np.ones((n_var, _add_n, n_gt)), axis=2), axis=1)
GT_prob = GT_prior.copy()
if learn_GT is False:
print("As GT_prior is not complete, we change learn_GT to True.")
learn_GT = True
elif GT_prior.shape[1] > n_donor:
print("Warning: n_donor is smaller than samples in GT_prior, hence we "
"ignore n_donor.")
n_donor = GT_prior.shape[1]
# check if n_gt is matched to GT_prior
if GT_prior.shape[2] != n_gt:
print("Error: number of GT categories not matched: theta and GT_prior")
sys.exit(1)
## VB interations
LB = np.zeros(max_iter)
for it in range(max_iter):
ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob,
theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,
learn_GT=learn_GT, learn_theta=learn_theta,
check_doublet=check_doublet)
if it > min_iter:
if LB[it] < LB[it - 1]:
if verbose:
print("Warning: Lower bound decreases!\n")
elif it == max_iter - 1:
if verbose:
print("Warning: VB did not converge!\n")
elif LB[it] - LB[it - 1] < epsilon_conv:
break
## one-off check doublet
if check_doublet:
ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob,
theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,
learn_GT=True, learn_theta=learn_theta, check_doublet=True)
ID_prob = ID_prob2[:, :n_donor]
doublet_prob = ID_prob2[:, n_donor:]
else:
LB_doublet = LB[it]
n_donor_doublt = int(n_donor * (n_donor - 1) / 2)
doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt))
RV = {}
RV['ID_prob'] = ID_prob
RV['GT_prob'] = GT_prob
RV['doublet_prob'] = doublet_prob
RV['theta_shapes'] = theta_shapes
RV['LB_list'] = LB[: it+1]
RV['LB_doublet'] = LB_doublet
return RV
def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior,
Psi, doublet_prior=None, learn_GT=True, learn_theta=True,
check_doublet=False):
"""
Update the parameters of each component of the variantional
distribution.
The doublet probability can be created by doublet genotypes
"""
if check_doublet:
GT_both = add_doublet_GT(GT_prob)
theta_both = add_doublet_theta(theta_shapes)
n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1]
if doublet_prior is None:
doublet_prior = min(0.5, AD.shape[1] / 100000)
Psi_both = np.append(Psi * (1 - doublet_prior),
(np.ones(n_doublet_pair) / n_doublet_pair *
doublet_prior))
else:
Psi_both = Psi.copy()
GT_both = GT_prob.copy()
theta_both = theta_shapes.copy()
ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both)
ID_prob = ID_prob2[:, :GT_prob.shape[1]]
if learn_GT:
GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob,
theta_shapes, GT_prior)
if learn_theta:
theta_shapes = get_theta_shapes(AD, DP, ID_prob,
GT_prob, theta_prior)
### check how to calculate lower bound for when detecting doublets
LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes,
theta_prior, GT_prior, Psi_both)
return ID_prob2, GT_prob, theta_shapes, LB_val
def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior):
"""
"""
S1_gt = AD * ID_prob
SS_gt = DP * ID_prob
S2_gt = SS_gt - S1_gt
theta_shapes = theta_prior.copy()
for ig in range(theta_shapes.shape[0]):
_axis = 1 if len(theta_shapes.shape) == 3 else None
theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis)
theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis)
return theta_shapes
def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None):
"""
"""
if Psi is None:
Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1]
BD = DP - AD
logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1]))
for ig in range(GT_prob.shape[2]):
_digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1)
S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2)
SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas)
logLik_ID += (S1 + S2 - SS)
Psi_norm = np.log(Psi / np.sum(Psi))
ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1))
ID_prob = normalize(ID_prob, axis=1)
return ID_prob, logLik_ID
def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None):
"""
"""
if GT_prior is None:
GT_prior = np.ones((AD.shape[0], ID_prob.shape[1],
theta_shapes.shape[0]))
GT_prior = GT_prior / theta_shapes.shape[0]
S1_gt = AD * ID_prob
SS_gt = DP * ID_prob
S2_gt = SS_gt - S1_gt
logLik_GT = np.zeros(GT_prior.shape)
for ig in range(logLik_GT.shape[2]):
_digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
logLik_GT[:, :, ig] = (S1_gt * _digmma1 +
S2_gt * _digmma2 -
SS_gt * _digmmas)
# += np.log(GT_prior)
GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2)
GT_prob = normalize(np.exp(GT_prob), axis=2)
return GT_prob, logLik_GT
def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes,
theta_prior, GT_prior=None, Psi=None):
"""
"""
if GT_prior is None:
GT_prior = normalize(np.ones(GT_prob.shape), axis=2)
if Psi is None:
ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1]
else:
ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi))
LB_p = np.sum(logLik_ID * ID_prob)
KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1))
KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2))
KL_theta = -beta_entropy(theta_shapes, theta_prior)
# print(LB_p, KL_ID, KL_GT, KL_theta)
return LB_p - KL_ID - KL_GT - KL_theta
def add_doublet_theta(theta_shapes):
"""
calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by
    averaging their Beta parameters
Example
-------
theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
add_doublet_theta(theta_shapes)
"""
# TODO: support reduced GT for relatives
combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2)
db_idx = np.array([x for x in combn_iter])
_theta_p1 = theta_shapes[db_idx[:, 0]]
_theta_p2 = theta_shapes[db_idx[:, 1]]
_theta_mean = (normalize(_theta_p1, axis=1) +
normalize(_theta_p2, axis=1)) / 2.0
_theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) *
np.sum(_theta_p2, axis=1, keepdims=True))
theta_shapes_db = _theta_mean * _theta_sum
return np.append(theta_shapes, theta_shapes_db, axis=0)
def add_doublet_GT(GT_prob):
"""
Add doublet genotype by summarizing their probability:
New GT has five categories: 0, 1, 2, 1.5, 2.5
TODO: New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2
"""
combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2)
gt_idx = np.array([x for x in combn_iter]) # GT combination
g_idx1 = gt_idx[:, 0]
g_idx2 = gt_idx[:, 1]
combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2)
sp_idx = np.array([x for x in combn_iter]) # sample combination
s_idx1 = sp_idx[:, 0]
s_idx2 = sp_idx[:, 1]
## GT_prob has three genotypes: 0, 1, 2;
n_gt = GT_prob.shape[2]
GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0],
n_gt + gt_idx.shape[0]))
GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] *
GT_prob[:, s_idx2, :])
GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] *
GT_prob[:, s_idx2, :][:, :, g_idx2] +
GT_prob[:, s_idx1, :][:, :, g_idx2] *
GT_prob[:, s_idx2, :][:, :, g_idx1])
GT_prob2 = normalize(GT_prob2, axis=2)
GT_prob1 = np.append(GT_prob,
np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2)
return np.append(GT_prob1, GT_prob2, axis=1)
| 2.375 | 2 |
Python/Programming Fundamentals/Exams/05. Contact list.py | teodoramilcheva/softuni-software-engineering | 0 | 12793217 | contacts = input().split()
while True:
line = input()
tokens = line.split()
command = tokens[0]
if command == 'Add':
name = tokens[1]
index = int(tokens[2])
if name not in contacts:
contacts.append(name)
else:
if 0 <= index < len(contacts):
contacts.insert(index, name)
elif command == 'Remove':
index = int(tokens[1])
if 0 <= index < len(contacts):
contacts.pop(index)
elif command == 'Export':
start_i = int(tokens[1])
count = int(tokens[2])
contacts_to_print = contacts[start_i:start_i + count]
print(' '.join(contacts_to_print))
elif command == 'Print':
if tokens[1] == 'Reversed':
contacts = contacts[::-1]
final_contacts = " ".join(contacts)
print(f'Contacts: {final_contacts}')
break
| 3.671875 | 4 |
__SocialMedia_Sites.py | elithaxxor/pi_repo | 0 | 12793218 | <reponame>elithaxxor/pi_repo
#import socialmedia
#from socialmedia import USERNAME
USERNAME = input(f'[sys] Enter Username:' )
# INSTAGRAM
instagram = f'https://www.instagram.com/{USERNAME}'
# FACEBOOK
facebook = f'https://www.facebook.com/{USERNAME}'
#TWITTER
twitter = f'https://www.twitter.com/{USERNAME}'
# YOUTUBE
youtube = f'https://www.youtube.com/{USERNAME}'
# BLOGGER
blogger = f'https://{USERNAME}.blogspot.com'
# GOOGLE+
google_plus = f'https://plus.google.com/s/{USERNAME}/top'
# REDDIT
reddit = f'https://www.reddit.com/user/{USERNAME}'
# WORDPRESS
wordpress = f'https://{USERNAME}.wordpress.com'
# PINTEREST
pinterest = f'https://www.pinterest.com/{USERNAME}'
# GITHUB
github = f'https://www.github.com/{USERNAME}'
# TUMBLR
tumblr = f'https://{USERNAME}.tumblr.com'
# FLICKR
flickr = f'https://www.flickr.com/people/{USERNAME}'
# STEAM
steam = f'https://steamcommunity.com/id/{USERNAME}'
# VIMEO
vimeo = f'https://vimeo.com/{USERNAME}'
# SOUNDCLOUD
soundcloud = f'https://soundcloud.com/{USERNAME}'
# DISQUS
disqus = f'https://disqus.com/by/{USERNAME}'
# MEDIUM
medium = f'https://medium.com/@{USERNAME}'
# DEVIANTART
deviantart = f'https://{USERNAME}.deviantart.com'
# VK
vk = f'https://vk.com/{USERNAME}'
# ABOUT.ME
aboutme = f'https://about.me/{USERNAME}'
# IMGUR
imgur = f'https://imgur.com/user/{USERNAME}'
# FLIPBOARD
flipboard = f'https://flipboard.com/@{USERNAME}'
# SLIDESHARE
slideshare = f'https://slideshare.net/{USERNAME}'
# FOTOLOG
fotolog = f'https://fotolog.com/{USERNAME}'
# SPOTIFY
spotify = f'https://open.spotify.com/user/{USERNAME}'
# MIXCLOUD
mixcloud = f'https://www.mixcloud.com/{USERNAME}'
# SCRIBD
scribd = f'https://www.scribd.com/{USERNAME}'
# BADOO
badoo = f'https://www.badoo.com/en/{USERNAME}'
# PATREON
patreon = f'https://www.patreon.com/{USERNAME}'
# BITBUCKET
bitbucket = f'https://bitbucket.org/{USERNAME}'
# DAILYMOTION
dailymotion = f'https://www.dailymotion.com/{USERNAME}'
# ETSY
etsy = f'https://www.etsy.com/shop/{USERNAME}'
# CASHME
cashme = f'https://cash.me/{USERNAME}'
# BEHANCE
behance = f'https://www.behance.net/{USERNAME}'
# GOODREADS
goodreads = f'https://www.goodreads.com/{USERNAME}'
# INSTRUCTABLES
instructables = f'https://www.instructables.com/member/{USERNAME}'
# KEYBASE
keybase = f'https://keybase.io/{USERNAME}'
# KONGREGATE
kongregate = f'https://kongregate.com/accounts/{USERNAME}'
# LIVEJOURNAL
livejournal = f'https://{USERNAME}.livejournal.com'
# ANGELLIST
angellist = f'https://angel.co/{USERNAME}'
# LAST.FM
last_fm = f'https://last.fm/user/{USERNAME}'
# DRIBBBLE
dribbble = f'https://dribbble.com/{USERNAME}'
# CODECADEMY
codecademy = f'https://www.codecademy.com/{USERNAME}'
# GRAVATAR
gravatar = f'https://en.gravatar.com/{USERNAME}'
# PASTEBIN
pastebin = f'https://pastebin.com/u/{USERNAME}'
# FOURSQUARE
foursquare = f'https://foursquare.com/{USERNAME}'
# ROBLOX
roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}'
# GUMROAD
gumroad = f'https://www.gumroad.com/{USERNAME}'
# NEWSGROUND
newsground = f'https://{USERNAME}.newgrounds.com'
# WATTPAD
wattpad = f'https://www.wattpad.com/user/{USERNAME}'
# CANVA
canva = f'https://www.canva.com/{USERNAME}'
# CREATIVEMARKET
creative_market = f'https://creativemarket.com/{USERNAME}'
# TRAKT
trakt = f'https://www.trakt.tv/users/{USERNAME}'
# 500PX
five_hundred_px = f'https://500px.com/{USERNAME}'
# BUZZFEED
buzzfeed = f'https://buzzfeed.com/{USERNAME}'
# TRIPADVISOR
tripadvisor = f'https://tripadvisor.com/members/{USERNAME}'
# HUBPAGES
hubpages = f'https://{USERNAME}.hubpages.com'
# CONTENTLY
contently = f'https://{USERNAME}.contently.com'
# HOUZZ
houzz = f'https://houzz.com/user/{USERNAME}'
#BLIP.FM
blipfm = f'https://blip.fm/{USERNAME}'
# WIKIPEDIA
wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}'
# HACKERNEWS
hackernews = f'https://news.ycombinator.com/user?id={USERNAME}'
# CODEMENTOR
codementor = f'https://www.codementor.io/{USERNAME}'
# REVERBNATION
reverb_nation = f'https://www.reverbnation.com/{USERNAME}'
# DESIGNSPIRATION
designspiration = f'https://www.designspiration.net/{USERNAME}'
# BANDCAMP
bandcamp = f'https://www.bandcamp.com/{USERNAME}'
# COLOURLOVERS
colourlovers = f'https://www.colourlovers.com/love/{USERNAME}'
# IFTTT
ifttt = f'https://www.ifttt.com/p/{USERNAME}'
# EBAY
ebay = f'https://www.ebay.com/usr/{USERNAME}'
# SLACK
slack = f'https://{USERNAME}.slack.com'
# OKCUPID
okcupid = f'https://www.okcupid.com/profile/{USERNAME}'
# TRIP
trip = f'https://www.trip.skyscanner.com/user/{USERNAME}'
# ELLO
ello = f'https://ello.co/{USERNAME}'
# TRACKY
tracky = f'https://tracky.com/user/~{USERNAME}'
# BASECAMP
basecamp = f'https://{USERNAME}.basecamphq.com/login'
#
''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME '''
WEBSITES = [
instagram, facebook, twitter, youtube, blogger, google_plus, reddit,
wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus,
medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify,
mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance,
goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm,
dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground,
wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages,
contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration,
bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip, ello, tracky, basecamp]
#
| 2.15625 | 2 |
shadowsocks/objects/user.py | PaperDashboard/shadowsocks-vanilla | 1 | 12793219 | class User(object):
def __init__(self, config):
self.port = int(config["port"])
self.password = str(config["password"])
self.method = str(config["method"])
if "_id" in config:
self.id = config["_id"]["$oid"]
else:
self.id = str(self.port)
def __eq__(self, other):
return (self.port == other.port and
                self.password == other.password and
self.method == other.method and
self.id == other.id) | 3.296875 | 3 |
tests/test_utilities.py | zachdj/team-crux-p1 | 1 | 12793220 | <filename>tests/test_utilities.py
"""
Unit tests for functions in the src.utilities package
"""
import unittest
import warnings
from pyspark import SparkContext, SparkConf
import src.utilities.preprocess as preprocess
import src.utilities.utils as utils
class TestPreprocess(unittest.TestCase):
def setUp(self):
"""
This setup function will be called before any test is run.
It's a useful place to do initialization that otherwise would have to be repeated for every test function
"""
pass
def test_remove_html_character_references(self):
# test that the function performs as expected
        self.assertEqual(preprocess.remove_html_character_references("&quot;snow"), "snow")
        self.assertEqual(preprocess.remove_html_character_references("desk&quot;"), "desk")
        self.assertEqual(preprocess.remove_html_character_references("airplane&amp;"), "airplane")
        self.assertEqual(preprocess.remove_html_character_references("air&amp;plane"), "airplane")
        self.assertEqual(preprocess.remove_html_character_references("&quot;government&quot;"), "government")
        self.assertEqual(preprocess.remove_html_character_references("Desk&quot;"), "Desk")
        # test that the function has no side effects
        word = "Investment&quot;"
        preprocess.remove_html_character_references(word)
        self.assertEqual(word, "Investment&quot;")
def test_strip_punctuation(self):
# test that the function performs as expected
self.assertEqual(preprocess.strip_punctuation("'snow"), "snow")
self.assertEqual(preprocess.strip_punctuation("snow."), "snow")
self.assertEqual(preprocess.strip_punctuation("snow!"), "snow")
self.assertEqual(preprocess.strip_punctuation("?snow?"), "snow")
self.assertEqual(preprocess.strip_punctuation("snow\""), "snow")
self.assertEqual(preprocess.strip_punctuation("sn!ow"), "snow")
# test that the function has no side effects
word = "Investment."
preprocess.remove_html_character_references(word)
self.assertEqual(word, "Investment.")
def test_tokenize(self):
# test that the function performs as expected
line = "the quick brown \t fox jumps \r\n over the lazy \n dog"
tokens = preprocess.tokenize(line)
self.assertTrue("the" in tokens)
self.assertTrue("quick" in tokens)
self.assertTrue("brown" in tokens)
self.assertTrue("fox" in tokens)
self.assertTrue("jumps" in tokens)
self.assertTrue("over" in tokens)
self.assertTrue("the" in tokens)
self.assertTrue("lazy" in tokens)
self.assertTrue("dog" in tokens)
def test_split_by_comma(self):
line = "the,quick,brown,fox,jumps,over,the,lazy,dog"
tokens = preprocess.split_by_comma(line)
self.assertTrue("the" in tokens)
self.assertTrue("quick" in tokens)
self.assertTrue("brown" in tokens)
self.assertTrue("fox" in tokens)
self.assertTrue("jumps" in tokens)
self.assertTrue("over" in tokens)
self.assertTrue("the" in tokens)
self.assertTrue("lazy" in tokens)
self.assertTrue("dog" in tokens)
def test_remove_irrelevant_labels(self):
labels = ["GCAT", "CCAT", "ECAT", "MCAT", "E12", "E54" "G154", "M13", "GWEA"]
filtered = preprocess.remove_irrelevant_labels(labels)
self.assertEqual(len(filtered), 4)
self.assertTrue("GCAT" in filtered)
self.assertTrue("CCAT" in filtered)
self.assertTrue("ECAT" in filtered)
self.assertTrue("MCAT" in filtered)
class TestUtils(unittest.TestCase):
def setUp(self):
# for some reason, running spark code within a unittest throws a bunch of ResourceWarnings
# check out this issue: https://github.com/requests/requests/issues/3912
warnings.filterwarnings(action="ignore", category=ResourceWarning)
def test_custom_zip(self):
# for this test, we don't want to run on an actual cluster. A local master is sufficient
conf = SparkConf().setAppName("Unit Tests").setMaster("local").set('spark.logConf', 'true')
sc = SparkContext(conf=conf)
sc.setLogLevel("FATAL")
nums = list(range(0, 10))
squares = [num**2 for num in nums]
pairs = [(num, num**2) for num in nums]
# the custom zip function should work on RDDs with different numbers of slices
rdd1 = sc.parallelize(nums, 5)
rdd2 = sc.parallelize(squares, 3)
combined = utils.custom_zip(rdd1, rdd2)
combined = combined.sortByKey()
combined = list(combined.collect())
        for idx, pair in enumerate(pairs):
            self.assertEqual(pair, combined[idx])
if __name__ == '__main__':
unittest.main()
| 3 | 3 |
scripts/morphware_data_sizes.py | QuintusTheFifth/telliot-core | 9 | 12793221 | <filename>scripts/morphware_data_sizes.py
"""Print the size of the data returned by a Morphware query (data reported)."""
import json
import sys
import requests
from telliot_core.queries.morphware import Morphware
def main():
"""
Print the size of the data returned by a Morphware query
using Morphare-provided data source endpoint:
curl --request POST http://167.172.239.133:5000/products-2 -H "Content-Type: application/json" \
-d '{"provider":"amazon","service":"compute","region":"us-east-1"}'
Example:
$ python scripts/morphware_data_sizes.py
"""
# Retrieve data from source provided by Morphware
headers = {
# Already added when you pass json=
# 'Content-Type': 'application/json',
}
json_data = {
"provider": "amazon",
"service": "compute",
"region": "us-east-1",
}
rsp = requests.post("http://172.16.31.10:5000/products-2", headers=headers, json=json_data)
unfiltered_data = json.loads(rsp.text)
print("Num products:", len(unfiltered_data))
# Remove unneeded product data
data = []
for i in range(len(unfiltered_data)):
data.append(
json.dumps(
{
"instanceType": unfiltered_data[i]["Instance Type"],
"cudaCores": unfiltered_data[i]["CUDA Cores"],
"numCPUs": unfiltered_data[i]["Number of CPUs"],
"RAM": unfiltered_data[i]["RAM"],
"onDemandPricePerHour": unfiltered_data[i]["On-demand Price per Hour"],
}
)
)
print("Expample filtered data:")
print(json.dumps(data[:2], indent=4))
# Get size of encoded string[]
q = Morphware(version=1)
submit_value = q.value_type.encode(data)
print(f"Size of data being reported: {sys.getsizeof(submit_value)} bytes")
# print(submit_value.hex())
if __name__ == "__main__":
main()
| 2.875 | 3 |
mtse/model_utils.py | FractalySyn/mtse | 1 | 12793222 | <filename>mtse/model_utils.py
import torch
import torch.nn as nn
import torch.nn.functional as F
def rmspe(pred, true):
"""Computes RMSPE"""
return torch.mean(((true - pred) / true)**2)**0.5
def rmse(pred, true):
"""Computes RMSE"""
return torch.mean((true - pred)**2)**0.5
def mape(pred, true):
"""Computes MAPE"""
return torch.mean(torch.abs((true - pred) / true))
def acc(pred, true):
"""Computes Accuracy"""
    return torch.mean((pred == true).float())
def evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'):
"""Predicts and computes the loss over the provided data
Parameters
----------
model : torch.nn.Module
model containing the weights to compute the predictions
data_loader : torch.utils.data.DataLoader
data to evaluate
dim : int
number of time series
loss : torch loss
loss function
argmax : bool, optional
if True, an arg max is applied to predicted values (default is False)
device : str, optional
'cuda' or 'cpu', (default is 'cuda')
Returns
-------
float
"""
pred = []; true = []
for data_batch, label in data_loader:
data_batch, label = data_batch.to(device), label.float().to(device)
with torch.no_grad():
out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1])
if argmax:
            out = out.argmax(1)
pred.append(out); true.append(label)
pred = torch.cat(pred, 0).squeeze()
true = torch.cat(true, 0).squeeze()
return loss(pred, true)
def predict(test_loader, device, model, model_type, dim):
"""
Parameters
----------
test_loader : torch.utils.data.DataLoader
data to predict on
device : str
'cuda' or 'cpu'
model : torch.nn.Module
model containing the weights to compute the predictions
model_type : str
'regression' or 'classification', for the latter an arg max is applied
dim : int
number of time series
Returns
-------
torch.tensor
"""
pred = []
for test_batch in test_loader:
test_batch = test_batch.to(device)
with torch.no_grad():
out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1])
if model_type=='classification':
out = out.argmax(1)
pred.append(out)
return torch.cat(pred, 0).squeeze()
def count_parameters(model):
"""Returns the number of weights that can be trained in the provided model"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class EarlyStopping():
"""
Early stopping to stop the training when the loss does not improve after
certain epochs.
Attributes
----------
patience : int
how many epochs to wait before stopping when loss is not improving
min_delta : float
minimum difference between new loss and old loss for new loss to be considered as an improvement
counter : int
number of epochs without improvement
best_loss : float or NoneType
validation loss of the last epoch for which the counter was equal to 0
early_stop : bool
if True, the training will break
"""
def __init__(self, patience=5, min_delta=0.):
"""
Parameters
---------
patience : int, optional
how many epochs to wait before stopping when loss is not improving (default is 5)
min_delta : float, optional
minimum difference between new loss and old loss for new loss to be considered as an improvement
(default is 0.)
"""
self.patience = patience
self.min_delta = min_delta
self.counter = 0
self.best_loss = None
self.early_stop = False
def __call__(self, val_loss):
"""
Parameters
----------
val_loss : torch loss or float
"""
if self.best_loss == None:
self.best_loss = val_loss
elif self.best_loss - val_loss > self.min_delta:
self.best_loss = val_loss
# reset counter if validation loss improves
self.counter = 0
elif self.best_loss - val_loss < self.min_delta:
self.counter += 1
if self.counter >= self.patience:
print('Early stopping')
self.early_stop = True
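# Hedged usage sketch (the model, loaders, `dim` and epoch count below are
# hypothetical and not defined in this module):
#
#   early_stopping = EarlyStopping(patience=5, min_delta=0.0)
#   for epoch in range(100):
#       train_one_epoch(model, train_loader)                    # hypothetical helper
#       val_loss = evaluate_model(model, val_loader, dim, loss=rmse)
#       early_stopping(val_loss)
#       if early_stopping.early_stop:
#           break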
| 2.5625 | 3 |
train_seg.py | Debatrix/AquulaCam | 2 | 12793223 | <filename>train_seg.py<gh_stars>1-10
import os
from scipy import stats
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from base_train import train
from util import LoadConfig
from dataset import HUTDataset
from model.network import UNetLike
from model.framework import Segmentation
from model.loss import *
class Config(LoadConfig):
def __init__(self) -> None:
super(Config, self).__init__()
self.info = ""
self.train_name = "UNetLike_enhance_Segmentation"
self.dataset_path = 'dataset/HutIris-Blur'
self.cp_path = "checkpoints/1030_194858_UNetLike/80_2.2903e-03.pth"
self.cp_path = ""
self.visible = True
self.log_interval = 5
self.save_interval = 5
self.less_data = False
self.debug = False
self.use_dct = False
self.model_channel = 16
self.mask_down = 8
self.load_to_ram = False
self.batchsize = 32
self.device = [0, 1, 2, 3]
self.num_workers = 0
self.seed = np.random.randint(9999)
self.max_epochs = 500
self.lr = 8e-4
self.momentum = 0.9
self.weight_decay = 1e-4
self.apply()
def get_dataloaders(config):
train_data = HUTDataset(path=config['dataset_path'],
mode='train',
less_data=config['less_data'],
lrud_move=True)
train_data_loader = DataLoader(train_data,
config['batchsize'],
drop_last=True,
shuffle=True,
pin_memory=True,
num_workers=config['num_workers'])
val_data = HUTDataset(path=config['dataset_path'],
mode='val',
less_data=False)
val_data_loader = DataLoader(val_data,
config['batchsize'],
shuffle=True,
drop_last=True,
pin_memory=True,
num_workers=config['num_workers'])
return train_data_loader, val_data_loader
def evaluation(val_save, val_num):
pred_loss = val_save['pred_loss'] / val_num
mask_loss = val_save['mask_loss'] / val_num
position = np.concatenate(val_save['position'], axis=0)
offset = np.concatenate(val_save['offset'], axis=0)
srocc = stats.spearmanr(offset.reshape(-1), position.reshape(-1))[0]
lcc = stats.pearsonr(offset.reshape(-1), position.reshape(-1))[0]
return {
"Val_pred_loss": pred_loss,
"Val_mask_loss": mask_loss,
"SROCC": srocc,
"LCC": lcc
}
def val_plot(log_writer, epoch, val_save):
idx = torch.randint(val_save['image'].shape[0], (1, )).item()
image = nn.functional.interpolate(
val_save['image'],
(val_save['mask'][idx].shape[-2], val_save['mask'][idx].shape[-1]),
mode='bilinear',
align_corners=True)
mask = val_save['mask'][idx] + image[idx]
heatmap = val_save['heatmap'][idx] + image[idx]
image = torch.clamp(torch.cat((heatmap, mask, image[idx]), dim=0), 0, 1)
log_writer.add_image('Val/image', image, epoch)
if __name__ == "__main__":
# set config
config = Config()
# data
print('Loading Data')
dataloaders = get_dataloaders(config)
# model and criterion
criterion = SegmentationLoss()
model = UNetLike(dct=config['use_dct'],
channel=config['model_channel'],
downsample=False,
trainable={
'encoder': True,
'decoder': True,
'rnn': False
})
model = Segmentation(model, criterion)
# optimizer and scheduler
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
model.parameters()),
lr=config['lr'])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
'min',
factor=0.5,
patience=config['log_interval'] * 2,
verbose=True)
optimizers = (optimizer, scheduler)
# train
train(config, dataloaders, model, optimizers, evaluation, val_plot)
| 2.078125 | 2 |
transistor/persistence/__init__.py | awesome-archive/transistor | 232 | 12793224 | # -*- coding: utf-8 -*-
"""
transistor.persistence
~~~~~~~~~~~~
This module implements classes and methods to aid persistence, including
database, spreadsheet export, write to file.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter,
CsvItemExporter, MarshalItemExporter, BaseItemExporter)
from .containers import SplashScraperItems
from .item import Item, Field
from .newt_db.newt_crud import get_job_results, delete_job
__all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter',
'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter',
'MarshalItemExporter', 'BaseItemExporter', 'SplashScraperItems']
| 1.992188 | 2 |
convolution_matrices/convmat3D.py | zhaonat/RCWA | 66 | 12793225 | import numpy as np
import matplotlib.pyplot as plt
## preliminary tests
#inputs: A, P, Q, R
# A is the discrete representation of epsilon
#number of spatial harmonics (or orders)
P = 6;
Q = 6;
R = 6;
Nx = 20; Ny = 20; Nz = 1; #this is fundamentally 3D...not sure how to make general for 2D
N = np.array([Nx, Ny, Nz]);
## generalize two 2D geometries;
A = np.ones(N+1)
A[2:18, 2:18, 0] = 12;
plt.imshow(A[:,:,0]);
plt.show()
# deal with different dimensionalities
if(len(N) == 1):
Q = 1; R = 1;
elif(len(N) == 2):
R = 1;
NH = P*Q*R;
p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1));
print(p)
q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1));
r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1));
Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A));
#central indices;
p0 = int(np.floor(Nx/2));
q0 = int(np.floor(Ny/2));
r0 = int(np.floor(Nz/2));
C = np.zeros((NH, NH))
C = C.astype(complex);
for rrow in range(R):
for qrow in range(Q):
for prow in range(P):
#first term locates z plane, 2nd locates y column, prow locates x
row = (rrow)*Q*P+(qrow)*P + prow;
for rcol in range(R):
for qcol in range(Q):
for pcol in range(P):
col = (rcol)*Q*P + (qcol)*P + pcol;
pfft = p[prow] - p[pcol];
qfft = q[qrow] - q[qcol];
rfft = r[rrow] - r[rrow]
C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft];
plt.imshow(np.abs(Af[:, :, 0]));
plt.show()
plt.imshow(np.abs(C));
plt.show()
plt.plot(np.diag(abs(C)))
plt.show() | 2.671875 | 3 |
pyassim/llock.py | ZoneTsuyoshi/pyassim | 0 | 12793226 | <reponame>ZoneTsuyoshi/pyassim<filename>pyassim/llock.py
"""
=====================================================================
Inference with Local Linear Operator Construction with Kalman Filter
=====================================================================
This module implements the Local LOCK
for Linear-Gaussian state space models
"""
from logging import getLogger, StreamHandler, DEBUG
logger = getLogger("llock")
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
import os
import math
import time
import multiprocessing as mp
import itertools
import numpy as np
from .utils import array1d, array2d
from .util_functions import _parse_observations, _last_dims, \
_determine_dimensionality
def _local_calculation(i, j, A, y):
    """Estimate element (i, j) of the transition matrix from the observations
    restricted to the union of the neighborhoods of nodes i and j
    (least-squares fit of y[t+1] on y[t] via the pseudo-inverse).
    """
    local_A = A[i] | A[j]
    local_node_number_i = len(np.where(local_A[:i])[0])
    local_node_number_j = len(np.where(local_A[:j])[0])
    global_node_number = np.where(local_A)[0]
    Gh = y[1:, global_node_number].T \
        @ np.linalg.pinv(y[:-1, global_node_number].T)
    return Gh[local_node_number_i, local_node_number_j]
class LocalLOCK(object) :
"""Implements the Local LOCK.
This class implements the LLOCK,
for a Linear Gaussian model specified by,
.. math::
x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\
y_{t} &= H_{t} x_{t} + d_{t} + w_{t} \\
[v_{t}, w_{t}]^T &\sim N(0, [[Q_{t}, O], [O, R_{t}]])
The LLOCK is an algorithm designed to estimate
:math:`P(x_t | y_{0:t})` and :math:`F` in real-time.
As all state transitions and observations are
linear with Gaussian distributed noise, these distributions can be
represented exactly as Gaussian distributions with mean
`x_filt[t]` and covariances `V_filt`.
Args:
observation [n_time, n_dim_obs] {numpy-array, float}
also known as :math:`y`. observation value
initial_mean [n_dim_sys] {float}
also known as :math:`\mu_0`. initial state mean
initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float}
also known as :math:`\Sigma_0`. initial state covariance
        transition_matrix [n_dim_sys, n_dim_sys] {numpy-array, float}
also known as :math:`F`. transition matrix from x_{t-1} to x_{t}
observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float}
also known as :math:`H`. observation matrix from x_{t} to y_{t}
transition_covariance [n_time-1, n_dim_sys, n_dim_sys]
or [n_dim_sys, n_dim_sys]
{numpy-array, float}
also known as :math:`Q`. system transition covariance
observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float}
also known as :math:`R`. observation covariance
adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float}
also known as :math:`A`. adjacency matrix,
if there is a link between i and j, A[i,j]=1, else A[i,j]=0.
Besides, you should A[i,i]=1 forall i.
method {string}
: method for localized calculation
"elementwise": calculation for each element of transition matrix
"local-average": average calculation for specific two observation dimenstions
"all-average": average calculation for each observation dimenstions
update_interval {int}
            interval between updates of the transition matrix F
        eta (in (0,1])
            update rate for the transition matrix F
        cutoff
            cutoff distance used when updating the transition matrix F
save_dir {str, directory-like}
directory for saving transition matrices and filtered states.
if this variable is `None`, cannot save them.
advance_mode {bool}
if True, calculate transition matrix before filtering.
if False, calculate the matrix after filtering.
n_dim_sys {int}
dimension of system transition variable
n_dim_obs {int}
dimension of observation variable
dtype {type}
data type of numpy-array
use_gpu {bool}
            whether to use gpu and cupy.
if True, you need install package `cupy`.
if False, set `numpy` for calculation.
num_cpu {int} or `all`
number of cpus duaring calculating transition matrix.
you can set `all` or positive integer.
Attributes:
y : `observation`
F : `transition_matrix`
Q : `transition_covariance`
H : `observation_matrix`
R : `observation_covariance`
"""
def __init__(self, observation = None,
initial_mean = None, initial_covariance = None,
transition_matrix = None, observation_matrix = None,
transition_covariance = None, observation_covariance = None,
adjacency_matrix = None, method = "elementwise",
estimation_length = 10, estimation_interval = 1,
eta = 1., cutoff = 10.,
estimation_mode = "backward",
save_dir = None,
n_dim_sys = None, n_dim_obs = None, dtype = "float32",
use_gpu = False, num_cpu = "all"):
"""Setup initial parameters.
"""
if use_gpu:
try:
import cupy
self.xp = cupy
self.use_gpu = True
except:
self.xp = np
self.use_gpu = False
else:
self.xp = np
self.use_gpu = False
# determine dimensionality
self.n_dim_sys = _determine_dimensionality(
[(transition_matrix, array2d, -2),
(initial_mean, array1d, -1),
(initial_covariance, array2d, -2),
(observation_matrix, array2d, -1)],
n_dim_sys,
self.use_gpu
)
self.n_dim_obs = _determine_dimensionality(
[(observation_matrix, array2d, -2),
(observation_covariance, array2d, -2),
(adjacency_matrix, array2d, -2)],
n_dim_obs,
self.use_gpu
)
# self.y = _parse_observations(observation)
self.y = self.xp.asarray(observation).copy()
if initial_mean is None:
self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype)
else:
self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype)
if initial_covariance is None:
self.initial_covariance = self.xp.eye(self.n_dim_sys, dtype = dtype)
else:
self.initial_covariance = self.xp.asarray(initial_covariance, dtype = dtype)
if transition_matrix is None:
self.F = self.xp.eye(self.n_dim_sys, dtype = dtype)
else:
self.F = self.xp.asarray(transition_matrix, dtype = dtype)
if transition_covariance is not None:
self.Q = self.xp.asarray(transition_covariance, dtype = dtype)
else:
self.Q = self.xp.eye(self.n_dim_sys, dtype = dtype)
if observation_matrix is None:
self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype)
else:
self.H = self.xp.asarray(observation_matrix, dtype = dtype)
self.HI = self.xp.linalg.pinv(self.H)
if observation_covariance is None:
self.R = self.xp.eye(self.n_dim_obs, dtype = dtype)
else:
self.R = self.xp.asarray(observation_covariance, dtype = dtype)
if adjacency_matrix is None:
self.A = self.xp.eye(self.n_dim_obs, dtype=bool)
else:
self.A = self.xp.asarray(adjacency_matrix, dtype = bool)
if method in ["elementwise", "local-average", "all-average"]:
self.method = method
else:
raise ValueError("Variable \"method\" only allows \"elementwise\", \"local-average\" "
+ "or \"all-average\". So, your setting \"{}\" need to be changed.".format(method))
if estimation_mode in ["forward", "middle", "backward"]:
self.estimation_mode = estimation_mode
else:
raise ValueError("\"estimation_mode\" must be choosen from \"forward\","
+ " \"middle\", or \"backward\".")
if self.estimation_mode in ["forward", "backward"]:
self.tau = int(estimation_length)
self.tau2 = int((estimation_length - 1) / 2)
else:
self.tau2 = int((estimation_length - 1) / 2)
self.tau = 2 * self.tau2 + 1
self.I = estimation_interval
self.tm_count = 1
if save_dir is None:
self.save_change = False
else:
self.save_change = True
self.save_dir = save_dir
self.fillnum = len(str(int(self.y.shape[0] / self.I)))
self.xp.save(os.path.join(self.save_dir, "transition_matrix_" + str(0).zfill(self.fillnum) + ".npy"), self.F)
if num_cpu == "all":
self.num_cpu = mp.cpu_count()
else:
self.num_cpu = num_cpu
self.eta = eta
self.cutoff = cutoff
self.dtype = dtype
self.times = self.xp.zeros(5)
def forward(self):
"""Calculate prediction and filter for observation times.
Attributes:
T {int}
: length of data y
x_pred [n_time, n_dim_sys] {numpy-array, float}
: mean of hidden state at time t given observations
from times [0...t-1]
V_pred [n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of hidden state at time t given observations
from times [0...t-1]
x_filt [n_time, n_dim_sys] {numpy-array, float}
: mean of hidden state at time t given observations from times [0...t]
V_filt [n_dim_sys, n_dim_sys] {numpy-array, float}
: covariance of hidden state at time t given observations
from times [0...t]
"""
T = self.y.shape[0]
self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)
self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)
# calculate prediction and filter for every time
for t in range(T):
# visualize calculating time
print("\r filter calculating... t={}".format(t) + "/" + str(T), end="")
if t == 0:
# initial setting
self.x_pred[0] = self.initial_mean
self.V_pred = self.initial_covariance.copy()
self._update_transition_matrix(self.tau)
else:
if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode=="forward":
self._update_transition_matrix(t+self.tau-1)
elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode=="backward":
self._update_transition_matrix(t)
elif t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode=="middle":
self._update_transition_matrix(t+self.tau2)
start_time = time.time()
self._predict_update(t)
self.times[0] += time.time() - start_time
if self.xp.any(self.xp.isnan(self.y[t])):
self.x_filt[t] = self.x_pred[t]
self.V_filt = self.V_pred
else :
start_time = time.time()
self._filter_update(t)
self.times[1] += time.time() - start_time
if self.save_change:
self.xp.save(os.path.join(self.save_dir, "states.npy"), self.x_filt)
def _predict_update(self, t):
"""Calculate fileter update
Args:
t {int} : observation time
"""
# extract parameters for time t-1
Q = _last_dims(self.Q, t - 1, 2, self.use_gpu)
# calculate predicted distribution for time t
self.x_pred[t] = self.F @ self.x_filt[t-1]
self.V_pred = self.F @ self.V_filt @ self.F.T + Q
def _filter_update(self, t):
"""Calculate fileter update without noise
Args:
t {int} : observation time
Attributes:
K [n_dim_sys, n_dim_obs] {numpy-array, float}
: Kalman gain matrix for time t
"""
# extract parameters for time t
R = _last_dims(self.R, t, 2, self.use_gpu)
# calculate filter step
K = self.V_pred @ (
self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R)
)
self.x_filt[t] = self.x_pred[t] + K @ (
self.y[t] - (self.H @ self.x_pred[t])
)
self.V_filt = self.V_pred - K @ (self.H @ self.V_pred)
def _update_transition_matrix(self, t):
"""Update transition matrix
Args:
t {int} : observation time
"""
G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype)
start_time = time.time()
if self.method=="elementwise": # elementwise
if self.use_gpu:
A = self.A.get()
y = self.y[t-self.tau:t+1].get()
else:
A = self.A
y = self.y[t-self.tau:t+1]
where_is_A = np.where(A)
p = mp.Pool(self.num_cpu)
G_local = p.starmap(_local_calculation, zip(where_is_A[0],
where_is_A[1],
itertools.repeat(A),
itertools.repeat(y)))
p.close()
G[A] = G_local
elif self.method=="local-average": # local-average
for i in range(self.n_dim_obs):
local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA
global_node_number = self.xp.where(self.A[i])[0]
Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
@ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
G[i, global_node_number] += Gh[local_node_number] #LA
G[global_node_number, i] += Gh[:, local_node_number] #LA
G /= 2.0 #LA
elif self.method=="all-average": #all-average
C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA
for i in range(self.n_dim_obs):
global_node_number = self.xp.where(self.A[i])[0]
Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
@ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA
C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA
C[C==0] = 1 #AA
G /= C #AA
if self.tm_count==1:
self.F = self.HI @ G @ self.H
else:
self.times[2] += time.time() - start_time
Fh = self.HI @ G @ self.H
self.times[3] += time.time() - start_time
self.times[4] += 1
self.F = self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff)
if self.save_change:
self.xp.save(os.path.join(self.save_dir, "transition_matrix_" + str(self.tm_count).zfill(self.fillnum) + ".npy"), self.F)
self.tm_count += 1
def get_predicted_value(self, dim = None):
"""Get predicted value
Args:
dim {int} : dimensionality for extract from predicted result
Returns (numpy-array, float)
: mean of hidden state at time t given observations
from times [0...t-1]
"""
# if not implement `forward`, implement `forward`
try :
self.x_pred[0]
except :
self.forward()
if dim is None:
return self.x_pred
elif dim <= self.x_pred.shape[1]:
return self.x_pred[:, int(dim)]
else:
raise ValueError('The dim must be less than '
+ str(self.x_pred.shape[1]) + '.')
def get_filtered_value(self, dim = None):
"""Get filtered value
Args:
dim {int} : dimensionality for extract from filtered result
Returns (numpy-array, float)
: mean of hidden state at time t given observations
from times [0...t]
"""
# if not implement `forward`, implement `forward`
try :
self.x_filt[0]
except :
self.forward()
if dim is None:
return self.x_filt
elif dim <= self.x_filt.shape[1]:
return self.x_filt[:, int(dim)]
else:
raise ValueError('The dim must be less than '
+ str(self.x_filt.shape[1]) + '.')
# def smooth(self):
# """Calculate RTS smooth for times.
# Args:
# T : length of data y
# x_smooth [n_time, n_dim_sys] {numpy-array, float}
# : mean of hidden state distributions for times
# [0...n_times-1] given all observations
# V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}
# : covariances of hidden state distributions for times
# [0...n_times-1] given all observations
# A [n_dim_sys, n_dim_sys] {numpy-array, float}
# : fixed interval smoothed gain
# """
# # if not implement `filter`, implement `filter`
# try :
# self.x_pred[0]
# except :
# self.filter()
# T = self.y.shape[0]
# self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)
# self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys),
# dtype = self.dtype)
# A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)
# self.x_smooth[-1] = self.x_filt[-1]
# self.V_smooth[-1] = self.V_filt[-1]
# # t in [0, T-2] (notice t range is reversed from 1~T)
# for t in reversed(range(T - 1)) :
# # visualize calculating times
# print("\r smooth calculating... t={}".format(T - t)
# + "/" + str(T), end="")
# # extract parameters for time t
# F = _last_dims(self.F, t, 2)
# # calculate fixed interval smoothing gain
# A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1])))
# # fixed interval smoothing
# self.x_smooth[t] = self.x_filt[t] \
# + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1])
# self.V_smooth[t] = self.V_filt[t] \
# + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T))
# def get_smoothed_value(self, dim = None):
# """Get RTS smoothed value
# Args:
# dim {int} : dimensionality for extract from RTS smoothed result
# Returns (numpy-array, float)
# : mean of hidden state at time t given observations
# from times [0...T]
# """
# # if not implement `smooth`, implement `smooth`
# try :
# self.x_smooth[0]
# except :
# self.smooth()
# if dim is None:
# return self.x_smooth
# elif dim <= self.x_smooth.shape[1]:
# return self.x_smooth[:, int(dim)]
# else:
# raise ValueError('The dim must be less than '
# + self.x_smooth.shape[1] + '.')
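# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the prediction and
# filter updates coded in `_predict_update` / `_filter_update` above, written
# out once in plain NumPy for a toy 2-dimensional state. All matrix values
# below are made up for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    F = np.array([[1.0, 0.1],
                  [0.0, 1.0]])        # transition matrix
    Q = 0.01 * np.eye(2)              # transition covariance
    H = np.eye(2)                     # observation matrix
    R = 0.1 * np.eye(2)               # observation covariance

    x_filt = np.zeros(2)              # filtered mean at time t-1
    V_filt = np.eye(2)                # filtered covariance at time t-1
    y = np.array([0.5, -0.2])         # observation at time t

    # prediction step: x_pred = F x_filt, V_pred = F V_filt F^T + Q
    x_pred = F @ x_filt
    V_pred = F @ V_filt @ F.T + Q

    # filter step: K = V_pred H^T (H V_pred H^T + R)^-1
    K = V_pred @ H.T @ np.linalg.inv(H @ V_pred @ H.T + R)
    x_filt = x_pred + K @ (y - H @ x_pred)
    V_filt = V_pred - K @ H @ V_pred
    print("filtered mean:", x_filt, "filtered variances:", np.diag(V_filt))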
| 2.140625 | 2 |
__init__.py | pslustig/galfitwrap | 0 | 12793227 | <gh_stars>0
from .galaxywrap import *
| 1.21875 | 1 |
lib/spot-2.8.1/tests/python/sum.py | AlessandroCaste/SynkrisisJupyter | 0 | 12793228 | <filename>lib/spot-2.8.1/tests/python/sum.py
# -*- mode: python; coding: utf-8 -*-
# Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement de l'Epita
#
# This file is part of Spot, a model checking library.
#
# Spot is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Spot is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import spot
import sys
import itertools
# make sure that we are not allowed to build the sum of two automata with
# different dictionaries.
aut1 = spot.translate('Xa')
aut2 = spot.translate('Xb', dict=spot.make_bdd_dict())
try:
spot.sum(aut1, aut2)
exit(2)
except RuntimeError:
pass
opts = spot.option_map()
opts.set('output', spot.randltlgenerator.LTL)
opts.set('tree_size_min', 15)
opts.set('tree_size_max', 15)
opts.set('wf', False)
opts.set('seed', 0)
opts.set('simplification_level', 0)
spot.srand(0)
rg = spot.randltlgenerator(2, opts)
dict = spot.make_bdd_dict()
def produce_phi(rg, n):
phi = []
while len(phi) < n:
f = rg.next()
if f.is_syntactic_persistence():
phi.append(f)
return phi
phi1 = produce_phi(rg, 1000)
phi2 = produce_phi(rg, 1000)
inputres = []
aut = []
for p in zip(phi1, phi2):
inputres.append(spot.formula.Or(p))
a1 = spot.ltl_to_tgba_fm(p[0], dict)
a2 = spot.ltl_to_tgba_fm(p[1], dict)
aut.append(spot.to_generalized_buchi(
spot.remove_alternation(spot.sum(a1, a2), True)))
for p in zip(aut, inputres):
assert p[0].equivalent_to(p[1])
aut = []
inputres = []
for p in zip(phi1, phi2):
inputres.append(spot.formula.And(p))
a1 = spot.ltl_to_tgba_fm(p[0], dict)
a2 = spot.ltl_to_tgba_fm(p[1], dict)
aut.append(spot.to_generalized_buchi(
spot.remove_alternation(spot.sum_and(a1, a2), True)))
for p in zip(aut, inputres):
assert p[0].equivalent_to(p[1])
| 2.609375 | 3 |
server/resources/api/__init__.py | Saakshaat/umass-match | 0 | 12793229 | from fastapi import APIRouter
main_router = APIRouter()
from resources.db import session_dependency
session_dep = session_dependency()
@main_router.get("/", status_code=200)
async def root():
return {"msg": "Welcome to UMass Match!"}
from .user import user_router
from .match import match_router
# add individual routers to top-level router
main_router.include_router(user_router)
main_router.include_router(match_router)
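# Illustrative note (not part of the original module): `main_router` is meant to be
# mounted on the application object elsewhere in the project, e.g.
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(main_router)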
| 2.265625 | 2 |
attention_model.py | qiaoliuhub/AttnToCrispr | 8 | 12793230 | import torch.nn as nn
from torch import cat, transpose
import torch
import torch.nn.functional as F
from Layers import EncoderLayer, DecoderLayer
from Sublayers import Norm, OutputFeedForward
import copy
import attention_setting
import numpy as np
import crispr_attn
import math
import OT_crispr_attn
import sys
import importlib
import pdb
# Setting the correct config file
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path + "config")
attention_setting = importlib.import_module(config_path+"attention_setting")
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
def __init__(self, d_input, d_model, N, heads, dropout):
super().__init__()
self.N = N
self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N)
self.norm = nn.LayerNorm(d_model)
def forward(self, src, mask=None):
x = src
for i in range(self.N):
x = self.layers[i](x, mask)
return self.norm(x) if attention_setting.attention_layer_norm else x
class Decoder(nn.Module):
def __init__(self, d_input, d_model, N, heads, dropout):
super().__init__()
self.N = N
self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N)
self.norm = nn.LayerNorm(d_model)
def forward(self, trg, e_outputs, src_mask=None, trg_mask=None):
x = trg
for i in range(self.N):
x = self.layers[i](x, e_outputs, src_mask, trg_mask)
return self.norm(x) if attention_setting.attention_layer_norm else x
class Transformer(nn.Module):
def __init__(self, d_input, d_model, n_feature_dim, N, heads, dropout, extra_length):
super().__init__()
self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout)
self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout)
#self.linear = nn.Linear()
self.cnn = customized_CNN()
assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn
if attention_setting.add_seq_cnn:
d_input = 64 * (((d_input + 2) // 2 + 2) // 2)
if attention_setting.analysis == 'deepCrispr':
d_model += 4
extra_length = 0
if attention_setting.add_parallel_cnn:
d_input_1 = d_input * d_model
d_input_2 = ((64 * (((d_input + 2) // 2 + 2) // 2)) * config.embedding_vec_dim)
d_input = d_input_1 + d_input_2
d_model = 1
self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout)
def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None):
e_outputs = self.encoder(src, src_mask)
# print("DECODER")
d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)
if attention_setting.add_seq_cnn:
if extra_input_for_FF is not None and attention_setting.analysis == 'deepCrispr':
bs = extra_input_for_FF.size(0)
extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4)
d_output = cat((d_output, extra_input_for_FF), dim = 2)
d_output = torch.unsqueeze(d_output, 1)
d_output = self.cnn(d_output)
flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1))
if attention_setting.add_parallel_cnn:
src = torch.unsqueeze(src, 1)
inter_output = self.cnn(src).view(src.size(0), -1)
flat_d_output = cat((inter_output, flat_d_output),dim=1)
if extra_input_for_FF is not None and attention_setting.analysis != 'deepCrispr':
flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1)
output = self.out(flat_d_output)
return output
class customized_CNN(nn.Module):
def __init__(self):
super().__init__()
self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0))
self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0))
self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0))
if config.seq_len == 22:
self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0))
else:
self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0))
self.dropout = nn.Dropout(p = attention_setting.cnn_dropout)
def forward(self, input):
x = self.maxpool_1(self.cnn_1(input))
x = F.relu(x)
x = self.maxpool_2(self.cnn_2(x))
x = F.relu(x)
x = x.contiguous().view(x.size(0), -1, x.size(-1) * x.size(-2))
return x
class OTembeddingTransformer(nn.Module):
def __init__(self, embedding_vec_dim, d_model, N, heads, dropout, feature_len_map, classifier=False):
super().__init__()
self.feature_len_map = feature_len_map
extra_length = 0 if self.feature_len_map[-1] is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0]
d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0]
self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
self.dropout = nn.Dropout(p=config.dropout)
self.classifier = classifier
def forward(self, input, src_mask=None, trg_mask=None):
src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long()
embedded_src = self.embedding(src)
bs = src.size(0)
pos_len = src.size(1)
pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)]))
pos = pos.to(OT_crispr_attn.device2)
embedded_pos = self.embedding_pos(pos)
embedded_src = embedded_pos + embedded_src
if self.feature_len_map[1] is not None:
trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long()
else:
trg = src
embedded_trg = self.trg_embedding(trg)
embedded_pos_trg = self.trg_embedding_pos(pos)
embedded_trg = embedded_pos_trg + embedded_trg
embedded_src = self.dropout(embedded_src)
embedded_trg = self.dropout(embedded_trg)
extra_input_for_FF = None
if self.feature_len_map[2] is not None:
extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]]
output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF,
src_mask=src_mask, trg_mask=trg_mask)
if self.classifier:
# output = F.log_softmax(output, dim = -1)
#output = F.softmax(output, dim = -1)
pass
return output
class EmbeddingTransformer(Transformer):
def __init__(self, embedding_vec_dim , d_input, d_model, N, heads, dropout, extra_length):
super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
self.dropout = nn.Dropout(p = config.dropout)
def forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None):
if config.sep_len != 0:
src_1 = src[:,:config.sep_len]
src_2 = src[:, config.sep_len:]
embedded_src = self.embedding(src_1)
embedded_src_2 = self.embedding_2(src_2)
embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1)
else:
embedded_src = self.embedding(src)
bs = src.size(0)
pos_length = config.seq_len - config.seq_start - config.word_len + 1
pos = torch.from_numpy(np.array([[i for i in range(pos_length)] for _ in range(bs)]))
pos = pos.to(crispr_attn.device2)
embedded_src_pos = self.embedding_pos(pos)
embedded_src_1 = embedded_src + embedded_src_pos
embedded_src_2 = self.dropout(embedded_src_1)
if trg is not None:
embedded_trg = self.trg_embedding(trg)
embedded_trg_pos = self.trg_embedding_pos(pos)
embedded_trg_1 = embedded_trg + embedded_trg_pos
embedded_trg_2 = self.dropout(embedded_trg_1)
else:
embedded_trg_2 = embedded_src_2
#embedded_src_2 = transpose(embedded_src_2, 1, 2)
output = super().forward(embedded_src_2, embedded_trg_2, extra_input_for_FF)
return output
def get_OT_model(feature_len_map, classifier = False):
assert attention_setting.d_model % attention_setting.attention_heads == 0
assert attention_setting.attention_dropout < 1
if not classifier:
model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,
attention_setting.n_layers, attention_setting.attention_heads,
attention_setting.attention_dropout, feature_len_map)
else:
model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,
attention_setting.n_layers, attention_setting.attention_heads,
attention_setting.attention_dropout, feature_len_map, classifier = True)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def get_model(inputs_lengths=None, d_input = 20):
assert attention_setting.d_model % attention_setting.attention_heads == 0
assert attention_setting.attention_dropout < 1
#model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout)
extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features)
model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model,
attention_setting.n_layers, attention_setting.attention_heads,
attention_setting.attention_dropout, extra_feature_length)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# if attention_setting.device == 0:
# model = model.cuda()
return model
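if __name__ == "__main__":
    # Minimal, illustrative smoke test (not from the original project). It only
    # exercises `get_clones`, which needs nothing beyond torch.nn, and it assumes
    # the project-specific config/attention_setting/Layers modules imported above
    # are available so that this file imports at all.
    clones = get_clones(nn.Linear(8, 8), 3)
    clones[0].weight.data.zero_()
    # the clones are independent deep copies, so zeroing one leaves the others intact
    print(len(clones), bool(clones[1].weight.data.abs().sum() > 0))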
| 2.375 | 2 |
main_deep_feat_select_mlp_l21norm.py | yifeng-li/DECRES | 42 | 12793231 | <gh_stars>10-100
#!/usr/bin/env python
"""
An example of running DFS for MLP based on L2,1-norm.
<NAME>
NRC, Ottawa
Aug. 06, 2015
Contact: <EMAIL>
"""
#qsub -l procs=1,pmem=2000mb,walltime=36:00:00 -r n -N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py
import os
#os.environ['THEANO_FLAGS']='device=cpu,base_compile=/var/tmp'
import sys
import time
import numpy
import deep_feat_select_mlp_l21norm
import classification as cl
from gc import collect as gc_collect
numpy.warnings.filterwarnings('ignore') # Theano causes some warnings
numpy.set_printoptions(threshold=numpy.nan)
numpy.set_printoptions(precision=4)
# taking the input parameters
#cell=sys.argv[1] # cell type
#wid=sys.argv[2] # window size
# load data
"""
A data set includes three files:
[1]. A TAB seperated txt file, each row is a sample, each column is a feature.
No row and columns allowd in the txt file.
If an original sample is a matrix (3-way array), a row of this file is actually a vectorized sample,
by concatnating the rows of the original sample.
[2]. A txt file including the class labels.
Each row is a string (white space not allowed) as the class label of the corresponding row in [1].
[3]. A txt file including the name of features.
Each row is a string (white space not allowed) as the feature name of the corresponding column in [1].
"""
path="/home/yifengli/prog/my/DECRES/"
os.chdir(path)
data_dir="/home/yifengli/prog/my/DECRES/data/"
result_dir="/home/yifengli/prog/my/DECRES/result/"
#cells=["GM12878","HepG2","K562","HelaS3","HUVEC","A549","MCF7","HMEC"]
#wids=[200,500,1000,2000,4000]
cells=["HelaS3"]
wids=[200]
for cell in cells:
for wid in wids:
filename=data_dir + cell + "_" + str(wid) + "bp_Data.txt";
data=numpy.loadtxt(filename,delimiter='\t',dtype='float32')
filename=data_dir + cell + "_" + str(wid) + "bp_Classes.txt";
classes=numpy.loadtxt(filename,delimiter='\t',dtype=object)
filename=data_dir+ cell + "_Features.txt"
features=numpy.loadtxt(filename,delimiter='\t',dtype=object)
given=["A-E","I-E","A-P","I-P","A-X","I-X","UK"]
#given=["A-E","I-E"]
#given=["A-P","I-P"]
#given=["A-E","A-P"]
#given=["A-E","A-X"]
#given=["A-P","A-X"]
#given=["A-E","A-P","A-X"]
#given=["A-E","I-E","A-P","I-P"]
#given=["A-E","I-E","A-P","I-P","A-X","I-X"]
#given=["I-E","I-P"]
data,classes,_=cl.take_some_classes(data,classes,given=given,others=None)
# balance the sample sizes of the classes
rng=numpy.random.RandomState(1000)
data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng)
print data.shape
print numpy.unique(classes)
#group=[["A-E"],["I-E"],["A-P"],["I-P"],["A-X"],["I-X"],["UK"]]
#group=[["A-E","A-P"],["I-E","I-P","A-X","I-X","UK"]]
#group=[["A-E","A-P","A-X"],["I-E","I-P","I-X","UK"]]
group=[["A-E"],["A-P"],["I-E","I-P","A-X","I-X","UK"]]
#group=[["A-E"],["A-P"],["A-X"],["I-E","I-P","I-X","UK"]]
#group=[["A-E"],["I-E"]]
#group=[["A-P"],["I-P"]]
#group=[["A-E"],["A-P"]]
#group=[["A-E"],["A-X"]]
#group=[["A-P"],["A-X"]]
#group=[["A-E"],["A-P"],["A-X"]]
#group=[["A-E","I-E"],["A-P","I-P"]]
#group=[["A-E","A-P"],["I-E","I-P"]]
#group=[["A-E","I-E"],["A-P","I-P"],["A-X","I-X"]]
#group=[["A-E","A-P","A-X"],["I-E","I-P","I-X"]]
#group=[["I-E"],["I-P"]]
classes=cl.merge_class_labels(classes,group)
print numpy.unique(classes)
classes_unique,classes=cl.change_class_labels(classes)
print numpy.unique(classes)
# set random state
#numpy.random.seed(1000)
rng=numpy.random.RandomState(2000)
data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng)
print data.shape
print numpy.unique(classes)
# partition the data
train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng)
# normalization
train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10)
valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max)
test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max)
# train
# setting the parameter
learning_rate=0.1
alpha=0.1
alpha1=0.01
alpha2=0.0
n_hidden=[128,16]#[256,64,16]
n_epochs=1000
batch_size=200
activation_func='relu'
if cell=="GM12878":
lambda21s=numpy.arange(0.020,-0.0001,-0.0001)
if cell=="HepG2":
lambda21s=numpy.arange(0.028,-0.0001,-0.0001)
if cell=="HelaS3":
lambda21s=numpy.arange(0.028,-0.0001,-0.0001)
if cell=="K562":
lambda1s=numpy.arange(0.025,-0.0001,-0.0001)
lambda21s=[0.02]#numpy.arange(0.04,-0.001,-0.001)
features_selected=[]
weights_selected=[]
weights=[]
perfs=[]
for i in range(len(lambda21s)):
lambda21=lambda21s[i]
classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org,
valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org,
learning_rate=learning_rate, alpha=alpha, lambda21=lambda21,
alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden,
n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng,
max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8)
param0=classifier.params[0].get_value()
param1=classifier.params[1].get_value()
row_sum=numpy.sqrt((param0**2).sum(axis=1))
max_param=numpy.max(row_sum)
selected=row_sum>(max_param*0.001)
features_selected.append(features[selected])
weights_selected.append(row_sum[selected])
print row_sum
print 'Number of select variables:', sum(selected)
print features[selected]
#print param0[selected]
#print param0
#numpy.savetxt('param0.txt',param0,delimiter='\t',fmt='%.4e')
print abs(param0).sum(axis=1)
weights.append(row_sum)
# test
test_set_y_pred,test_set_y_pred_prob,test_time=deep_feat_select_mlp_l21norm.test_model(classifier, test_set_x_org, batch_size=200)
print test_set_y_pred[0:20]
print test_set_y_pred_prob[0:20]
print test_time
perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org))
perfs.append(perf)
print perf
print conf_mat
perfs=numpy.asarray(perfs)
save_dir=result_dir + "_".join(classes_unique)
try:
os.makedirs(save_dir)
except OSError:
pass
# save the weights
filename=save_dir + '/' + cell + "_" + str(wid) + "bp.txt"
#cl.write_feature_weight(weights,features,lambda21s,filename)
filename=save_dir + '/' + cell + "_" + str(wid) + "bp_unique_yes.txt"
# save the features, lambdas, accuracies
#cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename)
filename=save_dir + '/' + cell + "_" + str(wid) + "bp_unique_no.txt"
#cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename)
#save_dir=result_dir + "_".join(classes_unique)
#filename=cell + "_" + str(wid) + "bp.txt"
#cl.save_perform(save_dir,filename,perf=perf,std=None,conf_mat=conf_mat,classes_unique=classes_unique,training_time=training_time,test_time=test_time)
gc_collect()
| 2.515625 | 3 |
client/hotbox.py | odontomachus/hotbox | 0 | 12793232 | <filename>client/hotbox.py
if __name__ == "__main__":
from gui import App
app = App()
app.mainloop()
| 1.695313 | 2 |
library/example-exceptions.py | kensoi/testcanarybot | 0 | 12793233 | import random
from testcanarybot import objects
from testcanarybot import exceptions
# Copyright 2021 kensoi
class Main(objects.libraryModule):
@objects.priority(commands = ['quit']) # @testcanarybot quit
async def second(self, tools: objects.tools, package: objects.package):
await tools.api.messages.send(
random_id = tools.gen_random(),
peer_id = package.peer_id,
message = 'exiting the framework...'
)
raise exceptions.Quit("test") # -> to finish your framework (closing all projects that were launched by tppm)
@objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload
async def second2(self, tools: objects.tools, package: objects.package):
await tools.api.messages.send(
random_id = tools.gen_random(),
peer_id = package.peer_id,
message = 'reloading...'
)
raise exceptions.LibraryReload("Reload") # -> framework will reload your library | 2.296875 | 2 |
src/kivy_garden/qrcode/version.py | kivy-garden/qrcode | 7 | 12793234 | __version__ = '2021.0314'
| 1.0625 | 1 |
neuron_models/rvlm_model.py | joetaylor94/reservoir-computing | 1 | 12793235 | '''
<NAME> (<EMAIL>)
Department of Physics
University of Bath, UK
May 1st, 2020
Conductance model of an RVLM neuron for use with reservoir computing
using a modified Hodgkin-Huxley framework of ion channel gating.
Model parameters are chosen so as to replicate the behaviour of
the thalamocortical relay neuron presented in Huguenard J, McCormick DA,
Shepherd GM (1997) 'Electrophysiology of the Neuron'.
The neuron model consists of three ionic currents: a passive leak current,
a transient sodium current (NaT), and a potassium current (K). The sodium
current is controlled by an activation gating variable (m) and an
inactivation gating variable (h). The potassium channel is non-inactivating
and is controlld by a single activation gating variable (n).
The full model state x comprises four state variables - the membrane voltage
and the three gating varibales m, h, and n, and is thus described as:
x = [V,m,h,n]
The only state variable that it is possible to measure experimentally is the
membrane voltage. This is the state variable output by the python script.
'''
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Define constants
TEMP_C = 35
FARADAY = 96480
PI = 3.14159265359
# Model duration (ms)
T = 7400
dt = 0.025
# Generate array of time points, from zero to T
t = np.arange(0,T,dt)
##############################################################################
# Model Equations of Motion
##############################################################################
# Define functions for gating kinetics of ion channels
# Effect of temperature is accounted for by the Q10 coeff
def mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/ amV2))
def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10)
def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/ ahV2))
def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV - ahV1)/ ahV3))) / 3.0**((TEMP_C-23.5)/10)
def nn_inf(VV): return 0.5*(1 + sp.tanh((VV - anV1)/ anV2))
def nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10)
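# All three gates share the same functional form (this is just a restatement of the
# code above): steady-state x_inf(V) = 0.5*(1 + tanh((V - axV1)/axV2)) and a
# bell-shaped time constant tau_x(V) = (tx0 + epsx*(1 - tanh((V - axV1)/axV3)**2)),
# divided by the Q10 factor 3.0**((TEMP_C - 23.5)/10) to correct the kinetics for
# temperature.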
# Define functions for ionic currents (in uA/cm^2)
# Currents correspond to passive leak, delayed-rectifier potassium,
# and transient sodium currents
def I_Leak(VV): return gLeak * (VV - EL)
def I_K(VV,nn): return gK * nn**4 * (VV - EK)
def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV - ENa)
# Define equations of motion for full neuron state x = [V,m,h,n]
# Use idx to read in correct current stimulation data point
# Function reads in system state and returns its derivative
def dXdt(X,t):
VV, mm, hh, nn, idx = X
soma_area = soma_len*soma_diam*PI
idx = int(t/dt)
dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm
dmmdt = (mm_inf(VV) - mm)/mm_tau(VV)
dhhdt = (hh_inf(VV) - hh)/hh_tau(VV)
dnndt = (nn_inf(VV) - nn)/nn_tau(VV)
return dVVdt, dmmdt, dhhdt, dnndt, idx
##############################################################################
# Model Parameters
##############################################################################
# Soma dimensions (cm)
soma_len = 0.01
soma_diam = 0.029/PI
# Define model parameters
# conductances: gX; reversal potentials: EX;
# thresholds: aXV1; membrane capacitance: Cm;
# time constants: tx0, epsx
Cm = 1
gNaT = 69
ENa = 41
gK = 6.9
EK = -100
EL = -65
gLeak = 0.465
amV1 = -39.92
amV2 = 10
amV3 = 23.39
tm0 = 0.143
epsm = 1.099
ahV1 = -65.37
ahV2 = -17.65
ahV3 = 27.22
th0 = 0.701
epsh = 12.90
anV1 = -34.58
anV2 = 22.17
anV3 = 23.58
tn0 = 1.291
epsn = 4.314
##############################################################################
# Preparing current stimulation to be injected into the neuron
##############################################################################
# Function for injected a current step (uA/cm^2)
# Args: amplitude, init time, final time
def i_inj(t):
return amp*(t>t_i) - amp*(t>t_f)
# Function for loading current injection protocol (uA/cm^2)
# Args: file path, amplitude scale (default = 0.02), sample every 'n'th point
def load_stim(name, scale, n):
stim = []
with open(name, "r") as ins:
count = 0
for line in ins:
count+=1
if count % n == 0:
stim.append(scale*(float(line.rstrip('\n'))))
ins.close()
return stim
# Initialise stim or load external stimulation files
# If not loading in external stim, uncomment line below
#stim = np.zeros(int(2*T/dt))
stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20)
# Current step (uA/cm^2)
# Define amplitude, init time and end time
amp = 0 #0.003
t_i = 100
t_f = 300
##############################################################################
# Initializing the neuron model
##############################################################################
# Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)]
# Default vals correspond to neuron at steady-state resting potential
# Final value in the init array is idx (starts at 0)
init = [-65,0.00742,0.47258,0.06356,0]
##############################################################################
# Running model: forward-integrating the equations of motion
##############################################################################
# Integrate model equations
# Arguments: state derivative, initial neuron state x(0), time point array
X = odeint(dXdt, init, t)
# Define variables to simplify analysis
VV = X[:,0]
mm = X[:,1]
hh = X[:,2]
nn = X[:,3]
# Adding Gaussian error to voltage trace (mV)
sigma_obs = 0.1
obs_error = np.random.normal(0, sigma_obs, len(VV))
VV_obs = VV + obs_error
##############################################################################
# Plotting and saving model output
##############################################################################
# Define total current
stimulation = stim[0:len(VV)] + i_inj(t)
# Plotting membrane voltage and stimulation time series
plt.subplot(2,1,1)
plt.plot(t,VV_obs,'k',linewidth=0.8)
plt.ylabel("Membrane Potential (mV)")
plt.subplot(2,1,2)
plt.ylabel("Current (uA)")
plt.plot(t,stimulation,'b',linewidth=0.8)
plt.show()
# Save voltage data (without gaussian noise)
f = open('output/voltage_clean.csv', 'w')
for i in range(int(len(VV))):
f.write('%f \n' % VV[i])
f.close()
# Save voltage data (with gaussian noise)
f = open('output/voltage.csv', 'w')
for i in range(int(len(VV))):
f.write('%f \n' % VV_obs[i])
f.close()
# Save current stimulation data
f = open('output/stimulation.csv', 'w')
for i in range(int(len(VV))):
f.write('%f\n' % stimulation[i])
f.close()
| 2.90625 | 3 |
firetail/extensions/fleet_up/__init__.py | evidex/Firetail | 29 | 12793236 | <filename>firetail/extensions/fleet_up/__init__.py
from .fleet_up import FleetUp
def setup(bot):
bot.add_cog(FleetUp(bot))
| 1.570313 | 2 |
byte-adder.py | sandeshpoudel995/Byte-adder | 0 | 12793237 | # byte-adder.py
# A simple python program which simulates the behaviour of a digital circuit performing integer addition.
# It adds two 8 bit binary numbers using different logical gates.
import sys
import os
#defining input
def Input():
upper_bit_int = int(input("Enter \'first integer\' from 0 to 255 : ")) # first input from the user
lower_bit_int= int(input("Enter \'second integer\' from 0 to 255 : ")) # second input from the user
return(upper_bit_int, lower_bit_int)
# defining and gate
def andGate(bitOne, bitTwo):
return bitOne & bitTwo
# defining or gate
def orGate(bitOne, bitTwo):
return bitOne | bitTwo
# defining not gate
def compliment(bitValue):
return ~bitValue
# defining xor gate
def xorGate(bitOne, bitTwo):
return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo))
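# Quick sanity check of the derived XOR for 0/1 inputs (follows from the gate identities above):
#   xorGate(0, 0) == 0, xorGate(0, 1) == 1, xorGate(1, 0) == 1, xorGate(1, 1) == 0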
# calculating carry
def calculateCarry(a, b, c, d):
return orGate(andGate(a,b), andGate(c,d))
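# Full-adder carry-out: called below as calculateCarry(a, b, a XOR b, carry_in), this
# computes (a AND b) OR ((a XOR b) AND carry_in), the standard carry expression.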
# performing bit operation
def bitOperation(upper_bit, lower_bit):
result = []
carry = 0
for index in range(len(upper_bit)):
after_xor_cal = xorGate(upper_bit[index], lower_bit[index])
result.append(xorGate(after_xor_cal, carry))
carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry)
result.append(carry)
return list(reversed(result))
# Final processing and printing the sum
def Output():
upper_bit_int, lower_bit_int = Input()
if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255
print('calculating sum...')
else :
print('Error: Input is not in range')
print('Restarting...')
print()
del upper_bit_int
del lower_bit_int
Output()
return
upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))]
lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))]
# output of bit operation
result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit)))
final_sum_bin = ''.join(str(e) for e in result)
final_sum_int = int(''.join(str(e) for e in result),2)
print('Integer sum using bit operation = ', final_sum_int ,' In Binary = ',final_sum_bin)
print()
# loop for adding two integers again and again unless user exits
quit = input("Do you want to add again Y/N : ")
if quit.lower() == 'n':
exit()
elif quit.lower() == 'y':
print()
Output()
print('This is a python program which simulates the behaviour of a digital circuit performing integer addition.')
print()
Output()
| 4.28125 | 4 |
bin/premcce.py | newbooks/Develop-MCCE | 0 | 12793238 | #!/usr/bin/env python
import sys
from pymccelib import *
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)-s: %(message)s')
if __name__ == "__main__":
env.init()
prot = Protein()
prot.load_nativepdb(env.prm["INPDB"])
# identify N and C terminal
if env.prm["TERMINALS"].upper() == "T":
prot.identify_nc()
# remove exposed water
# Disulfide bridge
lines = prot.pdblines()
open(env.fn_step1_out,"w").writelines(lines)
| 2.15625 | 2 |
kunquat/tracker/ui/model/orderlistmanager.py | kagu/kunquat | 13 | 12793239 | # -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2015
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
class OrderlistManager():
def __init__(self):
self._controller = None
self._session = None
def set_controller(self, controller):
self._controller = controller
self._session = controller.get_session()
def set_orderlist_selection(self, selection):
self._session.set_orderlist_selection(selection)
def get_orderlist_selection(self):
return self._session.get_orderlist_selection()
| 1.84375 | 2 |
src/phat/learn/__init__.py | rskene/phat | 2 | 12793240 | from .phatnet import PhatNet, PhatLoss, PhatMetric
from .utils import DataSplit
| 1.070313 | 1 |
DeepLearningArchitectures/UNet/models/unet1.py | SCCH-KVS/NuclearSegmentationPipeline | 13 | 12793241 | #!/usr/bin/env python
import lasagne
from lasagne.layers.conv import Conv2DLayer as Conv2DLayer
from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer
from lasagne.nonlinearities import elu, sigmoid, rectify
from lasagne.layers import batch_norm
from lasagne_wrapper.network import SegmentationNetwork
from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy
from lasagne_wrapper.batch_iterators import get_batch_iterator
from lasagne_wrapper.learn_rate_shedules import get_stepwise
from lasagne_wrapper.parameter_updates import get_update_momentum
Network = SegmentationNetwork
INPUT_SHAPE = [1, 256, 256]
nonlin = elu
def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'):
""" convolution block with with batch normalization """
in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size,
nonlinearity=nonlinearity, pad=pad, name=name)
in_layer = batch_norm(in_layer)
return in_layer
def build_model():
""" Compile net architecture """
l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')
net1 = batch_norm(l_in)
# --- preprocessing ---
net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc')
# number of filters in first layer
# decreased by factor 2 in each block
nf0 = 16
# --- encoder ---
net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
p1 = net1
net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool1')
net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
p2 = net1
net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2')
net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
p3 = net1
net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3')
net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
# --- decoder ---
net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2, stride=2, name='upconv')
net1 = ConcatLayer((p3, net1), name='concat')
net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv')
net1 = ConcatLayer((p2, net1), name='concat')
net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv')
net1 = ConcatLayer((p1, net1), name='concat')
net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation')
return net1
# prepare training strategy
train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250, patience=300,
ini_learning_rate=0.2, L2=None, use_weights=False,
adapt_learn_rate=get_stepwise(k=1000, factor=0.5),
update_function=get_update_momentum(0.9),
valid_batch_iter=get_batch_iterator(),
train_batch_iter=get_batch_iterator())
| 2.8125 | 3 |
pkg/R/late.py | cnk113/ESCAPE | 0 | 12793242 | #!/usr/bin/python
import sys
import os
import numpy as np
import pandas as pd
import argparse
import tensorflow as tf
from importlib.machinery import SourceFileLoader
import math
import psutil
import time
from scipy.sparse import csr_matrix
import gc
import matplotlib
matplotlib.use('Agg')
import scimpute
def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1):
'''Plot MSE learning curves and save the values to a csv file
Parameters:
-----------
skip: plot only every `skip`-th recorded point
epoch_log: epochs at which the metrics were recorded
mse_batch_vec: MSE on the last training mini-batch at each recorded epoch
mse_valid_vec: MSE on the validation sample at each recorded epoch
stage: step1 or step2 (also used as the output directory)
'''
print('> plotting learning curves')
scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec,
title="Learning Curve MSE.{}".format(stage),
ylabel='MSE (X vs Y, nz)',
dir=stage,
skip=skip
)
_ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec)))
_ = pd.DataFrame(data=_,
index=epoch_log,
columns=['Epoch', 'MSE_batch', 'MSE_valid']
).set_index('Epoch')
_.to_csv("./{}/mse.csv".format(stage))
#def learning_curve_mse_nz(skip=1):
def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1):
'''Plot MSE_NZ learning curves and save the values to a csv file
Parameters:
-----------
skip: plot only every `skip`-th recorded point
epoch_log: epochs at which the metrics were recorded
mse_nz_batch_vec: non-zero-masked MSE on the last training mini-batch at each recorded epoch
mse_nz_valid_vec: non-zero-masked MSE on the validation sample at each recorded epoch
stage: step1 or step2 (also used as the output directory)
'''
print('> plotting learning curves')
scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
title="Learning Curve MSE_NZ.{}".format(stage),
ylabel='MSE_NZ (X vs Y, nz)',
dir=stage,
skip=skip
)
_ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec)))
_ = pd.DataFrame(data=_,
index=epoch_log,
columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']
).set_index('Epoch')
_.to_csv("./{}/mse_nz.csv".format(stage))
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
'''Calculate the snapshot imputation of the current model on the given input data
and return it as a DataFrame (saving is left to the caller)
Parameters:
-----------
'''
Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
# save sample imputation
Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
return Y_input_df
#def save_whole_imputation:
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,pHidden_holder, input_matrix, gene_ids, cell_ids, p, m):
''' Calculate and save imputation results for an input matrix in 'impute' mode. If the number
of cells is larger than the threshold p.large_size (1e5), results are written in m//p.sample_size + 1 blocks.
Parameters
----------
'''
if m > p.large_size:
#impute on small data blocks to avoid high memory cost
n_out_batches = m//p.sample_size
print('num_out_batches:', n_out_batches)
handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
for i_ in range(n_out_batches+1):
start_idx = i_*p.sample_size
end_idx = min((i_+1)*p.sample_size, m)
print('saving:', start_idx, end_idx)
x_out_batch = input_matrix[start_idx:end_idx, :].todense()
y_out_batch = sess.run(
h,
feed_dict={
X: x_out_batch,
pIn_holder: 1, pHidden_holder: 1
}
)
df_out_batch = pd.DataFrame(
data=y_out_batch,
columns=gene_ids,
index=cell_ids[range(start_idx, end_idx)]
)
latent_code = sess.run(
a_bottleneck,
feed_dict={
X: x_out_batch,
pIn_holder: 1, pHidden_holder: 1
}
)
latent_code_df = pd.DataFrame(
data=latent_code,
index=cell_ids[range(start_idx, end_idx)]
)
if i_ == 0:
df_out_batch.to_csv(handle, float_format='%.6f')
latent_code_df.to_csv(handle2, float_format='%.6f')
print('RAM usage during mini-batch imputation and saving output: ',
'{} M'.format(usage()))
else:
df_out_batch.to_csv(handle, header=None)
latent_code_df.to_csv(handle2, header=None)
handle2.close()
else: # if m the # of cells is less than large_size (1e5))
Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
pIn_holder: 1, pHidden_holder: 1})
# save sample imputation
Y_input_df = pd.DataFrame(data=Y_input_arr,
columns=gene_ids,
index=cell_ids)
latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
pIn_holder: 1, pHidden_holder: 1})
latent_code_df = pd.DataFrame(data=latent_code,
index=cell_ids)
print('RAM usage during whole data imputation and saving output: ',
'{} M'.format(usage()))
scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage,
p.stage))
scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage,
p.stage))
def visualize_weight(sess, stage, w_name, b_name):
w = eval(w_name)
b = eval(b_name)
w_arr = sess.run(w)
b_arr = sess.run(b)
b_arr = b_arr.reshape(len(b_arr), 1)
b_arr_T = b_arr.T
scimpute.visualize_weights_biases(w_arr, b_arr_T,
'{},{}.{}'.format(w_name, b_name, stage),
dir=stage)
def visualize_weights(sess, stage, en_de_layers):
for l1 in range(1, en_de_layers+1):
encoder_weight = 'e_w'+str(l1)
encoder_bias = 'e_b'+str(l1)
visualize_weight(sess, stage, encoder_weight, encoder_bias)
decoder_bias = 'd_b'+str(l1)
decoder_weight = 'd_w'+str(l1)
visualize_weight(sess, stage, decoder_weight, decoder_bias)
def save_weights(sess, stage, en_de_layers):
print('save weights in npy')
for l1 in range(1, en_de_layers+1):
encoder_weight_name = 'e_w'+str(l1)
encoder_bias_name = 'e_b'+str(l1)
decoder_bias_name = 'd_b'+str(l1)
decoder_weight_name = 'd_w'+str(l1)
np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
sess.run(eval(encoder_weight_name)))
np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
sess.run(eval(decoder_weight_name)))
np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
sess.run(eval(encoder_bias_name)))
np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
sess.run(eval(decoder_bias_name)))
def usage():
process = psutil.Process(os.getpid())
ram = process.memory_info()[0] / float(2 ** 20)
ram = round(ram, 1)
return ram
# sys.path.append('./bin')
# print('sys.path', sys.path)
#print('python version:', sys.version)
#print('tf.__version__', tf.__version__)
def late_main(p, log_dir, rand_state=3):
##0. read data and extract gene IDs and cell IDs
input_matrix, gene_ids, cell_ids = read_data(p)
##1. split data and save indexes
#input p, input_matrix, cell_ids
#return cell_ids_train, cell_ids_valid, cell_ids_test
m, n = input_matrix.shape
input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
cell_ids_train = cell_ids[train_idx]
cell_ids_valid = cell_ids[valid_idx]
cell_ids_test = cell_ids[test_idx]
np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
print('RAM usage after splitting input data is: {} M'.format(usage()))
# todo: for backward support for older parameter files only
# sample_size defaults to int(9e4) if absent from the parameter file; if sample_size is less than the number of cells (m),
# we reconstruct the training and validation sets by randomly sampling.
try:
p.sample_size
sample_size = p.sample_size
except:
sample_size = int(9e4)
if sample_size < m:
np.random.seed(1)
rand_idx = np.random.choice(
range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
sample_train = input_train[rand_idx, :].todense()
sample_train_cell_ids = cell_ids_train[rand_idx]
rand_idx = np.random.choice(
range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
sample_valid = input_valid[rand_idx, :].todense()
sample_valid_cell_ids = cell_ids_valid[rand_idx]
#?? the following sample_input is a matrix sampled randomly, and should it be a matrix containing
# sample_training and sample_valid
rand_idx = np.random.choice(range(m), min(sample_size, m))
sample_input = input_matrix[rand_idx, :].todense()
sample_input_cell_ids = cell_ids[rand_idx]
del rand_idx
gc.collect()
np.random.seed()
else:
sample_input = input_matrix.todense()
sample_train = input_train.todense()
sample_valid = input_valid.todense()
sample_input_cell_ids = cell_ids
sample_train_cell_ids = cell_ids_train
sample_valid_cell_ids = cell_ids_valid
print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)
))
##2. model training and validation
#2.1 init --> keep this in the main
tf.reset_default_graph()
# define placeholders and variables
X = tf.placeholder(tf.float32, [None, n], name='X_input') # input
pIn_holder = tf.placeholder(tf.float32, name='p.pIn') #keep_prob for dropout
pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')#keep_prob for dropout
#2.2 define layers and variables
# input p, X, pIn_holder, pHidden_holder, n
# return a_bottleneck, h(d_a1)
a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3)
#2.3 define loss
# input X, h, p
# return mse_nz, mse, reg_term
mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
#2.4 construct the trainer --> keep this section in the main
optimizer = tf.train.AdamOptimizer(p.learning_rate)
if p.mse_mode in ('mse_omega', 'mse_nz'):
print('training on mse_nz')
trainer = optimizer.minimize(mse_nz + reg_term)
elif p.mse_mode == 'mse':
print('training on mse')
trainer = optimizer.minimize(mse + reg_term)
else:
raise Exception('mse_mode spelled wrong')
#2.5 Init a session according to the run_flag
sess = tf.Session()
# restore variables
saver = tf.train.Saver()
if p.run_flag == 'load_saved':
print('*** In TL Mode')
saver.restore(sess, "./step1/step1.ckpt")
elif p.run_flag == 'rand_init':
print('*** In Rand Init Mode')
init = tf.global_variables_initializer()
sess.run(init)
elif p.run_flag == 'impute':
print('*** In impute mode loading "step2.ckpt"..')
saver.restore(sess, './step2/step2.ckpt')
p.max_training_epochs = 0
p.learning_rate = 0.0
## save_whole_imputation
save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,
pHidden_holder, input_matrix, gene_ids,
cell_ids, p, m)
print('imputation finished')
#toc_stop = time.time()
#print("reading took {:.1f} seconds".format(toc_stop - tic_start))
exit()
else:
raise Exception('run_flag err')
# define tensor_board writer
batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
# prep mini-batch, and reporter vectors
num_batch = int(math.floor(len(train_idx) // p.batch_size)) # floor
epoch_log = []
mse_nz_batch_vec, mse_nz_valid_vec = [], [] #, mse_nz_train_vec = [], [], []
mse_batch_vec, mse_valid_vec = [], [] # mse = MSE(X, h)
#msej_batch_vec, msej_valid_vec = [], [] # msej = MSE(X, h), for genej, nz_cells
print('RAM usage after building the model is: {} M'.format(usage()))
epoch = 0
#2.6. pre-training epoch (0)
#save imputation results before training steps
print("Evaluation: epoch{}".format(epoch))
epoch_log.append(epoch)
mse_train, mse_nz_train = sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder: 1.0})
mse_valid, mse_nz_valid = sess.run([mse, mse_nz],feed_dict={X: sample_valid,pHidden_holder: 1.0, pIn_holder: 1.0})
print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=",round(mse_nz_valid, 3))
print("mse_train=", round(mse_train, 3),"mse_valid=", round(mse_valid, 3))
mse_batch_vec.append(mse_train)
mse_valid_vec.append(mse_valid)
mse_nz_batch_vec.append(mse_nz_train)
mse_nz_valid_vec.append(mse_nz_valid)
#2.7. training epochs (1-)
for epoch in range(1, p.max_training_epochs+1):
tic_cpu, tic_wall = time.clock(), time.time()
ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
#2.7.1 training model on mini-batches
for i in range(num_batch):
# x_batch
indices = np.arange(p.batch_size * i, p.batch_size*(i+1))
ridx_batch = ridx_full[indices]
# x_batch = df1_train.ix[ridx_batch, :]
x_batch = input_train[ridx_batch, :].todense()
sess.run(trainer, feed_dict={X: x_batch,
pIn_holder: p.pIn,
pHidden_holder: p.pHidden})
toc_cpu, toc_wall = time.clock(), time.time()
#2.7.2 save the results of epoch 1 and all display steps (epochs)
if (epoch == 1) or (epoch % p.display_step == 0):
tic_log = time.time()
print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)
))
print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1)))
print('RAM usage: {:0.1f} M'.format(usage()))
# debug
# print('d_w1', sess.run(d_w1[1, 0:4])) # verified when GradDescent used
# training mse and mse_nz of the last batch
mse_batch, mse_nz_batch, h_batch = sess.run(
[mse, mse_nz, h],
feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0}
)
# validation mse and mse_nz of the sample validation set (1000)
mse_valid, mse_nz_valid, Y_valid = sess.run(
[mse, mse_nz, h],
feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0}
)
toc_log = time.time()
print('mse_nz_batch:{}; mse_nz_valid: {}'.
format(mse_nz_batch, mse_nz_valid))
print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
mse_batch_vec.append(mse_batch)
mse_valid_vec.append(mse_valid)
mse_nz_batch_vec.append(mse_nz_batch)
mse_nz_valid_vec.append(mse_nz_valid)
epoch_log.append(epoch)
#2.7.3 save snapshot step
if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
tic_log2 = time.time()
#1.save imputation results
#if the input matrix is large (m > p.large_size), only save the
#imputation results of a small sample set (sample_input)
print("> Impute and save.. ")
if m > p.large_size:
Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, sample_input, gene_ids, sample_input_cell_ids)
scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage,
p.stage))
else:
Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids)
scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage,
p.stage))
#2.save model
print('> Saving model..')
save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
print("Model saved in: %s" % save_path)
#3.save the training and test curve
if p.mse_mode in ('mse_nz', 'mse_omega'):
#learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
p.stage, skip=math.floor(epoch / 5 / p.display_step))
elif p.mse_mode == 'mse':
#learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, p.stage,
skip=math.floor(epoch / 5 / p.display_step))
#4.save the bottleneck representation
print("> save bottleneck_representation")
code_bottleneck_input = sess.run(a_bottleneck,
feed_dict={
X: sample_input,
pIn_holder: 1,
pHidden_holder: 1})
np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage),
code_bottleneck_input)
#save_weights()
save_weights(sess, p.stage, en_de_layers=p.l)
#visualize_weights()
visualize_weights(sess, p.stage, en_de_layers=p.l)
toc_log2 = time.time()
log2_time = round(toc_log2 - tic_log2, 1)
min_mse_valid = min(mse_nz_valid_vec)
# os.system(
# '''for file in {0}/*npy
# do python -u weight_clustmap.py $file {0}
# done'''.format(p.stage)
# )
print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
print('snapshot_step: {}s'.format(log2_time))
batch_writer.close()
valid_writer.close()
sess.close()
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3):
#5.2 define layers and variables
# input p, X, pIn_holder, pHidden_holder, n
# return a_bottleneck, h(d_a1)
tf.set_random_seed(rand_state) # seed
global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
if p.L == 7:
# change with layer
with tf.name_scope('Encoder_L1'):
e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
with tf.name_scope('Encoder_L2'):
e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
with tf.name_scope('Encoder_L3'):
e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
# # with tf.name_scope('Encoder_L4'):
# # e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
# # e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
# # with tf.name_scope('Decoder_L4'):
# # d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
# # d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
with tf.name_scope('Decoder_L3'):
d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
with tf.name_scope('Decoder_L2'):
d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
with tf.name_scope('Decoder_L1'):
d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed
# define input/output
a_bottleneck = e_a3
elif p.L == 5:
# change with layer
with tf.name_scope('Encoder_L1'):
e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
with tf.name_scope('Encoder_L2'):
e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
with tf.name_scope('Decoder_L2'):
d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
with tf.name_scope('Decoder_L1'):
d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo: change input activations if model changed
# define input/output
a_bottleneck = e_a2
elif p.L == 3:
# change with layer
with tf.name_scope('Encoder_L1'):
e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
with tf.name_scope('Decoder_L1'):
d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1,
pHidden_holder) # todo: change input activations if model changed
# define input/output
a_bottleneck = e_a1
else:
raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
h = d_a1
return a_bottleneck, h
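# Layer-size summary for build_late() (read off the definitions above; the
# returned bottleneck activation is the innermost encoder layer):
#   p.L == 7: n -> n_hidden_1 -> n_hidden_2 -> n_hidden_3 (bottleneck) -> n_hidden_2 -> n_hidden_1 -> n
#   p.L == 5: n -> n_hidden_1 -> n_hidden_2 (bottleneck) -> n_hidden_1 -> n
#   p.L == 3: n -> n_hidden_1 (bottleneck) -> n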
def build_metrics(X, h, coef):
with tf.name_scope("Metrics"):
omega = tf.sign(X) # 0 if 0, 1 if > 0; not possibly < 0 in our data
mse_nz = tf.reduce_mean(
tf.multiply(
tf.pow(X-h, 2),
omega
)
)
mse = tf.reduce_mean(tf.pow(X-h, 2))
reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
mse = tf.reduce_mean(tf.pow(X - h, 2)) # for report
tf.summary.scalar('mse__Y_vs_X', mse)
return mse_nz, mse, reg_term
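# A minimal NumPy sketch of the masked loss defined above, for reference only
# (this helper is never called by the script): zero entries of X are masked out
# of the squared error but still counted in the mean, matching tf.reduce_mean.
def _mse_nz_numpy_sketch(X_arr, h_arr):
    import numpy as np
    omega = np.sign(X_arr)  # 1 where X > 0, 0 where X == 0
    return float(np.mean(np.square(X_arr - h_arr) * omega))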
def load_params(mode, infile):
'''load the 'global_params.py' file '''
cwd = os.getcwd()
param_file = 'global_params.py'
param_name = param_file.rstrip('.py')
p = SourceFileLoader(param_name,
cwd + '/' + param_file).load_module()
p.fname_input = infile
p.mode = mode
if mode == 'pre-training':
# step1/rand_init for pre-training on reference
p.stage = 'step1'
p.run_flag = 'rand_init'
p.learning_rate = 3e-4 # step1: 3e-4 for 3-7L, 3e-5 for 9L
elif mode == 'translate':
# step2/load_saved from step1, for transfer learning
p.stage = 'step2' # step1/step2 (not others)
p.run_flag = 'load_saved' # rand_init/load_saved
p.learning_rate = 3e-5 # step2: 3e-5 for 3-7L, 3e-6 for 9L
elif mode == 'late':
# step2/rand_init for one step training
p.stage = 'step2'
p.run_flag = 'rand_init'
p.learning_rate = 3e-4 # step1: 3e-4 for 3-7L, 3e-5 for 9L
elif mode == 'impute':
# step2/load_saved/learning_rate=0, just impute and output
p.stage = 'impute'
p.run_flag = 'impute'
p.learning_rate = 0.0
elif mode == 'analysis':
p.tag = 'Eval'
p.stage = 'Eval'
else:
print('The mode you entered cannot be recognized.')
print('Valid mode options: pre-training | late | translate | impute | analysis')
p.mode = 'invalid'
return p
if p.test_flag:
p.max_training_epochs = 10 # 3L:100, 5L:1000, 7L:1000, 9L:3000
p.display_step = 1 # interval on learning curve
p.snapshot_step = 5 # interval of saving session, imputation
p.m = 1000
p.n = 300
p.sample_size = int(240)
print('in test mode\n',
'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
'sample size set to {}'.format(p.sample_size))
return p
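# Minimal usage sketch (illustrative only: 'my_counts.hd5' is a placeholder
# file name, and a global_params.py must exist in the working directory for
# load_params() to succeed):
def _load_params_example():
    p = load_params('late', 'my_counts.hd5')  # -> stage='step2', run_flag='rand_init', lr=3e-4
    display_params(p)
    return p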
# to do: modify to display based on mode
#
def display_params(p):
# PRINT PARAMETERS
print('\nmode:', p.mode)
print('\nData:')
print('fname_input:', p.fname_input)
print('name_input:', p.name_input)
print('ori_input:', p.ori_input)
print('transformation_input:', p.transformation_input)
if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
print('\nParameters:')
print('mse_mode:', p.mse_mode)
print('stage:', p.stage)
print('init:', p.run_flag)
print('test_mode:', p.test_flag)
print('total number of layers: {}'.format(p.L))
for l_tmp in range(1, p.l+1):
print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_'+str(l_tmp))))
print('learning_rate:', p.learning_rate)
print('reg_coef:', p.reg_coef)
print('batch_size:', p.batch_size)
print('sample_size: ', p.sample_size)
print('pIn:', p.pIn)
print('pHidden:', p.pHidden)
print('max_training_epochs:', p.max_training_epochs)
print('display_step', p.display_step)
print('snapshot_step', p.snapshot_step)
elif p.mode == 'analysis':
print('fname_imputation:', p.fname_imputation)
print('transformation_imputation', p.transformation_imputation)
print('fname_ground_truth: ', p.fname_ground_truth)
print('transformation_ground_truth', p.transformation_ground_truth)
print('gene_pair_list: ', p.gene_pair_list)
print('\n')
def read_data(p):
'''READ DATA
Parameters
------------
p:
Return
-----------
'''
print('>READING DATA..')
print('RAM usage before reading data: {} M'.format(usage()))
if p.fname_input.endswith('h5'):
# for 10x genomics large h5 files
input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input,
p.ori_input)
# gene_be_matrix.matrix = input_obj.matrix.log1p()
input_matrix = input_obj.matrix
gene_ids = input_obj.gene_ids
cell_ids = input_obj.barcodes
print('RAM usage after reading sparse matrix: {} M'.format(usage()))
gc.collect()
# Data Transformation
print('> DATA TRANSFORMATION..')
input_matrix = scimpute.sparse_matrix_transformation(input_matrix,
p.transformation_input)
del(input_obj)
gc.collect()
print('RAM usage after {} transformation: {} M'.format(p.transformation_input,
usage()))
# Test or not: m*n subset (1000 * 300). Delete later
if p.test_flag:
print('in test mode')
input_matrix = input_matrix[:p.m, :p.n]
gene_ids = gene_ids[:p.n]
cell_ids = cell_ids[:p.m]
gc.collect()
else:
# For smaller files (hd5, csv, csv.gz)
input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
print('RAM usage after reading input_df: {} M'.format(usage()))
# Data Transformation
print('> DATA TRANSFORMATION..')
input_df = scimpute.df_transformation(
input_df.transpose(),
transformation=p.transformation_input
).transpose() # [genes, cells] in df_trans()
print('pandas input_df mem usage: ')
input_df.info(memory_usage='deep')
# Test or not
if p.test_flag:
print('in test mode')
input_df = input_df.ix[:p.m, :p.n]
gc.collect()
# To sparse
input_matrix = csr_matrix(input_df) # todo: directly read into csr, get rid of input_df
gene_ids = input_df.columns
cell_ids = input_df.index
print('RAM usage before deleting input_df: {} M'.format(usage()))
del(input_df)
gc.collect() # working on mac
print('RAM usage after deleting input_df: {} M'.format(usage()))
# Summary of data
print("name_input:", p.name_input)
_ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20],
columns=gene_ids[:4])
print("input_df:\n", _, "\n")
m, n = input_matrix.shape # m: n_cells; n: n_genes
print('input_matrix: {} cells, {} genes\n'.format(m, n))
return input_matrix, gene_ids, cell_ids
def load_results(p):
'''READ DATA
Parameters
------------
p: parameters from global_params.py and example.py
Return
-----------
X: input data matrix; genes in columns (same below)
Y: imputed data matrix
G: ground truth
'''
# print('>READING DATA..')
# X = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
X, gene_ids, cell_ids = read_data(p)
X = pd.DataFrame(data=X.todense(), index=cell_ids,
columns=gene_ids)
Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
if p.fname_input == p.fname_ground_truth:
G = X
else:
G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)
# print('> DATA TRANSFORMATION..')
Y = scimpute.df_transformation(Y.transpose(), transformation=p.transformation_imputation).transpose()
# X = scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose()
if p.fname_input == p.fname_ground_truth:
G = X
else:
G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose()
# subset/sort X, G to match Y
# todo: support sparse matrix
X = X.loc[Y.index, Y.columns]
G = G.loc[Y.index, Y.columns]
# TEST MODE OR NOT
if p.test_flag:
print('in test mode')
Y = Y.ix[0:p.m, 0:p.n]
G = G.ix[0:p.m, 0:p.n]
X = X.ix[0:p.m, 0:p.n]
# INPUT SUMMARY
print('\nIn this code, matrices should have already been transformed into cell_row')
print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,'\n', Y.ix[0:20, 0:3])
print('X (input):', p.fname_input, p.ori_input, p.transformation_input,'\n', X.ix[0:20, 0:3])
print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,'\n', G.ix[0:20, 0:3])
print('Y.shape', Y.shape)
print('X.shape', X.shape)
print('G.shape', G.shape)
return X, Y, G
def calculate_MSEs(X, Y, G):
'''calculate MSEs
MSE between imputation and input
MSE between imputation and ground truth
Parameters
------------
X: input data matrix; genes in columns (same below)
Y: imputed data matrix
G: ground truth
Return
-----------
4 MSEs
'''
print('\n> MSE Calculation')
max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
print('Max in G is {}, Min in G is {}'.format(max_g, min_g))
mse1_nz = scimpute.mse_omega(Y, X)
mse1_nz = round(mse1_nz, 7)
print('MSE1_NZ between Imputation and Input: ', mse1_nz)
mse1 = scimpute.mse(Y, X)
mse1 = round(mse1, 7)
print('MSE1 between Imputation and Input: ', mse1)
mse2_nz = scimpute.mse_omega(Y, G)
mse2_nz = round(mse2_nz, 7)
print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
mse2 = scimpute.mse(Y, G)
mse2 = round(mse2, 7)
print('MSE2 between Imputation and Ground_truth: ', mse2)
return mse1_nz, mse1, mse2_nz, mse2
def analyze_variation_in_genes(X, Y, G, p):
'''calculate and visualize standard deviation in each gene
write SDs to files
plot histograms of SDs
Parameters
------------
X: input data matrix; genes in columns (same below)
Y: imputed data matrix
G: ground truth
p: parameters
Return
-----------
None
'''
print('\n calculating standard deviation in each gene for input and imputed matrix')
x_std_df, y_std_df = scimpute.nz_std(X, Y)
x_std_df, g_std_df = scimpute.nz_std(X, G) # purpose: compare G with Y
#std_ratio_yx_df = pd.DataFrame(data= y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio'])
#std_ratio_yg_df = pd.DataFrame(data= y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio'])
std_ratio_yx_data = [(y/x if x!=0 else None) for y, x in zip(y_std_df.values, x_std_df.values)]
std_ratio_yx_df =pd.DataFrame(data = std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
std_ratio_yg_data = [(y/x if x!=0 else None) for y, x in zip(y_std_df.values, g_std_df.values)]
std_ratio_yg_df = pd.DataFrame(data= std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])
std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())
print('generating histograms of standard deviations')
scimpute.hist_df(
y_std_df,
xlab='Standard Deviation', title='Imputation({})'.format(p.name_imputation),
range=(std_min, std_max),
dir=p.tag)
scimpute.hist_df(
x_std_df,
xlab='Standard Deviation', title='Input({})'.format(p.name_input),
range=(std_min, std_max),
dir=p.tag)
scimpute.hist_df(
g_std_df,
xlab='Standard Deviation', title='Ground Truth({})'.format(p.name_input),
range=(std_min, std_max),
dir=p.tag)
scimpute.hist_df(
std_ratio_yx_df,
xlab='Ratio of Imputation SD vs Input SD',
title='',
range=(std_min, std_max),
dir=p.tag)
scimpute.hist_df(
std_ratio_yg_df,
xlab='Ratio of Imputation SD vs Ground Truth SD',
title='',
range=(std_min, std_max),
dir=p.tag)
std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
def visualize_all_genes(X, Y, G, p):
''' generate plots using all genes
Parameters
------------
X: input data matrix; genes in columns (same below)
Y: imputed data matrix
G: ground truth
p: parameters
Return
-----------
None
'''
# histograms of gene expression
max_expression = max(G.values.max(), X.values.max(), Y.values.max())
min_expression = min(G.values.min(), X.values.min(), Y.values.min())
print('\n max expression:', max_expression)
print('\n min expression:', min_expression)
scimpute.hist_df(
Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation),
dir=p.tag, range=[min_expression, max_expression])
scimpute.hist_df(
X, xlab='Expression', title='Input({})'.format(p.name_input),
dir=p.tag, range=[min_expression, max_expression])
scimpute.hist_df(
G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),
dir=p.tag, range=[min_expression, max_expression])
# histograms of correlations between genes in imputation and ground truth
# and of correlations between cells in imputation and ground truth
# when ground truth is not provided,
# input is used as ground truth
print('\n> Correlations between ground truth and imputation')
print('ground truth dimension: ', G.shape, 'imputation dimension: ', Y.shape)
print('generating histogram for correlations of genes between ground truth and imputation')
scimpute.hist_2matrix_corr(
G.values, Y.values,
title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".
format(p.name_ground_truth, p.name_imputation),
dir=p.tag, mode='column-wise', nz_mode='first' # or ignore
)
print('generating histogram for correlations of cells between ground truth and imputation')
scimpute.hist_2matrix_corr(
G.values, Y.values,
title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".
format(p.name_ground_truth, p.name_imputation),
dir=p.tag, mode='row-wise', nz_mode='first'
)
# heatmaps of data matrices
print('\n> Generating heatmaps of data matrices')
range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
print('\nrange:', range_max, ' ', range_min)
scimpute.heatmap_vis(Y.values,
title='Imputation ({})'.format(p.name_imputation),
xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
scimpute.heatmap_vis(X.values,
title='Input ({})'.format(p.name_input),
xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
scimpute.heatmap_vis(G.values,
title='Ground_truth ({})'.format(p.name_ground_truth),
xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
# PCA and tSNE plots
print('\n> Generating PCA and tSNE plots')
if p.cluster_file is not None:
cluster_info = scimpute.read_data_into_cell_row(p.cluster_file)
# cluster_info = cluster_info.astype('str')
else:
cluster_info = None
scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info,
title=p.name_imputation, dir=p.tag)
scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info,
title=p.name_input, dir=p.tag)
scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info,
title=p.name_ground_truth, dir=p.tag)
def visualize_selected_genes(X, Y, G, p):
''' generate plots for genes specified by the user
Parameters
------------
X: input data matrix; genes in columns (same below)
Y: imputed data matrix
G: ground truth
p: parameters
Return
-----------
None
'''
gene_pair_dir = p.tag+'/pairs'
List = p.gene_pair_list
print("\n> Scatterplots of selected gene pairs")
scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)
scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir)
scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)
print("\n> Scatterplots for selected genes")
print("ground truth vs imputation, ground truth vs input")
gene_dir = p.tag+'/genes'
# generate a list of genes using the gene_pair_list
gene_list = [gene for pair in List for gene in pair]
for j in gene_list:
try:
print('for ', j)
Y_j = Y.ix[:, j]
G_j = G.ix[:, j]
X_j = X.ix[:, j]
except KeyError:
print('KeyError: gene ID does not exist')
continue
scimpute.scatterplot2(G_j, Y_j, range='same',
title=str(str(j) + '\n(Ground Truth vs Imputation) '),
xlabel='Ground Truth',
ylabel='Imputation',
dir=gene_dir
)
scimpute.scatterplot2(G_j, X_j, range='same',
title=str(str(j) + '\n(Ground Truth vs Input) '),
xlabel='Ground Truth',
ylabel='Input',
dir=gene_dir
)
# Discretize gene expression values
# and re-generate pairwise plots
Y = scimpute.df_exp_discretize_log10(Y)
print('\n> Discrete gene pair relationship in imputation')
gene_pair_dir = p.tag+'/pairs_discrete'
# List = p.gene_pair_list
scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ',
dir=gene_pair_dir)
print("\n> Discrete imputation vs ground truth")
gene_dir = p.tag+'/genes_discrete'
for j in gene_list:
try:
print('for ', j)
Y_j = Y.ix[:, j]
G_j = G.ix[:, j]
X_j = X.ix[:, j]
except KeyError:
print('KeyError: gene ID does not exist')
continue
scimpute.scatterplot2(G_j, Y_j, range='same',
title=str(str(j) + '\n(Ground_truth vs Imputation) '),
xlabel='Ground Truth',
ylabel='Imputation',
dir=gene_dir
)
scimpute.scatterplot2(G_j, X_j, range='same',
title=str(str(j) + '\n(Ground_truth vs Input) '),
xlabel='Ground Truth',
ylabel='Input',
dir=gene_dir
)
def result_analysis_main(p):
'''analyzing imputation output
Parameters
------------
p: parameters from global_params.py and example.py
Return
-----------
None
'''
# load imputation results and input data
X, Y, G = load_results(p)
# calculate MSEs
mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)
# calculate and visualize variation in genes
analyze_variation_in_genes(X, Y, G, p)
# visualize results using all genes
visualize_all_genes(X, Y, G, p)
# visualize selected genes
visualize_selected_genes(X, Y, G, p)
def parse_args(argv):
parser = argparse.ArgumentParser(description = 'Help information')
parser.add_argument('-mode', help='mode options: pre-training | late | translate | impute | analysis')
parser.add_argument('-infile', help='file path of input data')
return parser.parse_args(argv)
if __name__ == '__main__':
##1. load parameter module and use name 'p'
#print("Usage: python late.py -mode <late> -infile <xx.hd5>")
argms = parse_args(sys.argv[1:])
p = load_params(argms.mode, argms.infile)
if p.mode =='invalid':
exit(0)
##2. refresh folder
log_dir = './{}'.format(p.stage)
scimpute.refresh_logfolder(log_dir)
tic_start = time.time()
#3. load data
input_matrix, gene_ids, cell_ids = read_data(p)
#4. call late
late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state = 3)
toc_stop = time.time()
time_finish = round((toc_stop - tic_start), 2)
print("Imputation Finished!")
print("Wall Time Used: {} seconds".format(time_finish)) | 2.328125 | 2 |
irc/messages/message.py | AlexGustafsson/irc-news-bot | 0 | 12793243 | """IRC message."""
import re
from typing import Optional
from irc.messages.base import IRCBaseMessage
# Regex for matching the individual parts of an IRC message
private_message_regex = re.compile("^:([^!]+)!(.*?) (PRIVMSG|NOTICE) ([^ ]+) :(.*)")
class IRCMessage(IRCBaseMessage):
"""An IRC private message."""
def __init__( # pylint: disable=too-many-arguments
self,
raw_message: str,
author: str,
hostname: str,
is_notice: bool,
target: str,
message: str
) -> None:
super().__init__(raw_message)
self.__author = author
self.__hostname = hostname
self.__is_notice = is_notice
self.__target = target
self.__message = message
@property
def author(self) -> str:
"""The author of the message."""
return self.__author
@property
def hostname(self) -> str:
"""The hostname of the message's author."""
return self.__hostname
@property
def is_notice(self) -> bool:
"""Whether or not the message is a NOTICE."""
return self.__is_notice
@property
def target(self) -> str:
"""The target of the message."""
return self.__target
@property
def message(self) -> str:
"""The message itself."""
return self.__message
def __str__(self) -> str:
"""String representation of the message."""
if self.__is_notice:
return "NOTICE {} : {}".format(self.__author, self.__message)
return "PRIVMSG {} : {}".format(self.__author, self.__message)
@staticmethod
def parse(line: str) -> Optional["IRCMessage"]:
"""Parse a message."""
match = private_message_regex.match(line)
if not match:
return None
author, hostname, type, target, message = match.groups()
is_notice = type == "NOTICE"
return IRCMessage(line, author, hostname, is_notice, target, message)
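# Minimal usage sketch (not part of the public API; the sample line below is
# made up for illustration):
def _parse_example() -> None:
    msg = IRCMessage.parse(":alice!alice@example.org PRIVMSG #news :hello world")
    if msg is not None:
        assert msg.author == "alice"
        assert msg.target == "#news"
        assert msg.message == "hello world"
        assert not msg.is_notice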
| 3.21875 | 3 |
main/calendar-module/calendar-module.py | EliahKagan/old-practice-snapshot | 0 | 12793244 | #!/usr/bin/env python3
from calendar import day_name, weekday
month, day, year = map(int, input().split())
print(day_name[weekday(year, month, day)].upper())
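# Example (assumed input format "month day year" on a single line):
#   input : 08 05 2015
#   output: WEDNESDAY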
| 4.125 | 4 |
tests/class/method06.py | ktok07b6/polyphony | 83 | 12793245 | <reponame>ktok07b6/polyphony<gh_stars>10-100
from polyphony import testbench
class C:
def __init__(self, x):
self.x = x * x
def calc(self, x):
for i in range(x):
self.x += 1
return self.x
def method06(x):
return C(x).calc(x)
@testbench
def test():
assert 2 == method06(1)
assert 6 == method06(2)
assert 12 == method06(3)
test()
| 2.71875 | 3 |
plugins/nmap.py | hack654a/w12scan-client | 159 | 12793246 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/21 10:05 PM
# @Author : w8ay
# @File : nmap.py
import nmap
from lib.data import logger
def nmapscan(host, ports):
# Accepts the results (ports) produced by a masscan scan
# To allow multi-threaded use, this function can be called from multiple threads
nm = nmap.PortScanner()
argument = "-sV -sS -Pn --host-timeout 1m -p{}".format(','.join(ports))
try:
ret = nm.scan(host, arguments=argument)
except nmap.PortScannerError:
logger.debug("Nmap PortScannerError host:{}".format(host))
return None
except:
return None
# debug
elapsed = ret["nmap"]["scanstats"]["elapsed"]
command_line = ret["nmap"]["command_line"]
logger.debug("[nmap] successed,elapsed:%s command_line:%s" % (elapsed, command_line))
if host in ret["scan"]:
try:
result = ret["scan"][host]["tcp"]
except KeyError:
return None
return result
return None
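# Minimal usage sketch (illustrative only: the host and ports are placeholders,
# and running it requires nmap to be installed plus a reachable target):
def _example_scan():
    results = nmapscan('192.168.1.10', ['22', '80', '443'])
    if results:
        for port, info in results.items():
            print(port, info.get('state'), info.get('name'), info.get('product'))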
| 2.734375 | 3 |
Fig6-splocs/plotenergy.py | YuePengUSTC/AADR | 7 | 12793247 | <filename>Fig6-splocs/plotenergy.py<gh_stars>1-10
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import os
from IPython.core.pylabtools import figsize
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 20}
font2 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 16}
labels = ['', '', '', '']
colors = []
colors.append([0 / 255, 113 / 255, 188 / 255]) # 1
colors.append([216 / 255, 82 / 255, 24 / 255]) # 2
colors.append([236 / 255, 176 / 255, 31 / 255]) # 3
colors.append([125 / 255, 46 / 255, 141 / 255]) # 4
colors.append([118 / 255, 171 / 255, 47 / 255]) # 5
colors.append([76 / 255, 189 / 255, 237 / 255]) # 6
colors.append([255 / 255, 128 / 255, 0 / 255]) # 7
def plot_errmore(data, is_iter, id, is_dr, min_err):
len1 = np.shape(data)[0]
if is_iter:
x = np.linspace(0, len1, len1)
else:
x = data[:, 0] - data[0, 0]
# y = data[:, 2] # /data[0, 1]
y = data[:, 2] #/ data[0, 2]
if id == 0:
label1 = 'ADMM'
else:
if is_dr:
label1 = 'DR m=' + str(id)
else:
label1 = 'AA m=' + str(id)
l1, = plt.semilogy(x, y, label=label1, color=colors[id], linewidth=2.5)
# l1, = plt.plot(x, y, label=label1, color=colors[id], linewidth=2)
max_t = max(x)
return (l1, max_t)
def plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype):
len1 = np.shape(data)[0]
if is_iter:
x = np.linspace(0, len1, len1)
else:
x = data[:, 0] - data[0, 0]
if ytype == 'e':
y = data[:, 1] - min_err
else:
y = data[:, 2]
if id == 0:
label1 = 'ADMM'
else:
if is_dr:
label1 = 'ours' + resetype
else:
label1 = 'AA ADMM' + resetype
if ytype == 'r':
l1, = plt.semilogy(x, y, label=label1, color=colors[cid], linewidth=2.5)
else:
l1, = plt.semilogy(x, y, label=label1, color=colors[cid], linewidth=2.5)
max_t = max(x)
return (l1, max_t)
def plot_reset(data, is_iter):
len1 = np.shape(data)[0]
if is_iter:
x = np.linspace(0, len1, len1)
else:
x = data[:, 0] - data[0, 0]
y = data[:, 1] / data[0, 1]
reset = data[:, 2]
nx = []
ny = []
for i in range(1, len(reset)):
if reset[i] > reset[i - 1]:
nx.append(x[i])
ny.append(y[i])
plt.scatter(nx, ny, color='blue', alpha=0.6, s=20)
# max_t = max(x)
# return (l1, max_t)
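# Note on the assumed layout of the loaded result files (inferred from the
# readers above, not from any file spec): plot_errmore()/plot_err3() treat
# column 0 as wall time, column 1 as the energy f(x)+g(z) and column 2 as the
# combined residual, while plot_reset() treats column 1 as the energy and
# column 2 as a cumulative reset counter, i.e. it expects a different file.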
## diff m
if 0:
for fid in range(1, 11):
path = "D:/project/ADMMAA/BasisPursuit/res/"
# os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
savepath = "D:/project/ADMMAA/BasisPursuit/fig/" #'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' + str(fid) + '_'
# for is_iter in (1, 0):
is_iter = 1
outer = 0
for is_iter in range(0, 2):
for nmid in range(1, 2):
# outer = 0
#
# nmid = 1
mid = str(nmid)
ls = []
maxts = []
res = np.loadtxt(path + str(fid) + 'admm.txt')
(l1, maxt) = plot_errmore(res, is_iter, 0, nmid % 3 == 2, 0)
ls.append(l1)
maxts.append(maxt)
# min_err = min(res[:, 3])
for i in range(1, 7):
name = path + str(fid) + 'aaadmm' + str(i) + '.txt'
res = np.loadtxt(name)
# min_err1 = min(res[:, 3])
# min_err = min(min_err, min_err1)
(l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, 0)
ls.append(l1)
maxts.append(maxt)
# for i in range(1, 7):
# name = path + 'aadr' + str(i) + '.txt'
# res = np.loadtxt(name)
#
if is_iter:
plt.xlabel("#Iters", font1)
plt.xlim(0, 1000)
else:
plt.xlabel("Time(ms)", font1)
plt.xlim(0, max(maxts))
plt.legend(handles=ls, loc='best', prop=font2)
# plt.title('file' + str(fid) + ' outer ' + str(outer))
plt.tight_layout()
save_name = savepath + str(fid) + "emid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
print(save_name)
plt.savefig(save_name, transparent=True, dpi=150)
plt.clf()
# plt.show()
# AA-DR
if 1:
ytype = 'r'
for fid in range(1, 4):
path = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/"
# os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
savepath = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/"
# for is_iter in (1, 0):
is_iter = 1
outer = 1
nmid = 1
aa_admm_m = 6
aa_dr_m = 6
for is_iter in range(0, 2):
mid = str(nmid)
ls = []
maxts = []
maxis = []
name = path + str(fid) + '_mid0_outer_0.txt'
res1 = np.loadtxt(name)
# print(res)
name = path + str(fid) + '_mid1_outer_0.txt'
res2 = np.loadtxt(name)
name = path + str(fid) + '_mid2_outer_0.txt'
res3 = np.loadtxt(name)
# ADMM
min_err=0
(l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, ' ')
ls.append(l1)
maxts.append(maxt)
iter = 0
for eachiter in range(1, len(res1)):
if np.isnan([res1[eachiter, 0]]):
break
# if res3[eachiter, 2] == 0:
# break
iter = iter + 1
maxis.append(iter)
# print(iter)
# AA-ADMM
(l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err,ytype, ' ')
ls.append(l1)
maxts.append(maxt)
iter = 0
for eachiter in range(1, len(res2)):
if np.isnan([res2[eachiter, 0]]):
break
# if res2[eachiter, 2] == 0:
# break
iter = iter + 1
maxis.append(iter)
# print(iter)
## AA-DR
(l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err,ytype, '-PR')
ls.append(l1)
maxts.append(maxt)
iter = 0
for eachiter in range(1, len(res3)):
if np.isnan([res3[eachiter, 0]]):
break
# if res3[eachiter, 2] == 0:
# break
iter = iter + 1
maxis.append(iter)
# print(iter)
# plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype):
name = path + str(fid) + '_mid3_outer_0.txt'
res3 = np.loadtxt(name)
(l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 3, min_err, ytype, '-DRE')
ls.append(l1)
maxts.append(maxt)
iter = 0
for eachiter in range(1, len(res3)):
if np.isnan([res3[eachiter, 0]]):
break
# if res3[eachiter, 2] == 0:
# break
iter = iter + 1
maxis.append(iter)
if is_iter:
plt.xlabel("#Iters", font1)
plt.xlim(0, max(maxis))
else:
plt.xlabel("Time(s)", font1)
plt.xlim(0, max(maxts))
# plt.xlim(0, 50)
if ytype == 'r':
plt.ylabel("Combined residual", font1)
else:
plt.ylabel("f(x)+g(z)", font1)
# plt.ylabel("f(x)+g(z)", font1)
plt.legend(handles=ls, loc='best', prop=font2)
# plt.title('file' + str(fid))
plt.tight_layout()
save_name = savepath + str(fid) + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(is_iter) + "_outer_" + str(
outer) + ".png"
print(save_name)
plt.savefig(save_name, transparent=True, dpi=600)
plt.clf()
# AA-DR test mu
if 0:
for fid in range(1, 5):
path = "data/coma_data/res/f" + str(fid) + '_'
# os.system("mkdir D:\\project\\ADMMAA\\data\\test_mu\\file" + str(fid))
savepath = 'data/coma_data/fig/file' + str(fid) + '_'
# for is_iter in (1, 0):
is_iter = 1
outer = 0
nmid = 1
aa_admm_m = 6
aa_dr_m = 6
for mu in (10, 100, 1000, 10000, 100000, 1000000):
for is_iter in range(1, 2):
mid = str(nmid)
ls = []
maxts = []
res = np.loadtxt(path + 'mid0_mu' + str(mu) + '.txt')
# ADMM
# data, is_iter, id, is_dr, cid, min_err, ytype, resetype
(l1, maxt) = plot_err3(res, is_iter, 0, 0, 0, 0, ytype, ' ')
ls.append(l1)
maxts.append(maxt)
# AA-ADMM
res = np.loadtxt(path + 'mid1_mu' + str(mu) + '.txt')
(l1, maxt) = plot_err3(res, is_iter, 1, 0, 1, 0, ytype, ' ')
ls.append(l1)
maxts.append(maxt)
# plot_reset(res, is_iter)
## AA-DR
res = np.loadtxt(path + 'mid2_mu' + str(mu) + '.txt')
(l1, maxt) = plot_err3(res, is_iter, 2, 1, 2, 0, ytype, '-PR')
ls.append(l1)
maxts.append(maxt)
# plot_reset(res, is_iter)
res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt')
(l1, maxt) = plot_err3(res, is_iter, 2, 1, 3, 0, ytype, '-DRE')
ls.append(l1)
maxts.append(maxt)
if is_iter:
plt.xlabel("#Iters", font1)
plt.xlim(0, 100)
else:
plt.xlabel("Time(ms)", font1)
plt.xlim(0, max(maxts))
plt.legend(handles=ls, loc='best', prop=font2)
plt.title('file' + str(fid) + ' outer ' + str(outer))
plt.tight_layout()
save_name = savepath + "mu" + str(mu) + ".png"
print(save_name)
plt.savefig(save_name, transparent=True, dpi=100)
plt.clf()
def find_minerr(path):
min_errs = []
name = path + 'mid0_mu10_m' + str(1) + '.txt'
# print(name)
res = np.loadtxt(name)
min_errs.append(min(res[:, 1]))
for m in range(1, 7):
res = np.loadtxt(path + 'mid1_mu10_m' + str(m) + '.txt')
min_errs.append(min(res[:, 1]))
res = np.loadtxt(path + 'mid2_mu10_m' + str(m) + '.txt')
min_errs.append(min(res[:, 1]))
min_err = min(min_errs)
return min_err
# AA-DR each iters
if 0:
ytype = 'e'
for fid in range(1, 11):
# fid = 'monkey'
path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
# for is_iter in (1, 0):
is_iter = 1
outer = 1
nmid = 1
aa_admm_m = 6
aa_dr_m = 6
# for outer in (0, 10, 20, 30, 40, 50, 60, 70, 80, 83):
# for outer in (0, 10, 20, 30, 40, 50, 60):
# for outer in (0, 5, 10, 15, 20, 25):
# for outer in (0, 1, 2, 3, 4, 100, 101, 102, 103, 104, 191, 192, 193, 194, 195):
# for outer in (100000, 50000, 10000, 5000, 1000, 500, 100):
# for outer in (1000, 5000, 10000, 50000, 100000):
for outer in range(10, 11):
for is_iter in range(0, 2):
mid = str(nmid)
ls = []
maxts = []
min_err = find_minerr(path)
print(min_err)
# min_err = 0
# print(path + 'mid0_outer' + str(outer) + '.txt')
res1 = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
# print(path + 'mid1_m' + str(aa_admm_m) + '_outer' + str(outer) + '.txt')
res2 = np.loadtxt(path + 'mid1_mu' + str(outer) + '_m' + str(aa_admm_m) + '.txt')
# print(path + 'res_mid2_m' + str(aa_dr_m) + '_outer' + str(outer) + '.txt')
res3 = np.loadtxt(path + 'mid2_mu' + str(outer) + '_m' + str(aa_dr_m) + '.txt')
# ADMM
(l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, '')
ls.append(l1)
maxts.append(maxt)
# AA-ADMM
(l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, '')
ls.append(l1)
maxts.append(maxt)
## AA-DR
(l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '')
ls.append(l1)
maxts.append(maxt)
if is_iter:
plt.xlabel("#Iters", font1)
plt.xlim(0, 1500)
else:
plt.xlabel("Time(ms)", font1)
plt.xlim(0, max(maxts))
if ytype == 'e':
plt.ylabel('Energy', font1)
else:
plt.ylabel('Combined residual', font1)
plt.legend(handles=ls, loc='best', prop=font2)
plt.title('file' + str(fid) + ' mu ' + str(outer))
plt.tight_layout()
save_name = savepath + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(
is_iter) + "_outer_" + str(outer) + "_5.png"
print(save_name)
plt.savefig(save_name, transparent=True, dpi=150)
plt.clf()
# diff m each iters
if 0:
fid = 'monkey'
# for outer in (0, 10, 20, 30, 40, 50, 60, 70, 80, 83):
# for outer in (0, 10, 20, 30, 40, 50, 60):
# for outer in (0, 5, 10, 15, 20, 25):
for fid in range(1, 11):
for outer in range(10, 11):
path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
# for is_iter in (1, 0):
is_iter = 1
for is_iter in range(0, 2):
for nmid in range(1, 3):
mid = str(nmid)
ls = []
maxts = []
# min_err = find_minerr(path, outer)
min_err = 0
# if nmid > 3:
# res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt')
# else:
res = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
(l1, maxt) = plot_errmore(res, is_iter, 0, 0, min_err)
ls.append(l1)
maxts.append(maxt)
for i in range(1, 7):
res = np.loadtxt(path + 'mid' + mid + '_mu' + str(outer)+ '_m' + str(i) + '.txt')
(l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, min_err)
ls.append(l1)
maxts.append(maxt)
if is_iter:
plt.xlabel("#Iters", font1)
plt.xlim(0, 2000)
else:
plt.xlabel("Time(ms)", font1)
plt.xlim(0, max(maxts))
plt.ylabel("Combined residual", font1)
plt.legend(handles=ls, loc='best', prop=font2)
plt.title('file' + str(fid) + ' mu ' + str(outer))
plt.tight_layout()
save_name = savepath + "mid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
print(save_name)
plt.savefig(save_name, transparent=True, dpi=150)
plt.clf()
# plt.show()
| 2.203125 | 2 |
mysite/apps/repo/migrations/0002_auto_20190811_2046.py | lirixiang123/question_repo | 0 | 12793248 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-11 12:46
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('repo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='QuestionsCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now=True, verbose_name='收藏/取消时间')),
('status', models.BooleanField(default=True, verbose_name='收藏状态')),
],
options={
'verbose_name': '收藏记录',
'verbose_name_plural': '收藏记录',
},
),
migrations.AlterField(
model_name='questions',
name='answer',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='题目答案'),
),
migrations.AlterField(
model_name='questions',
name='content',
field=ckeditor_uploader.fields.RichTextUploadingField(null=True, verbose_name='题目详情'),
),
migrations.AddField(
model_name='questionscollection',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to='repo.Questions', verbose_name='问题'),
),
migrations.AddField(
model_name='questionscollection',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to=settings.AUTH_USER_MODEL, verbose_name='收藏者'),
),
]
| 1.742188 | 2 |
route.py | PeterRyder/EveTradeRoute | 0 | 12793249 | <gh_stars>0
class Route:
def __init__(self, c, s, e):
self.client = c
self.start = s
self.end = e
#self.start = self.get_system(s)
#self.end = self.get_system(e)
def __repr__(self):
return str(self)
def __str__(self):
system_ids = self.get_distance()
return str([self.get_system(id)["name"] for id in system_ids])
def get_system(self, s):
result = self.client.Universe.post_universe_names(
ids=[s]
).result()
return result[0]
def get_distance(self):
return self.client.Routes.get_route_origin_destination(
datasource="tranquility",
flag="shortest",
origin=self.start,
destination=self.end
).result()
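# Minimal usage sketch (illustrative only: `client` is assumed to be a
# configured ESI swagger client, and the two numbers are placeholder
# solar-system IDs for the start and destination):
def _example_route(client):
    route = Route(client, 30000142, 30002187)
    print(route)  # prints the names of the systems along the shortest route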
| 2.78125 | 3 |
centrality.py | cerebis/MetaCarvel | 0 | 12793250 | import tqdm
import networkx as nx
import argparse
import numpy as np
import multiprocessing
import graph_tool as gt
from graph_tool.centrality import betweenness
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph", help='bundled graph')
parser.add_argument("-l","--length",help="contig length")
parser.add_argument("-o","--output",help="output file")
args = parser.parse_args()
G = nx.Graph()
cpus = multiprocessing.cpu_count()
print('Using {} cpus'.format(cpus))
print('Loading bundled graph...')
with open(args.graph,'r') as f:
for line in tqdm.tqdm(f, desc='Reading bundled'):
attrs = line.split()
G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3])
node_set = set(G.nodes())
print('Loading contig lengths...')
contig_length = {}
with open(args.length,'r') as f:
for line in tqdm.tqdm(f, desc='Reading lengths'):
attrs = line.split()
if attrs[0] in node_set:
contig_length[attrs[0]] = int(attrs[1])
del node_set
nx.set_node_attributes(G,'length',contig_length)
repeat_nodes = {}
def get_prop_type(value, key=None):
"""
Performs typing and value conversion for the graph_tool PropertyMap class.
If a key is provided, it also ensures the key is in a format that can be
used with the PropertyMap. Returns a tuple, (type name, value, key)
"""
if isinstance(key, unicode):
# Encode the key as ASCII
key = key.encode('ascii', errors='replace')
# Deal with the value
if isinstance(value, bool):
tname = 'bool'
elif isinstance(value, int):
tname = 'float'
value = float(value)
elif isinstance(value, float):
tname = 'float'
elif isinstance(value, unicode):
tname = 'string'
value = value.encode('ascii', errors='replace')
elif isinstance(value, dict):
tname = 'object'
else:
tname = 'string'
value = str(value)
return tname, value, key
def nx2gt(nxG):
"""
Converts a networkx graph to a graph-tool graph.
"""
# Phase 0: Create a directed or undirected graph-tool Graph
gtG = gt.Graph(directed=nxG.is_directed())
# Add the Graph properties as "internal properties"
for key, value in nxG.graph.items():
# Convert the value and key into a type for graph-tool
tname, value, key = get_prop_type(value, key)
prop = gtG.new_graph_property(tname) # Create the PropertyMap
gtG.graph_properties[key] = prop # Set the PropertyMap
gtG.graph_properties[key] = value # Set the actual value
# Phase 1: Add the vertex and edge property maps
# Go through all nodes and edges and add seen properties
# Add the node properties first
nprops = set() # cache keys to only add properties once
for node, data in nxG.nodes_iter(data=True):
# Go through all the properties if not seen and add them.
for key, val in data.items():
if key in nprops: continue # Skip properties already added
# Convert the value and key into a type for graph-tool
tname, _, key = get_prop_type(val, key)
prop = gtG.new_vertex_property(tname) # Create the PropertyMap
gtG.vertex_properties[key] = prop # Set the PropertyMap
# Add the key to the already seen properties
nprops.add(key)
# Also add the node id: in NetworkX a node can be any hashable type, but
# in graph-tool node are defined as indices. So we capture any strings
# in a special PropertyMap called 'id' -- modify as needed!
gtG.vertex_properties['id'] = gtG.new_vertex_property('string')
# Add the edge properties second
eprops = set() # cache keys to only add properties once
for src, dst, data in nxG.edges_iter(data=True):
# Go through all the edge properties if not seen and add them.
for key, val in data.items():
if key in eprops: continue # Skip properties already added
# Convert the value and key into a type for graph-tool
tname, _, key = get_prop_type(val, key)
prop = gtG.new_edge_property(tname) # Create the PropertyMap
gtG.edge_properties[key] = prop # Set the PropertyMap
# Add the key to the already seen properties
eprops.add(key)
# Phase 2: Actually add all the nodes and vertices with their properties
# Add the nodes
vertices = {} # vertex mapping for tracking edges later
for node, data in nxG.nodes_iter(data=True):
# Create the vertex and annotate for our edges later
v = gtG.add_vertex()
vertices[node] = v
# Set the vertex properties, not forgetting the id property
data['id'] = str(node)
for key, value in data.items():
gtG.vp[key][v] = value # vp is short for vertex_properties
# Add the edges
for src, dst, data in nxG.edges_iter(data=True):
# Look up the vertex structs from our vertices mapping and add edge.
e = gtG.add_edge(vertices[src], vertices[dst])
# Add the edge properties
for key, value in data.items():
gtG.ep[key][e] = value # ep is short for edge_properties
# Done, finally!
return gtG
def get_centrality(subg):
# centralities = nx.betweenness_centrality(subg)
# print(centralities)
_g = nx2gt(subg)
centralities, _ = betweenness(_g)
v = centralities.get_array()
mean = float(np.mean(v))
stdev = float(np.std(v))
for node in _g.vertices():
if centralities[node] >= mean + 3*stdev:
repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node]
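# A standalone sketch of the same outlier rule used above (illustrative only):
# a node is flagged as a repeat when its betweenness centrality is at least
# three standard deviations above the mean within its component.
def _is_outlier(values):
    v = np.asarray(values, dtype=float)
    return v >= v.mean() + 3 * v.std()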
def centrality_wrapper(graph):
n_comp = nx.number_connected_components(graph)
print('The graph has {} components'.format(n_comp))
for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'):
if len(subg.nodes()) >= 50:
get_centrality(subg)
G_copy = G.copy()
print('Writing output...')
ofile = open(args.output,'w')
for i in xrange(3):
centrality_wrapper(G_copy)
for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'):
if G_copy.has_node(node):
G_copy.remove_node(node)
ofile.write(str(node)+'\t'+str(repeat_nodes[node])+'\n')
#for u,v,data in G_copy.edges(data=True):
# print u +"\t"+data[u][v]['ori'][0]+v+"\t"+data[u][v]['ori'][1]+"\t"+str(data[u][v]["mean"])+"\t"+str(data[u][v]["stdev"])+"\t"+str(data[u][v]["bsize"])
#nx.write_gml(G_copy,args.output)
| 2.328125 | 2 |