max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
apps/cancer_drug_response/data_gen.py | agave233/PaddleHelix | 454 | 12700237 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import paddle
from pgl.utils.data import Dataset as BaseDataset
from pgl.utils.data import Dataloader
import pgl
from pgl.utils.logger import log
class Dataset(BaseDataset):
"""
    Dataset for CDR (cancer drug response)
"""
def __init__(self, processed_data):
self.data = processed_data
self.keys = list(processed_data.keys())
self.num_samples = len(processed_data[self.keys[0]])
def __getitem__(self, idx):
return self.data[self.keys[0]][idx], self.data[self.keys[1]][idx], self.data[self.keys[2]][idx], \
self.data[self.keys[3]][idx], self.data[self.keys[4]][idx]
def get_data_loader(self, batch_size, num_workers=1,
shuffle=False, collate_fn=None):
"""Get dataloader.
Args:
batch_size (int): number of data items in a batch.
num_workers (int): number of parallel workers.
            shuffle (bool): whether to shuffle the yielded data.
            collate_fn: callable function that collates batch data into a list of paddle tensors.
"""
return Dataloader(
self,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
collate_fn=collate_fn)
def __len__(self):
return self.num_samples
def collate_fn(batch_data):
"""
    Collation function that groups a batch of samples into per-field lists
:param batch_data: batch data
"""
graphs = []
mut, gexpr, met, Y = [], [], [], []
for g, mu, gex, me, y in batch_data:
graphs.append(g)
mut.append(mu)
gexpr.append(gex)
met.append(me)
Y.append(y)
return graphs, mut, gexpr, met, Y
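# A minimal, hedged usage sketch of the pieces above; the key names and array shapes
# are illustrative assumptions, since the real processed_data dict only needs five
# equal-length entries ordered as (graph, mutation, gexpr, methylation, label).
if __name__ == "__main__":
    demo_data = {
        'graph': [None] * 8,             # pgl graphs in the real pipeline
        'mutation': np.zeros((8, 4)),
        'gexpr': np.zeros((8, 4)),
        'methylation': np.zeros((8, 4)),
        'label': np.zeros(8),
    }
    dataset = Dataset(demo_data)
    loader = dataset.get_data_loader(batch_size=4, shuffle=True, collate_fn=collate_fn)
    for graphs, mut, gexpr, met, y in loader:
        log.info('collated a batch of %d samples' % len(y))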
|
pyflux/gpnarx/gpnarx.py | ThomasHoppe/pyflux | 2,091 | 12700243 | import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.linalg as la
import scipy.sparse as sp
import scipy.stats as ss
from scipy.stats import multivariate_normal
from .. import arma
from .. import output as op
from .. import tests as tst
from .. import tsm as tsm
from .. import data_check as dc
from .kernels import *
class GPNARX(tsm.TSM):
""" Inherits time series methods from TSM class.
**** GAUSSIAN PROCESS NONLINEAR AUTOREGRESSIVE (GP-NARX) MODELS ****
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
ar : int
Field to specify how many AR terms the model will have.
kernel : kernel object
For example, SquaredExponential() or OrnsteinUhlenbeck()
integ : int (default : 0)
        Specifies how many times to difference the time series.
target : str (pd.DataFrame) or int (np.array)
Specifies which column name or array index to use. By default, first
column/array will be selected as the dependent variable.
"""
def __init__(self, data, ar, kernel, integ=0, target=None):
# Initialize TSM object
super(GPNARX,self).__init__('GPNARX')
# Latent variables
self.ar = ar
if ar < 1:
raise ValueError('Cannot have less than 1 AR term!')
self.integ = integ
self.max_lag = self.ar
self.model_name = 'GPNARX(' + str(self.ar) + ')'
self._z_hide = 0 # Whether to cutoff variance latent variables from results
self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
self.default_method = "MLE"
self.multivariate_model = False
# Format the data
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data_original = self.data.copy()
# Difference data
for order in range(self.integ):
self.data = np.diff(self.data)
self.data_name = "Differenced " + self.data_name
self.index = self.index[self.integ:len(self.index)]
# Apply normalization
self.data_full = self.data.copy()
self.data = np.array(self.data_full[self.max_lag:self.data_full.shape[0]]) # adjust for lags
self._norm_mean = np.mean(self.data)
self._norm_std = np.std(self.data)
self.data = (self.data - self._norm_mean) / self._norm_std
self.data_full = (self.data_full - self._norm_mean) / self._norm_std
self.kernel = kernel
self.kernel.X = self.X().T
# Define latent variables
self._create_latent_variables()
self.neg_loglik = self.full_neg_loglik
def _alpha(self, L):
""" Covariance-derived term to construct expectations. See Rasmussen & Williams.
Parameters
----------
L : np.ndarray
            Lower-triangular Cholesky factor of the covariance matrix
Returns
----------
np.ndarray (alpha)
"""
return la.cho_solve((L.T, True), la.cho_solve((L, True), np.transpose(self.data)))
def _construct_predict(self, beta, h):
""" Creates h-step ahead forecasts for the Gaussian process
Parameters
----------
beta : np.array
Contains untransformed starting values for the latent variables
h: int
How many steps ahead to forecast
Returns
----------
- predictions
- variance of predictions
"""
# Refactor this entire code in future
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
Xstart = self.X().copy()
Xstart = [i for i in Xstart]
predictions = np.zeros(h)
variances = np.zeros(h)
for step in range(0,h):
Xstar = []
for lag in range(0,self.max_lag):
if lag == 0:
if step == 0:
Xstar.append([self.data[-1]])
Xstart[0] = np.append(Xstart[0],self.data[-1])
else:
Xstar.append([predictions[step-1]])
Xstart[0] = np.append(Xstart[0],predictions[step-1])
else:
Xstar.append([Xstart[lag-1][-2]])
Xstart[lag] = np.append(Xstart[lag],Xstart[lag-1][-2])
Kstar = self.kernel.Kstar(parm, np.transpose(np.array(Xstar)))
L = self._L(parm)
alpha = self._alpha(L)
predictions[step] = np.dot(np.transpose(Kstar), alpha)
v = la.cho_solve((L, True), Kstar)
variances[step] = self.kernel.Kstarstar(parm, np.transpose(np.array(Xstar))) - np.dot(v.T, v)
return predictions, variances, predictions - 1.98*np.power(variances,0.5), predictions + 1.98*np.power(variances,0.5)
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
# Create latent variables
for no, i in enumerate(self.kernel.build_latent_variables()):
self.latent_variables.add_z(i[0],i[1],i[2])
self.latent_variables.z_list[no].start = i[3]
self.z_no = len(self.kernel.build_latent_variables())
# Use an ARIMA model to find starting point for the initial noise latent variable
arma_start = arma.ARIMA(self.data, ar=self.ar, ma=0, integ=self.integ)
x = arma_start.fit()
arma_starting_values = arma_start.latent_variables.get_z_values()
self.latent_variables.z_list[0].start = np.log(np.exp(np.power(arma_starting_values[-1],2)))
def _L(self, parm):
""" Creates cholesky decomposition of covariance matrix
Parameters
----------
parm : np.array
Contains transformed latent variables
Returns
----------
        The Cholesky decomposition (L) of K
"""
return np.linalg.cholesky(self.kernel.K(parm) + np.identity(self.X().shape[1])*parm[0])
def X(self):
""" Creates design matrix of variables to use in GP regression
Returns
----------
The design matrix
"""
if self.ar == 1:
return np.array([self.data_full[(self.max_lag-1):-1]])
else:
for i in range(0,self.ar):
datapoint = self.data_full[(self.max_lag-i-1):-i-1]
if i == 0:
X = datapoint
else:
X = np.vstack((X,datapoint))
return X
def expected_values(self, beta):
""" Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
alpha = self._alpha(L)
return np.dot(np.transpose(self.kernel.K(parm)), alpha)
def variance_values(self, beta):
""" Covariance matrix for the estimated function
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
Covariance matrix for the estimated function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
v = la.cho_solve((L, True), self.kernel.K(parm))
return self.kernel.K(parm) - np.dot(v.T, v)
def full_neg_loglik(self, beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
        The negative log marginal likelihood of the model
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
return -(-0.5*(np.dot(np.transpose(self.data),self._alpha(L))) - np.log(np.diag(L)).sum() - (self.data.shape[0]/2.0)*np.log(2.0*np.pi))
def plot_fit(self, intervals=True, **kwargs):
""" Plots the fit of the Gaussian process model to the data
Parameters
----------
intervals : Boolean
Whether to plot uncertainty intervals or not
Returns
----------
None (plots the fit of the function)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
date_index = self.index[self.max_lag:]
expectation = self.expected_values(self.latent_variables.get_z_values())
variance = self.variance_values(self.latent_variables.get_z_values())
upper = expectation + 1.98*np.power(np.diag(variance),0.5)
lower = expectation - 1.98*np.power(np.diag(variance),0.5)
plt.figure(figsize=figsize)
plt.subplot(2, 2, 1)
plt.title(self.data_name + " Raw")
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k')
plt.subplot(2, 2, 2)
plt.title(self.data_name + " Raw and Expected")
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k',alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.subplot(2, 2, 3)
plt.title(self.data_name + " Raw and Expected (with intervals)")
if intervals == True:
plt.fill_between(date_index, lower*self._norm_std + self._norm_mean, upper*self._norm_std + self._norm_mean, alpha=0.2)
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k',alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.subplot(2, 2, 4)
plt.title("Expected " + self.data_name + " (with intervals)")
if intervals == True:
plt.fill_between(date_index, lower*self._norm_std + self._norm_mean, upper*self._norm_std + self._norm_mean, alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.show()
def plot_predict(self, h=5, past_values=20, intervals=True,**kwargs):
""" Plots forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
- Error bars, forecasted_values, plot_values, plot_index
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, variance, lower, upper = self._construct_predict(self.latent_variables.get_z_values(),h)
full_predictions = np.append(self.data,predictions)
full_lower = np.append(self.data,lower)
full_upper = np.append(self.data,upper)
date_index = self.shift_dates(h)
# Plot values (how far to look back)
plot_values = full_predictions[-h-past_values:]*self._norm_std + self._norm_mean
plot_index = date_index[-h-past_values:]
# Lower and upper intervals
lower = np.append(full_predictions[-h-1],lower)
upper = np.append(full_predictions[-h-1],upper)
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:],
lower*self._norm_std + self._norm_mean,
upper*self._norm_std + self._norm_mean,
alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
def predict_is(self, h=5, fit_once=True):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
x = GPNARX(ar=self.ar,kernel=self.kernel,integ=self.integ,
data=self.data_original[:-h+t])
if fit_once is False:
x.fit(printer=False)
if t == 0:
if fit_once is True:
x.fit(printer=False)
saved_lvs = x.latent_variables
predictions = x.predict(1)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(1)])
predictions.rename(columns={0:self.data_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions
def plot_predict_is(self, h=5, fit_once=True, **kwargs):
""" Plots forecasts with the estimated model against data
(Simulated prediction with data)
Parameters
----------
h : int (default : 5)
How many steps to forecast
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- Plot of the forecast against data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
date_index = self.index[-h:]
predictions = self.predict_is(h, fit_once=fit_once)
data = self.data[-h:]
plt.plot(date_index,data*self._norm_std + self._norm_mean,label='Data')
plt.plot(date_index,predictions,label='Predictions',c='black')
plt.title(self.data_name)
plt.legend(loc=2)
plt.show()
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, _, _, _ = self._construct_predict(self.latent_variables.get_z_values(),h)
predictions = predictions*self._norm_std + self._norm_mean
date_index = self.shift_dates(h)
result = pd.DataFrame(predictions)
result.rename(columns={0:self.data_name}, inplace=True)
result.index = date_index[-h:]
return result |
tests/tracer/test_context.py | melancholy/dd-trace-py | 308 | 12700249 | import pytest
from ddtrace.context import Context
from ddtrace.span import Span
@pytest.mark.parametrize(
"ctx1,ctx2",
[
(Context(), Context()),
(Context(trace_id=123), Context(trace_id=123)),
(
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
),
],
)
def test_eq(ctx1, ctx2):
assert ctx1 == ctx2
@pytest.mark.parametrize(
"ctx1,ctx2",
[
(Context(), Span(None, "")),
(Context(), None),
(Context(), object()),
(None, Context()),
(Context(), 5),
(5, Context()),
(
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
Context(trace_id=1234, span_id=321, dd_origin="synthetics", sampling_priority=2),
),
(
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
Context(trace_id=123, span_id=3210, dd_origin="synthetics", sampling_priority=2),
),
(
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
Context(trace_id=123, span_id=321, dd_origin="synthetics1", sampling_priority=2),
),
(
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2),
Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=0),
),
],
)
def test_not_eq(ctx1, ctx2):
assert ctx1 != ctx2
|
test/python/tests/test_primitives.py | bh107/bohrium | 236 | 12700257 | import numpy
from bohrium_api import _info
import util
class test_bh_opcodes:
def init(self):
for op in _info.op.values():
if op["name"] not in ["identity"] and op['elementwise']:
for type_sig in op["type_sig"]:
yield (op, type_sig)
@util.add_bh107_cmd
def test_ufunc(self, arg):
(op, type_sig) = arg
cmd = "R = bh.random.RandomState(42); "
for i, dtype in enumerate(type_sig[1:]):
cmd += "a%d = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % (i, dtype)
if op["name"] == "arccosh":
cmd += "a%d += 1;" % i
cmd += "res = M.%s(" % (op["name"])
for i in range(op["nop"]-1):
cmd += "a%d, " % i
cmd = cmd[:-2] + ");"
return cmd
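# The command strings returned above are executed by the surrounding test harness,
# which is assumed to bind the placeholder names (R, M, np, bh and the BH flag) for
# both the NumPy and Bohrium backends and compare the resulting `res` arrays.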
class test_bh_operators:
def init(self):
for op in ['+', '-', '*', '/', '//', '%', '==', '<=', '>=', '!=', '<', '>']:
for dtype in ['float64', 'int64']:
yield (op, dtype)
@util.add_bh107_cmd
def test_arrays(self, arg):
(op, dtype) = arg
cmd = "R = bh.random.RandomState(42); "
cmd += "a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % dtype
cmd += "a2 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH) + 1; " % dtype
cmd += "res = a1 %s a2" % op
return cmd
@util.add_bh107_cmd
def test_scalar_rhs(self, arg):
(op, dtype) = arg
cmd = "R = bh.random.RandomState(42); "
cmd += "a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % dtype
cmd += "a2 = np.%s(42); " % dtype
cmd += "res = a1 %s a2" % op
return cmd
class test_bh_operators_lhs:
def init(self):
if numpy.__version__ >= "1.13":
for op in ['+', '-', '*', '/', '//', '%', '==', '<=', '>=', '!=', '<', '>']:
for dtype in ['float64', 'int64']:
yield (op, dtype)
else:
print("The version of NumPy is too old (<= 1.13), ignoring test")
@util.add_bh107_cmd
def test_scalar_lhs(self, arg):
(op, dtype) = arg
cmd = "R = bh.random.RandomState(42); "
cmd += "a1 = np.%s(42); " % dtype
cmd += "a2 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH) + 1; " % dtype
cmd += "res = a1 %s a2" % op
return cmd
class test_extra_binary_ops:
def init(self):
for op in ["true_divide", "floor_divide"]:
for dtype in ["float64", "int64", "uint64"]:
yield (op, dtype)
@util.add_bh107_cmd
def test_ufunc(self, arg):
(op, dtype) = arg
cmd = "R = bh.random.RandomState(42); "
cmd += "a0 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % dtype
cmd += "a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % dtype
cmd += "res = M.%s(a0, a1)" % op
return cmd
class test_power:
def init(self):
for op in ["power"]:
for dtype in ["float32", "float64"]:
yield (op, dtype)
@util.add_bh107_cmd
def test_ufunc(self, arg):
(op, dtype) = arg
cmd = "R = bh.random.RandomState(42); "
cmd += "a0 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); " % dtype
cmd += "res = M.%s(a0, 1.42)" % op
return cmd
|
chats/migrations/0006_chatlog_coder.py | horacexd/clist | 166 | 12700259 | <gh_stars>100-1000
# Generated by Django 3.1.8 on 2021-05-23 17:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('true_coders', '0034_auto_20210411_1726'),
('chats', '0005_auto_20210523_1700'),
]
operations = [
migrations.AddField(
model_name='chatlog',
name='coder',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='true_coders.coder'),
),
]
|
venv/lib/python3.8/site-packages/braintree/android_pay_card.py | sakthipriya-07/BuildingConstructionMaterialsSupply | 182 | 12700287 | import braintree
from braintree.resource import Resource
# NEXT_MAJOR_VERSION - rename to GooglePayCard
class AndroidPayCard(Resource):
"""
A class representing Braintree Android Pay card objects.
"""
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if hasattr(self, 'expired'):
self.is_expired = self.expired
if "subscriptions" in attributes:
self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
@property
def expiration_date(self):
return self.expiration_month + "/" + self.expiration_year
@property
def last_4(self):
return self.virtual_card_last_4
@property
def card_type(self):
return self.virtual_card_type
|
interpreter/code/integration.py | choosewhatulike/500lines | 26,185 | 12700322 | <reponame>choosewhatulike/500lines
# A file to test if pyvm works from the command line.
def it_works():
print("Success!")
it_works()
|
Contrib/TranslationToolkit/Convert_Language.py | buckmelanoma/MediaInfo | 743 | 12700347 | <filename>Contrib/TranslationToolkit/Convert_Language.py
# Type: Python 3 script
# Author: <NAME> <<EMAIL>>
# Date: Feb 28, 2020
# Notes: Put "Language.csv" in this folder and run the script.
# It generates "Language_parsed.csv" for translation.
# Download the latest file from "MediaInfo/Source/Resource".
# Date: Mar 1, 2020
# Update: Fix bug in note adding (keyword mode).
import csv
import sys
import os
#################################
######### Configuration #########
# edit the filename if the CSV file is placed elsewhere
language_file = 'Language.csv'
# output filename
output_file = 'Language_parsed.csv'
# enter your language codes here
# it should be one in
# ar;be;bg;ca;cs;da;de;es;eu;fa;fr;gl;gr;hu;it;ja;ko;lt;nl;
# pl;pt;pt-BR;ro;ru;sk;sq;sv;th;tr;uk;zh-CN;zh-HK;zh-TW;hr;hy;ka
# the first one is target language
# the others are reference languages (if any)
# English (en) is the default source language
lang_codes = ['zh-CN', 'ja']
# comments file
comments_file = 'Data_Comments.csv'
# notes file
notes_file = 'Data_Notes.csv'
######### Configuration #########
#################################
csv.register_dialect('MediaInfo', delimiter=';')
if not os.path.exists(language_file):
print('Error: Language.csv file does not exist!')
sys.exit(1)
if not lang_codes:
print('Error: No language code is specified!')
sys.exit(1)
dict_comments = {}
if os.path.exists(comments_file):
with open(comments_file, 'r', encoding='utf_8_sig') as f:
reader = csv.reader(f)
next(reader) # skip header
for row in reader:
# key, type or unit, comment
dict_comments[row[0]] = [row[1], row[2]]
else:
print('Info: Comments file does not exist. Ignoring comments.')
notes = False
dict_notes_key = {}
dict_notes_keyword = {}
dict_notes_key_keyword = {}
if os.path.exists(notes_file):
notes = True
with open(notes_file, 'r', encoding='utf_8_sig') as f:
reader = csv.reader(f)
next(reader) # skip header
for row in reader:
mode = row[1].lower().strip()
if mode == '' or mode == 'keyword':
dict_notes_keyword[row[0].lower().strip()] = row[2]
elif mode == 'key':
dict_notes_key[row[0]] = row[2]
elif mode == 'key_keyword':
dict_notes_key_keyword[row[0].strip()] = row[2]
else:
print('Info: Notes file does not exist. Ignoring notes.')
output_rows = []
with open(language_file, 'r', encoding='utf_8') as f:
reader = csv.reader(f, dialect='MediaInfo')
header = next(reader)
index_lang = []
lang_codes.insert(0, 'en')
for lang_code in lang_codes:
if lang_code not in header:
print(f'Error: Language code "{lang_code}" is not found in the language file!')
sys.exit(1)
index_lang.append(header.index(lang_code))
row_header = ['Key']
row_header.extend(lang_codes)
if dict_comments:
row_header.extend(['Type or Unit', 'Comment'])
if notes:
row_header.append('Notes')
output_rows.append(row_header)
for row in reader:
key = row[0]
row_string = [key]
for index in index_lang:
if row[index].startswith(' :'):
row_string.append(f'"{row[index]}"')
else:
row_string.append(row[index])
if dict_comments:
if key in dict_comments:
row_string.extend(dict_comments[key])
else:
row_string.extend(['', ''])
if notes:
row_notes = []
if key in dict_notes_key:
row_notes.append(dict_notes_key[key])
for key_keyword in dict_notes_key_keyword:
if key_keyword in key:
row_notes.append(dict_notes_key_keyword[key_keyword])
for keyword in dict_notes_keyword:
if keyword in row_string[1].lower():
row_notes.append(dict_notes_keyword[keyword])
row_string.append('; '.join(row_notes))
output_rows.append(row_string)
with open(output_file, 'w', encoding='utf_8_sig', newline='') as f:
writer = csv.writer(f)
writer.writerows(output_rows)
print('Info: Parsing completed!')
|
kpm/commands/deploy.py | ericchiang/kpm | 121 | 12700360 | import kpm.platforms.kubernetes
import kpm.formats
from kpm.commands.command_base import CommandBase, LoadVariables
class DeployCmd(CommandBase):
name = 'deploy'
help_message = "deploy a package on kubernetes"
def __init__(self, options):
super(DeployCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.shards = options.shards
self.force = options.force
self.dry_run = options.dry_run
self.namespace = options.namespace
self.api_proxy = options.api_proxy
self.version = options.version
self.version_parts = options.version_parts
self.tmpdir = options.tmpdir
self.variables = options.variables
self.target = options.platform
self.format = options.media_type
self.status = None
self._kub = None
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser, default='kpm')
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
parser.add_argument("--tmpdir", default="/tmp/", help="directory used to extract resources")
parser.add_argument("--dry-run", action='store_true', default=False,
help="do not create the resources on kubernetes")
parser.add_argument("--namespace", help="kubernetes namespace", default=None)
parser.add_argument("--api-proxy", help="kubectl proxy url", nargs="?",
const="http://localhost:8001")
parser.add_argument("-x", "--variables", help="variables", default={}, action=LoadVariables)
parser.add_argument("--shards", help=("Shards list/dict/count: eg. --shards=5 ;"
"--shards='[{\"name\": 1, \"name\": 2}]'"),
default=None)
parser.add_argument("--force", action='store_true', default=False,
help="force upgrade, delete and recreate resources")
parser.add_argument("--platform", default=None,
help=("[experimental] target platform to deploy"
"the package: [kubernetes, docker-compose]"))
def kub(self):
if self._kub is None:
self._kub = kpm.formats.kub_factory(self.format, self.package, convert_to=self.target,
endpoint=self.registry_host,
variables=self.variables, namespace=self.namespace,
shards=self.shards, version=self.version_parts)
return self._kub
def _call(self):
self.status = self.kub().deploy(dest=self.tmpdir, force=self.force, dry=self.dry_run,
proxy=self.api_proxy, fmt=self.output)
def _render_dict(self):
return self.status
def _render_console(self):
""" Handled by deploy """
if self.kub().target == "docker-compose":
return self.status
return ''
|
bsp/TC264D/PikaScript/main.py | GorgonMeducer/pikascript | 228 | 12700391 | import PikaStdLib
print('hello PikaScript @TC264') |
recipes/Python/52564_Curried_functions/recipe-52564.py | tdiprima/code | 2,023 | 12700402 | CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
class Curry:
def __init__(self, f):
self.hasv = f.func_code.co_flags & CO_VARARGS
self.hask = f.func_code.co_flags & CO_VARKEYWORDS
self.defaults = f.func_defaults or ()
self.defnum = len(self.defaults)
self.f = f
self.argnum = f.func_code.co_argcount
self._reset()
def __call__(self, *a, **k):
if k and not self.hask:
raise TypeError, "%s got unexpected keyword argument '%s'" %\
(self.f.__name__, k.popitem()[0])
kargs = self.kargs
args = self.args
kargs.update(k)
totlen = len(args) + len(a)
if totlen > self.argnum:
if not self.hasv:
raise TypeError, "%s takes exactly %d argument%c (%d given)" % (self.f.__name__, self.argnum, ['s',''][self.argnum==1], totlen)
args += a
self._reset()
return self.f(*args, **kargs)
if totlen >= self.argnum - self.defnum:
            num_defaults = self.argnum - totlen
            args += a + self.defaults[self.defnum-num_defaults:]
self._reset()
return self.f(*args, **kargs)
self.args += a
return self
def _reset(self):
self.args, self.kargs = (), {}
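# A minimal sketch of the intended call pattern (Python 2, like the recipe itself);
# the example function is purely illustrative:
#   def add(a, b, c=3):
#       return a + b + c
#   cadd = Curry(add)
#   print cadd(1)(2)   # arguments accumulate across calls; the missing one falls
#                      # back to its default, so this prints 6 and resets the curry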
|
djangosige/apps/vendas/migrations/0001_initial.py | CTECHSUL/SG | 330 | 12700407 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-25 17:50
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cadastro', '0001_initial'),
('estoque', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CondicaoPagamento',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=255)),
('forma', models.CharField(choices=[('01', 'Dinheiro'), ('02', 'Cheque'), ('03', 'Cartão de Crédito'), ('04', 'Cartão de Débito'), ('05', 'Crédito Loja'), (
'10', 'Vale Alimentação'), ('11', 'Vale Refeição'), ('12', 'Vale Presente'), ('13', 'Vale Combustível'), ('99', 'Outros')], default='99', max_length=2)),
('n_parcelas', models.IntegerField()),
('dias_recorrencia', models.IntegerField(default=0)),
('parcela_inicial', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='ItensVenda',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('quantidade', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('valor_unit', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('tipo_desconto', models.CharField(blank=True, choices=[
('0', 'Valor'), ('1', 'Percentual')], max_length=1, null=True)),
('desconto', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('subtotal', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('inf_ad_prod', models.CharField(
blank=True, max_length=500, null=True)),
('valor_rateio_frete', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('valor_rateio_despesas', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('valor_rateio_seguro', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vbc_icms', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vbc_icms_st', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vbc_ipi', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vicms', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vicms_st', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vipi', models.DecimalField(blank=True, decimal_places=2, max_digits=13, null=True,
validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vfcp', models.DecimalField(blank=True, decimal_places=2, max_digits=13, null=True,
validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vicmsufdest', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vicmsufremet', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vicms_deson', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('p_icms', models.DecimalField(blank=True, decimal_places=2, max_digits=5,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('p_icmsst', models.DecimalField(blank=True, decimal_places=2, max_digits=5,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('p_ipi', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True,
validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vq_bcpis', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vq_bccofins', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vpis', models.DecimalField(blank=True, decimal_places=2, max_digits=13, null=True,
validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('vcofins', models.DecimalField(blank=True, decimal_places=2, max_digits=13,
null=True, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('icms_incluido_preco', models.BooleanField(default=False)),
('icmsst_incluido_preco', models.BooleanField(default=False)),
('ipi_incluido_preco', models.BooleanField(default=False)),
('incluir_bc_icms', models.BooleanField(default=False)),
('incluir_bc_icmsst', models.BooleanField(default=False)),
('auto_calcular_impostos', models.BooleanField(default=True)),
('produto', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='venda_produto', to='cadastro.Produto')),
],
),
migrations.CreateModel(
name='Pagamento',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('indice_parcela', models.IntegerField()),
('vencimento', models.DateField()),
('valor_parcela', models.DecimalField(decimal_places=2, max_digits=13, validators=[
django.core.validators.MinValueValidator(Decimal('0.00'))])),
],
),
migrations.CreateModel(
name='Venda',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('ind_final', models.BooleanField(default=False)),
('mod_frete', models.CharField(choices=[('0', 'Por conta do emitente'), (
'1', 'Por conta do destinatário/remetente'), ('2', 'Por conta de terceiros'), ('9', 'Sem frete')], default='9', max_length=1)),
('movimentar_estoque', models.BooleanField(default=True)),
('data_emissao', models.DateField(blank=True, null=True)),
('vendedor', models.CharField(blank=True, max_length=255, null=True)),
('valor_total', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('tipo_desconto', models.CharField(choices=[
('0', 'Valor'), ('1', 'Percentual')], default='0', max_length=1)),
('desconto', models.DecimalField(decimal_places=4, default=Decimal(
'0.00'), max_digits=15, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('despesas', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('frete', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('seguro', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('impostos', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('observacoes', models.CharField(
blank=True, max_length=1055, null=True)),
],
),
migrations.CreateModel(
name='OrcamentoVenda',
fields=[
('venda_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True, serialize=False, to='vendas.Venda')),
('data_vencimento', models.DateField(blank=True, null=True)),
('status', models.CharField(choices=[
('0', 'Aberto'), ('1', 'Baixado'), ('2', 'Cancelado')], default='0', max_length=1)),
],
bases=('vendas.venda',),
),
migrations.CreateModel(
name='PedidoVenda',
fields=[
('venda_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True, serialize=False, to='vendas.Venda')),
('data_entrega', models.DateField(blank=True, null=True)),
('status', models.CharField(choices=[('0', 'Aberto'), ('1', 'Faturado'), (
'2', 'Cancelado'), ('3', 'Importado por XML')], default='0', max_length=1)),
('orcamento', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='orcamento_pedido', to='vendas.OrcamentoVenda')),
],
bases=('vendas.venda',),
),
migrations.AddField(
model_name='venda',
name='cliente',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='venda_cliente', to='cadastro.Cliente'),
),
migrations.AddField(
model_name='venda',
name='cond_pagamento',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='venda_pagamento', to='vendas.CondicaoPagamento'),
),
migrations.AddField(
model_name='venda',
name='local_orig',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE,
related_name='venda_local_estoque', to='estoque.LocalEstoque'),
),
migrations.AddField(
model_name='venda',
name='transportadora',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='venda_transportadora', to='cadastro.Transportadora'),
),
migrations.AddField(
model_name='venda',
name='veiculo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='venda_veiculo', to='cadastro.Veiculo'),
),
migrations.AddField(
model_name='pagamento',
name='venda_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='parcela_pagamento', to='vendas.Venda'),
),
migrations.AddField(
model_name='itensvenda',
name='venda_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='itens_venda', to='vendas.Venda'),
),
]
|
src/tools/nuscenes-devkit/eval/tracking/mot.py | jie311/TraDeS | 1,284 | 12700414 | <reponame>jie311/TraDeS<filename>src/tools/nuscenes-devkit/eval/tracking/mot.py
"""
nuScenes dev-kit.
Code written by <NAME>, <NAME> and <NAME>, 2019.
This code is based on:
py-motmetrics at:
https://github.com/cheind/py-motmetrics
"""
from collections import OrderedDict
from itertools import count
import motmetrics
import numpy as np
import pandas as pd
class MOTAccumulatorCustom(motmetrics.mot.MOTAccumulator):
def __init__(self):
super().__init__()
@staticmethod
def new_event_dataframe_with_data(indices, events):
"""
Create a new DataFrame filled with data.
        This version overrides the original in MOTAccumulator and achieves about a 2x speedup.
Params
------
indices: list
list of tuples (frameid, eventid)
events: list
list of events where each event is a list containing
            'Type', 'OId', 'HId', 'D'
"""
idx = pd.MultiIndex.from_tuples(indices, names=['FrameId', 'Event'])
df = pd.DataFrame(events, index=idx, columns=['Type', 'OId', 'HId', 'D'])
return df
@staticmethod
def new_event_dataframe():
""" Create a new DataFrame for event tracking. """
idx = pd.MultiIndex(levels=[[], []], codes=[[], []], names=['FrameId', 'Event'])
cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH'])
df = pd.DataFrame(
OrderedDict([
('Type', pd.Series(cats)), # Type of event. One of FP (false positive), MISS, SWITCH, MATCH
('OId', pd.Series(dtype=object)),
# Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways.
('HId', pd.Series(dtype=object)),
# Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways.
('D', pd.Series(dtype=float)), # Distance or NaN when FP or MISS
]),
index=idx
)
return df
@property
def events(self):
if self.dirty_events:
self.cached_events_df = MOTAccumulatorCustom.new_event_dataframe_with_data(self._indices, self._events)
self.dirty_events = False
return self.cached_events_df
@staticmethod
def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True,
return_mappings=False):
"""Merge dataframes.
Params
------
dfs : list of pandas.DataFrame or MotAccumulator
A list of event containers to merge
Kwargs
------
update_frame_indices : boolean, optional
Ensure that frame indices are unique in the merged container
        update_oids : boolean, optional
            Ensure that object ids are unique in the merged container
        update_hids : boolean, optional
            Ensure that hypothesis ids are unique in the merged container
        return_mappings : boolean, optional
Whether or not to return mapping information
Returns
-------
df : pandas.DataFrame
Merged event data frame
"""
mapping_infos = []
new_oid = count()
new_hid = count()
r = MOTAccumulatorCustom.new_event_dataframe()
for df in dfs:
if isinstance(df, MOTAccumulatorCustom):
df = df.events
copy = df.copy()
infos = {}
# Update index
if update_frame_indices:
next_frame_id = max(r.index.get_level_values(0).max() + 1,
r.index.get_level_values(0).unique().shape[0])
if np.isnan(next_frame_id):
next_frame_id = 0
copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1]))
infos['frame_offset'] = next_frame_id
# Update object / hypothesis ids
if update_oids:
oid_map = dict([oid, str(next(new_oid))] for oid in copy['OId'].dropna().unique())
copy['OId'] = copy['OId'].map(lambda x: oid_map[x], na_action='ignore')
infos['oid_map'] = oid_map
if update_hids:
hid_map = dict([hid, str(next(new_hid))] for hid in copy['HId'].dropna().unique())
copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')
infos['hid_map'] = hid_map
r = r.append(copy)
mapping_infos.append(infos)
if return_mappings:
return r, mapping_infos
else:
return r
|
tools/telemetry/telemetry/core/backends/chrome/websocket_unittest.py | iplo/Chain | 231 | 12700419 | <gh_stars>100-1000
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import socket
import unittest
from telemetry.core.backends.chrome import websocket
class TestWebSocket(unittest.TestCase):
def testExports(self):
self.assertNotEqual(websocket.create_connection, None)
self.assertNotEqual(websocket.WebSocketException, None)
self.assertNotEqual(websocket.WebSocketTimeoutException, None)
def testSockOpts(self):
ws = websocket.create_connection('ws://echo.websocket.org')
self.assertNotEquals(
ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
ws = websocket.create_connection(
'ws://echo.websocket.org',
sockopt=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
self.assertNotEquals(
ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
self.assertNotEquals(
ws.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
|
terrascript/resource/NetApp/netapp_cloudmanager.py | mjuenema/python-terrascript | 507 | 12700424 | # terrascript/resource/NetApp/netapp_cloudmanager.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:22:08 UTC)
import terrascript
class netapp_cloudmanager_aggregate(terrascript.Resource):
pass
class netapp_cloudmanager_anf_volume(terrascript.Resource):
pass
class netapp_cloudmanager_aws_fsx(terrascript.Resource):
pass
class netapp_cloudmanager_cifs_server(terrascript.Resource):
pass
class netapp_cloudmanager_connector_aws(terrascript.Resource):
pass
class netapp_cloudmanager_connector_azure(terrascript.Resource):
pass
class netapp_cloudmanager_connector_gcp(terrascript.Resource):
pass
class netapp_cloudmanager_cvo_aws(terrascript.Resource):
pass
class netapp_cloudmanager_cvo_azure(terrascript.Resource):
pass
class netapp_cloudmanager_cvo_gcp(terrascript.Resource):
pass
class netapp_cloudmanager_cvs_gcp_volume(terrascript.Resource):
pass
class netapp_cloudmanager_nss_account(terrascript.Resource):
pass
class netapp_cloudmanager_snapmirror(terrascript.Resource):
pass
class netapp_cloudmanager_volume(terrascript.Resource):
pass
__all__ = [
"netapp_cloudmanager_aggregate",
"netapp_cloudmanager_anf_volume",
"netapp_cloudmanager_aws_fsx",
"netapp_cloudmanager_cifs_server",
"netapp_cloudmanager_connector_aws",
"netapp_cloudmanager_connector_azure",
"netapp_cloudmanager_connector_gcp",
"netapp_cloudmanager_cvo_aws",
"netapp_cloudmanager_cvo_azure",
"netapp_cloudmanager_cvo_gcp",
"netapp_cloudmanager_cvs_gcp_volume",
"netapp_cloudmanager_nss_account",
"netapp_cloudmanager_snapmirror",
"netapp_cloudmanager_volume",
]
|
ssseg/modules/models/backbones/fastscnn.py | zhizhangxian/sssegmentation | 411 | 12700443 | '''
Function:
Implementation of FastSCNN
Author:
<NAME>
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .bricks import BuildNormalization, BuildActivation, DepthwiseSeparableConv2d, InvertedResidual
'''model urls'''
model_urls = {}
'''Pooling Pyramid Module used in PSPNet'''
class PoolingPyramidModule(nn.ModuleList):
def __init__(self, pool_scales, in_channels, out_channels, norm_cfg, act_cfg, align_corners, **kwargs):
super(PoolingPyramidModule, self).__init__()
self.pool_scales = pool_scales
self.in_channels = in_channels
self.out_channels = out_channels
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.align_corners = align_corners
for pool_scale in pool_scales:
self.append(nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (out_channels, norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
))
'''forward'''
def forward(self, x):
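        # Pool the input at every pyramid scale, project to out_channels, then upsample
        # back to x's spatial size so the outputs can later be concatenated with x.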
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
upsampled_ppm_out = F.interpolate(
input=ppm_out,
size=x.shape[2:],
mode='bilinear',
align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
'''Learning to downsample module'''
class LearningToDownsample(nn.Module):
def __init__(self, in_channels, dw_channels, out_channels, norm_cfg=None, act_cfg=None, dw_act_cfg=None):
super(LearningToDownsample, self).__init__()
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.dw_act_cfg = dw_act_cfg
dw_channels1, dw_channels2 = dw_channels
self.conv = nn.Sequential(
nn.Conv2d(in_channels, dw_channels1, kernel_size=3, stride=2, padding=1, bias=False),
BuildNormalization(norm_cfg['type'], (dw_channels1, norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
self.dsconv1 = DepthwiseSeparableConv2d(
in_channels=dw_channels1,
out_channels=dw_channels2,
kernel_size=3,
stride=2,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
dw_act_cfg=self.dw_act_cfg,
)
self.dsconv2 = DepthwiseSeparableConv2d(
in_channels=dw_channels2,
out_channels=out_channels,
kernel_size=3,
stride=2,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
dw_act_cfg=self.dw_act_cfg,
)
'''forward'''
def forward(self, x):
x = self.conv(x)
x = self.dsconv1(x)
x = self.dsconv2(x)
return x
'''Global feature extractor module'''
class GlobalFeatureExtractor(nn.Module):
def __init__(self, in_channels=64, block_channels=(64, 96, 128), out_channels=128, expand_ratio=6, num_blocks=(3, 3, 3), strides=(2, 2, 1),
pool_scales=(1, 2, 3, 6), norm_cfg=None, act_cfg=None, align_corners=False):
super(GlobalFeatureExtractor, self).__init__()
# set attrs
assert len(block_channels) == len(num_blocks) == 3
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
# define modules
self.bottleneck1 = self.makelayer(in_channels, block_channels[0], num_blocks[0], strides[0], expand_ratio)
self.bottleneck2 = self.makelayer(block_channels[0], block_channels[1], num_blocks[1], strides[1], expand_ratio)
self.bottleneck3 = self.makelayer(block_channels[1], block_channels[2], num_blocks[2], strides[2], expand_ratio)
self.ppm = PoolingPyramidModule(pool_scales, block_channels[2], block_channels[2] // 4, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=align_corners)
self.out = nn.Sequential(
nn.Conv2d(block_channels[2] * 2, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
BuildNormalization(norm_cfg['type'], (out_channels, norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
'''make layer'''
def makelayer(self, in_channels, out_channels, blocks, stride=1, expand_ratio=6):
layers = [
InvertedResidual(in_channels, out_channels, stride, expand_ratio, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
]
for i in range(1, blocks):
layers.append(
InvertedResidual(out_channels, out_channels, 1, expand_ratio, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
)
return nn.Sequential(*layers)
'''forward'''
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = torch.cat([x, *self.ppm(x)], dim=1)
x = self.out(x)
return x
'''Feature fusion module'''
class FeatureFusionModule(nn.Module):
def __init__(self, higher_in_channels, lower_in_channels, out_channels, norm_cfg=None, dwconv_act_cfg=None, conv_act_cfg=None, align_corners=False):
super(FeatureFusionModule, self).__init__()
# set attrs
self.norm_cfg = norm_cfg
self.dwconv_act_cfg = dwconv_act_cfg
self.conv_act_cfg = conv_act_cfg
self.align_corners = align_corners
# define modules
self.dwconv = nn.Sequential(
nn.Conv2d(lower_in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=out_channels, bias=False),
BuildNormalization(norm_cfg['type'], (out_channels, norm_cfg['opts'])),
BuildActivation(dwconv_act_cfg['type'], **dwconv_act_cfg['opts']),
)
self.conv_lower_res = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (out_channels, norm_cfg['opts'])),
)
self.conv_higher_res = nn.Sequential(
nn.Conv2d(higher_in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
BuildNormalization(norm_cfg['type'], (out_channels, norm_cfg['opts'])),
)
self.act = BuildActivation(conv_act_cfg['type'], **conv_act_cfg['opts'])
'''forward'''
def forward(self, higher_res_feature, lower_res_feature):
lower_res_feature = F.interpolate(lower_res_feature, size=higher_res_feature.size()[2:], mode='bilinear', align_corners=self.align_corners)
lower_res_feature = self.dwconv(lower_res_feature)
lower_res_feature = self.conv_lower_res(lower_res_feature)
higher_res_feature = self.conv_higher_res(higher_res_feature)
out = higher_res_feature + lower_res_feature
return self.act(out)
'''FastSCNN'''
class FastSCNN(nn.Module):
def __init__(self, in_channels=3, downsample_dw_channels=(32, 48), global_in_channels=64, global_block_channels=(64, 96, 128), global_block_strides=(2, 2, 1), global_out_channels=128,
higher_in_channels=64, lower_in_channels=128, fusion_out_channels=128, out_indices=(0, 1, 2), norm_cfg=None, act_cfg=None, align_corners=False, dw_act_cfg=None, **kwargs):
super(FastSCNN, self).__init__()
assert global_in_channels == higher_in_channels, 'Global Input Channels must be the same with Higher Input Channels...'
assert global_out_channels == lower_in_channels, 'Global Output Channels must be the same with Lower Input Channels...'
# set attrs
self.in_channels = in_channels
self.downsample_dw_channels1 = downsample_dw_channels[0]
self.downsample_dw_channels2 = downsample_dw_channels[1]
self.global_in_channels = global_in_channels
self.global_block_channels = global_block_channels
self.global_block_strides = global_block_strides
self.global_out_channels = global_out_channels
self.higher_in_channels = higher_in_channels
self.lower_in_channels = lower_in_channels
self.fusion_out_channels = fusion_out_channels
self.out_indices = out_indices
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.align_corners = align_corners
self.dw_act_cfg = dw_act_cfg
# define modules
self.learning_to_downsample = LearningToDownsample(
in_channels=in_channels,
dw_channels=downsample_dw_channels,
out_channels=global_in_channels,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
dw_act_cfg=self.dw_act_cfg
)
self.global_feature_extractor = GlobalFeatureExtractor(
in_channels=global_in_channels,
block_channels=global_block_channels,
out_channels=global_out_channels,
strides=self.global_block_strides,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners,
)
self.feature_fusion = FeatureFusionModule(
higher_in_channels=higher_in_channels,
lower_in_channels=lower_in_channels,
out_channels=fusion_out_channels,
norm_cfg=self.norm_cfg,
dwconv_act_cfg=self.act_cfg,
conv_act_cfg=self.act_cfg,
align_corners=self.align_corners,
)
'''forward'''
def forward(self, x):
higher_res_features = self.learning_to_downsample(x)
lower_res_features = self.global_feature_extractor(higher_res_features)
fusion_output = self.feature_fusion(higher_res_features, lower_res_features)
outs = [higher_res_features, lower_res_features, fusion_output]
outs = [outs[i] for i in self.out_indices]
return tuple(outs)
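# A hedged usage sketch of the builder below; the norm_cfg value is only an assumed
# example of this codebase's {'type': ..., 'opts': ...} convention, not a verified
# registry name:
#   model = BuildFastSCNN(norm_cfg={'type': 'syncbatchnorm', 'opts': {}})
#   outs = model(torch.randn(1, 3, 512, 1024))   # tuple of the three feature maps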
'''build fastscnn'''
def BuildFastSCNN(fastscnn_type=None, **kwargs):
# assert whether support
assert fastscnn_type is None
# parse args
default_args = {
'in_channels': 3,
'downsample_dw_channels': (32, 48),
'global_in_channels': 64,
'global_block_channels': (64, 96, 128),
'global_block_strides': (2, 2, 1),
'global_out_channels': 128,
'higher_in_channels': 64,
'lower_in_channels': 128,
'fusion_out_channels': 128,
'out_indices': (0, 1, 2),
'norm_cfg': None,
'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
'align_corners': False,
'dw_act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
'pretrained': False,
'pretrained_model_path': '',
}
for key, value in kwargs.items():
if key in default_args: default_args.update({key: value})
# obtain args for instanced fastscnn
fastscnn_args = default_args.copy()
# obtain the instanced fastscnn
model = FastSCNN(**fastscnn_args)
# load weights of pretrained model
if default_args['pretrained'] and os.path.exists(default_args['pretrained_model_path']):
checkpoint = torch.load(default_args['pretrained_model_path'])
if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict']
else: state_dict = checkpoint
model.load_state_dict(state_dict, strict=False)
elif default_args['pretrained']:
checkpoint = model_zoo.load_url(model_urls[fastscnn_type])
if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict']
else: state_dict = checkpoint
model.load_state_dict(state_dict, strict=False)
# return the model
return model |
examples/plugins/js-beautify-1.7.5/python/cssbeautifier/__init__.py | ErMandeep/keditor | 676 | 12700454 | <filename>examples/plugins/js-beautify-1.7.5/python/cssbeautifier/__init__.py
#
# The MIT License (MIT)
# Copyright (c) 2007-2017 <NAME>, <NAME>, and contributors.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import re
import copy
from jsbeautifier.__version__ import __version__
from cssbeautifier.css.options import BeautifierOptions
from cssbeautifier.css.beautifier import Beautifier
def default_options():
return BeautifierOptions()
def beautify(string, opts=default_options()):
b = Beautifier(string, opts)
return b.beautify()
def beautify_file(file_name, opts=default_options()):
if file_name == '-': # stdin
stream = sys.stdin
else:
stream = open(file_name)
content = ''.join(stream.readlines())
b = Beautifier(content, opts)
return b.beautify()
def usage(stream=sys.stdout):
print("cssbeautifier.py@" + __version__ + """
CSS beautifier (http://jsbeautifier.org/)
""", file=stream)
if stream == sys.stderr:
return 1
else:
return 0
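# Illustrative usage sketch (not part of the original module): shows how the
# helpers above fit together. The CSS string and the indent_size option value
# are assumptions made for this example only.
if __name__ == '__main__':
    opts = default_options()
    opts.indent_size = 2
    print(beautify(".foo{color:red;margin:0}", opts))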
|
examples/tutorials/lesson3/dqn/algorithm.py | jkren6/PARL | 3,172 | 12700458 | <filename>examples/tutorials/lesson3/dqn/algorithm.py<gh_stars>1000+
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-*- coding: utf-8 -*-
import copy
import paddle.fluid as fluid
import parl
from parl import layers
class DQN(parl.Algorithm):
def __init__(self, model, act_dim=None, gamma=None, lr=None):
""" DQN algorithm
Args:
            model (parl.Model): forward network that defines the Q function
            act_dim (int): dimension of the action space, i.e. the number of actions
            gamma (float): discount factor applied to future rewards
            lr (float): learning rate
"""
self.model = model
self.target_model = copy.deepcopy(model)
assert isinstance(act_dim, int)
assert isinstance(gamma, float)
assert isinstance(lr, float)
self.act_dim = act_dim
self.gamma = gamma
self.lr = lr
def predict(self, obs):
""" 使用self.model的value网络来获取 [Q(s,a1),Q(s,a2),...]
"""
return self.model.value(obs)
def learn(self, obs, action, reward, next_obs, terminal):
""" 使用DQN算法更新self.model的value网络
"""
        # get max Q' from target_model, used to compute target_Q
next_pred_value = self.target_model.value(next_obs)
best_v = layers.reduce_max(next_pred_value, dim=1)
        best_v.stop_gradient = True  # stop gradients from flowing through the target
terminal = layers.cast(terminal, dtype='float32')
target = reward + (1.0 - terminal) * self.gamma * best_v
        pred_value = self.model.value(obs)  # predicted Q values
        # convert action to a one-hot vector, e.g. 3 => [0,0,0,1,0]
        action_onehot = layers.one_hot(action, self.act_dim)
        action_onehot = layers.cast(action_onehot, dtype='float32')
        # The line below is an element-wise multiplication that picks out Q(s,a) of the taken action
        # e.g.: pred_value = [[2.3, 5.7, 1.2, 3.9, 1.4]], action_onehot = [[0,0,0,1,0]]
        # ==> pred_action_value = [[3.9]]
pred_action_value = layers.reduce_sum(
layers.elementwise_mul(action_onehot, pred_value), dim=1)
        # compute the squared error between Q(s,a) and target_Q as the loss
cost = layers.square_error_cost(pred_action_value, target)
cost = layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=self.lr)  # use the Adam optimizer
optimizer.minimize(cost)
return cost
def sync_target(self):
""" 把 self.model 的模型参数值同步到 self.target_model
"""
self.model.sync_weights_to(self.target_model)
|
forge/blade/hook/__init__.py | jarbus/neural-mmo | 1,450 | 12700476 | <reponame>jarbus/neural-mmo<gh_stars>1000+
from .modules import modules
|
pyhealth/utils/utility.py | pyvonpyton/PyHealth | 485 | 12700505 | # -*- coding: utf-8 -*-
"""A set of utility functions to support outlier detection.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from numpy import percentile
import numbers
import sklearn
from sklearn.metrics import precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.utils import column_or_1d
from sklearn.utils import check_array
from sklearn.utils import check_consistent_length
from sklearn.utils import check_random_state
from sklearn.utils.random import sample_without_replacement
MAX_INT = np.iinfo(np.int32).max
MIN_INT = -1 * MAX_INT
def make_dirs_if_not_exists(save_dir):
# make saving directory if needed
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
def read_csv_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
low_memory=True, encoding=None):
"""Read in csv files with necessary processing
Parameters
----------
file_loc
header_lower
low_memory
Returns
-------
"""
    if dtype is not None:
df = pd.read_csv(file_loc, usecols=usecols, dtype=dtype,
low_memory=low_memory, encoding=encoding)
else:
df = pd.read_csv(file_loc, usecols=usecols, low_memory=low_memory,
encoding=encoding)
if header_lower:
df.columns = df.columns.str.lower()
return df
def read_excel_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
low_memory=True, encoding=None):
"""Read in excel files with necessary processing
Parameters
----------
file_loc
header_lower
low_memory
Returns
-------
"""
    if dtype is not None:
df = pd.read_excel(file_loc, usecols=usecols, dtype=dtype,
low_memory=low_memory, encoding=encoding)
else:
df = pd.read_excel(file_loc, usecols=usecols, low_memory=low_memory,
encoding=encoding)
if header_lower:
df.columns = df.columns.str.lower()
return df
def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
include_left=False, include_right=False):
"""Check if an input is within the defined range.
Parameters
----------
param : int, float
The input parameter to check.
low : int, float
The lower bound of the range.
high : int, float
The higher bound of the range.
param_name : str, optional (default='')
The name of the parameter.
include_left : bool, optional (default=False)
Whether includes the lower bound (lower bound <=).
include_right : bool, optional (default=False)
Whether includes the higher bound (<= higher bound).
Returns
-------
within_range : bool or raise errors
Whether the parameter is within the range of (low, high)
"""
# param, low and high should all be numerical
    if not isinstance(param, (numbers.Integral, np.integer, float)):
raise TypeError('{param_name} is set to {param} Not numerical'.format(
param=param, param_name=param_name))
    if not isinstance(low, (numbers.Integral, np.integer, float)):
raise TypeError('low is set to {low}. Not numerical'.format(low=low))
    if not isinstance(high, (numbers.Integral, np.integer, float)):
raise TypeError('high is set to {high}. Not numerical'.format(
high=high))
# at least one of the bounds should be specified
if low is MIN_INT and high is MAX_INT:
        raise ValueError('Neither low nor high bound is defined')
# if wrong bound values are used
if low > high:
raise ValueError(
'Lower bound > Higher bound')
# value check under different bound conditions
if (include_left and include_right) and (param < low or param > high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of [{low}, {high}].'.format(
param=param, low=low, high=high, param_name=param_name))
elif (include_left and not include_right) and (
param < low or param >= high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of [{low}, {high}).'.format(
param=param, low=low, high=high, param_name=param_name))
elif (not include_left and include_right) and (
param <= low or param > high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of ({low}, {high}].'.format(
param=param, low=low, high=high, param_name=param_name))
elif (not include_left and not include_right) and (
param <= low or param >= high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of ({low}, {high}).'.format(
param=param, low=low, high=high, param_name=param_name))
else:
return True
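# Illustrative usage sketch (not part of the original module): validate a
# fraction-style hyper-parameter in the half-open interval [0, 1). The
# parameter name 'contamination' is only an example value.
if __name__ == '__main__':
    assert check_parameter(0.1, low=0, high=1, param_name='contamination',
                           include_left=True, include_right=False)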
|
examples/kde/kde_example_1d.py | michaelnowotny/cocos | 101 | 12700518 | <filename>examples/kde/kde_example_1d.py
from contexttimer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import cocos.numerics as cn
import cocos.device as cd
from cocos.scientific.kde import gaussian_kde
n = 10000 # number of data points
grid_size = n+1 # number of points at which to evaluate the kde
R = 10 # number of repetitions for performance benchmark
if __name__ == '__main__':
# generate random sample
points = np.random.randn(n)
# generate grid at which to evaluate the sample
grid = np.linspace(-5.0, 5.0, grid_size)
# construct and evaluate scipy gaussian kde object
gaussian_kde_scipy = ss.kde.gaussian_kde(points)
density_estimate_scipy = gaussian_kde_scipy.evaluate(grid)
# construct and evaluate cocos gaussian kde object using gpu evaluation
gaussian_kde_cocos = gaussian_kde(cn.array(points), gpu=True)
density_estimate_cocos = np.array(gaussian_kde_cocos.evaluate(grid))
# verify that results are numerically close
print(f'maximum absolute difference between results gpu using Cocos and cpu using SciPy: '
f'{np.max(abs(density_estimate_cocos - density_estimate_scipy))}')
if np.allclose(density_estimate_cocos, density_estimate_scipy):
print('estimates from cocos and scipy are numerically close')
else:
print('estimates from cocos and scipy deviate by more than the default tolerance')
# plot kernel density estimates
plt.plot(grid, density_estimate_cocos, label='gaussian kernel density estimated using Cocos')
plt.plot(grid, density_estimate_scipy, label='gaussian kernel density estimated using SciPy')
plt.legend(loc=1)
plt.show()
# run benchmark comparing cpu performance using SciPy with gpu performance using Cocos
with Timer() as scipy_timer:
for _ in range(R):
gaussian_kde_scipy.evaluate(grid)
print(f'Time to evaluate gaussian kde on cpu using scipy was {scipy_timer.elapsed / R} seconds')
with Timer() as cocos_timer:
for _ in range(R):
gaussian_kde_cocos.evaluate(grid)
cd.sync()
print(f'Time to evaluate gaussian kde on gpu using cocos was {cocos_timer.elapsed / R} seconds')
print(f'speedup on gpu is {scipy_timer.elapsed/cocos_timer.elapsed}')
|
toolbox/pcl_library.py | zekunhao1995/DualSDF | 107 | 12700531 | import numpy as np
def calc_area(vertex):
vec_a = vertex[:,1] - vertex[:,0]
vec_b = vertex[:,2] - vertex[:,0]
normal = np.cross(vec_a, vec_b)
area = np.absolute(np.linalg.norm(normal, ord=2, axis=1))*0.5
return area
def uniform_sample_on_triangle(triangle):
while True:
rn = np.random.rand(2)
if np.sum(rn) <= 1.0:
break
return rn[0]*(triangle[1]-triangle[0]) + rn[1]*(triangle[2]-triangle[0]) + triangle[0]
# mesh
def mesh2pcl(triangle_collection, numpoints):
area_collection = calc_area(triangle_collection)
total_area = np.sum(area_collection)
print("Triangle count: {}".format(triangle_collection.shape[0]))
#print("Total surface area: {}".format(total_area))
area_collection /= total_area
# sample k points
    # note that this will give an error if area_collection.shape[0] == 0 (i.e., an empty mesh)
    sampled_triangles = np.random.choice(area_collection.shape[0], size=numpoints, p=area_collection)
    # Sample one random uv pair for each sampled triangle
rand_uv = np.random.rand(numpoints, 2)
oob_idx = np.sum(rand_uv, axis=-1) > 1.0
rand_uv[oob_idx,:] = -rand_uv[oob_idx,:] + 1.0
sampled_triangle_collection = triangle_collection[sampled_triangles,:,:]
sampled_points = rand_uv[:,[0]] * (sampled_triangle_collection[:,1,:] - sampled_triangle_collection[:,0,:]) \
+ rand_uv[:,[1]] * (sampled_triangle_collection[:,2,:] - sampled_triangle_collection[:,0,:]) \
+ sampled_triangle_collection[:,0,:]
return sampled_points.astype(np.float32)
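# Illustrative usage sketch (not part of the original module): sample a small
# point cloud from a single right triangle lying in the z=0 plane. The
# triangle coordinates and point count are arbitrary example values.
if __name__ == '__main__':
    tris = np.array([[[0.0, 0.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 1.0, 0.0]]])
    pcl = mesh2pcl(tris, numpoints=1024)
    print(pcl.shape)  # expected: (1024, 3)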
|
distribute/distributed_server.py | bupticybee/icyChessZero | 272 | 12700532 | import tornado.ioloop
import tornado.web
import argparse
import os
import sys
currentpath = os.path.dirname(os.path.realpath(__file__))
project_basedir = os.path.join(currentpath,'..')
sys.path.append(project_basedir)
from config import conf
datadir = conf.distributed_datadir
parser = argparse.ArgumentParser(description="mcts self play script")
parser.add_argument('--verbose', '-v', help='verbose mode',type=bool,default=False)
parser.add_argument('--datadir', '-d' ,type=str,help="data dir to store chess plays",default=datadir)
args = parser.parse_args()
datadir = args.datadir
class TestHandler(tornado.web.RequestHandler):
def get(self):
self.write("OK")
class ChessSubmitHandler(tornado.web.RequestHandler):
def post(self):
name = self.get_argument("name")
content = self.get_argument("content")
print("receive {}".format(name))
if args.verbose == True:
print(name,content)
with open(os.path.join(datadir,name),'w',encoding='utf-8') as whdl:
whdl.write(content)
self.write("OK")
class BestWeightNameHandler(tornado.web.RequestHandler):
def get(self):
filelist = os.listdir(conf.distributed_server_weight_dir)
filelist = [i[:-6] for i in filelist if '.index' in i and conf.noup_flag not in i]
self.write(sorted(filelist)[-1])
class ModelGetHandler(tornado.web.RequestHandler):
def get(self):
name = self.get_argument("name")
model_f = self.get_argument("model_f")
file_name = os.path.join(conf.distributed_server_weight_dir,"{}.{}".format(name,model_f))
self.set_header("Content-Type",'application/octet-stream')
self.set_header('Content-Disposition','attachment; filename={}'.format("{}.{}".format(name,model_f)))
with open(file_name,'rb') as f:
while True:
data = f.read(1024)
if not data:
                    break
self.write(data)
self.finish()
def make_app():
return tornado.web.Application([
(r"/test", TestHandler),
(r"/submit_chess", ChessSubmitHandler),
(r"/best_weight", BestWeightNameHandler),
(r"/model_get", ModelGetHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(conf.port)
tornado.ioloop.IOLoop.current().start()
|
tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesManagedVirtualNetworks.py | jamesholland-uk/checkov | 4,013 | 12700553 | <reponame>jamesholland-uk/checkov
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.SynapseWorkspaceEnablesManagedVirtualNetworks import check
from checkov.common.models.enums import CheckResult
class TestSynapseWorkspaceEnablesManagedVirtualNetworks(unittest.TestCase):
def test_failure_1(self):
hcl_res = hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
managed_virtual_network_enabled = false
aad_admin {
login = "<NAME>"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_synapse_workspace']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_2(self):
hcl_res = hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
aad_admin {
login = "AzureAD Admin"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_synapse_workspace']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
managed_virtual_network_enabled = true
aad_admin {
login = "AzureAD Admin"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_synapse_workspace']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
tests/layer_tests/tensorflow_tests/test_tf_ELU.py | monroid/openvino | 2,406 | 12700558 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
class TestELU(CommonTFLayerTest):
def create_elu_net(self, shape, ir_version):
"""
Tensorflow net IR net
Input->ELU => Input->ELU
"""
#
# Create Tensorflow model
#
import tensorflow as tf
tf.compat.v1.reset_default_graph()
# Create the graph and model
with tf.compat.v1.Session() as sess:
shapes = shape.copy()
# reshaping
if len(shapes) >= 4:
shapes.append(shapes.pop(1))
input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')
tf.nn.elu(input, name='Operation')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
#
# Create reference IR net
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'ELU': {'kind': 'op', 'type': 'Elu'},
'ELU_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}
ref_net = build_graph(nodes_attributes,
[('input', 'input_data'),
('input_data', 'ELU'),
('ELU', 'ELU_data'),
('ELU_data', 'result')
])
return tf_net, ref_net
test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])]
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_elu_precommit(self, params, ie_device, precision, ir_version, temp_dir):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_elu_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
test_data = [dict(shape=[10, 12]),
dict(shape=[8, 10, 12]),
dict(shape=[6, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12])]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_elu(self, params, ie_device, precision, ir_version, temp_dir):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_elu_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
|
parsimonious/tests/test_nodes.py | andreabenini/parsimonious | 1,253 | 12700578 | # -*- coding: utf-8 -*-
from nose import SkipTest
from nose.tools import eq_, ok_, assert_raises, assert_in
from parsimonious import Grammar, NodeVisitor, VisitationError, rule
from parsimonious.expressions import Literal
from parsimonious.nodes import Node
class HtmlFormatter(NodeVisitor):
"""Visitor that turns a parse tree into HTML fragments"""
grammar = Grammar("""bold_open = '(('""") # just partial
def visit_bold_open(self, node, visited_children):
return '<b>'
def visit_bold_close(self, node, visited_children):
return '</b>'
def visit_text(self, node, visited_children):
"""Return the text verbatim."""
return node.text
def visit_bold_text(self, node, visited_children):
return ''.join(visited_children)
class ExplosiveFormatter(NodeVisitor):
"""Visitor which raises exceptions"""
def visit_boom(self, node, visited_children):
raise ValueError
def test_visitor():
"""Assert a tree gets visited correctly."""
grammar = Grammar(r'''
bold_text = bold_open text bold_close
text = ~'[a-zA-Z 0-9]*'
bold_open = '(('
bold_close = '))'
''')
text = '((o hai))'
tree = Node(grammar['bold_text'], text, 0, 9,
[Node(grammar['bold_open'], text, 0, 2),
Node(grammar['text'], text, 2, 7),
Node(grammar['bold_close'], text, 7, 9)])
eq_(grammar.parse(text), tree)
result = HtmlFormatter().visit(tree)
eq_(result, '<b>o hai</b>')
def test_visitation_exception():
assert_raises(VisitationError,
ExplosiveFormatter().visit,
Node(Literal(''), '', 0, 0))
def test_str():
"""Test str and unicode of ``Node``."""
n = Node(Literal('something', name='text'), 'o hai', 0, 5)
good = '<Node called "text" matching "o hai">'
eq_(str(n), good)
def test_repr():
"""Test repr of ``Node``."""
s = u'hai ö'
boogie = u'böogie'
n = Node(Literal(boogie), s, 0, 3, children=[
Node(Literal(' '), s, 3, 4), Node(Literal(u'ö'), s, 4, 5)])
eq_(repr(n),
str("""s = {hai_o}\nNode({boogie}, s, 0, 3, children=[Node({space}, s, 3, 4), Node({o}, s, 4, 5)])""").format(
hai_o=repr(s),
boogie=repr(Literal(boogie)),
space=repr(Literal(" ")),
o=repr(Literal(u"ö")),
)
)
def test_parse_shortcut():
"""Exercise the simple case in which the visitor takes care of parsing."""
eq_(HtmlFormatter().parse('(('), '<b>')
def test_match_shortcut():
"""Exercise the simple case in which the visitor takes care of matching."""
eq_(HtmlFormatter().match('((other things'), '<b>')
class CoupledFormatter(NodeVisitor):
@rule('bold_open text bold_close')
def visit_bold_text(self, node, visited_children):
return ''.join(visited_children)
@rule('"(("')
def visit_bold_open(self, node, visited_children):
return '<b>'
@rule('"))"')
def visit_bold_close(self, node, visited_children):
return '</b>'
@rule('~"[a-zA-Z 0-9]*"')
def visit_text(self, node, visited_children):
"""Return the text verbatim."""
return node.text
def test_rule_decorator():
"""Make sure the @rule decorator works."""
eq_(CoupledFormatter().parse('((hi))'), '<b>hi</b>')
def test_rule_decorator_subclassing():
"""Make sure we can subclass and override visitor methods without blowing
away the rules attached to them."""
class OverridingFormatter(CoupledFormatter):
def visit_text(self, node, visited_children):
"""Return the text capitalized."""
return node.text.upper()
@rule('"not used"')
def visit_useless(self, node, visited_children):
"""Get in the way. Tempt the metaclass to pave over the
superclass's grammar with a new one."""
raise SkipTest("I haven't got around to making this work yet.")
eq_(OverridingFormatter().parse('((hi))'), '<b>HI</b>')
class PrimalScream(Exception):
pass
def test_unwrapped_exceptions():
class Screamer(NodeVisitor):
grammar = Grammar("""greeting = 'howdy'""")
unwrapped_exceptions = (PrimalScream,)
def visit_greeting(self, thing, visited_children):
raise PrimalScream('This should percolate up!')
assert_raises(PrimalScream, Screamer().parse, 'howdy')
def test_node_inequality():
node = Node(Literal('12345'), 'o hai', 0, 5)
ok_(node != 5)
ok_(node != None)
ok_(node != Node(Literal('23456'), 'o hai', 0, 5))
ok_(not (node != Node(Literal('12345'), 'o hai', 0, 5)))
def test_generic_visit_NotImplementedError_unnamed_node():
"""
Test that generic_visit provides informative error messages
when visitors are not defined.
Regression test for https://github.com/erikrose/parsimonious/issues/110
"""
class MyVisitor(NodeVisitor):
grammar = Grammar(r'''
bar = "b" "a" "r"
''')
unwrapped_exceptions = (NotImplementedError, )
with assert_raises(NotImplementedError) as e:
MyVisitor().parse('bar')
assert_in("No visitor method was defined for this expression: 'b'", str(e.exception))
def test_generic_visit_NotImplementedError_named_node():
"""
Test that generic_visit provides informative error messages
when visitors are not defined.
"""
class MyVisitor(NodeVisitor):
grammar = Grammar(r'''
bar = myrule myrule myrule
myrule = ~"[bar]"
''')
unwrapped_exceptions = (NotImplementedError, )
with assert_raises(NotImplementedError) as e:
MyVisitor().parse('bar')
assert_in("No visitor method was defined for this expression: myrule = ~'[bar]'", str(e.exception))
|
TauAnalysis/MCEmbeddingTools/python/DYToMuMuGenFilter_cfi.py | ckamtsikis/cmssw | 852 | 12700608 | <filename>TauAnalysis/MCEmbeddingTools/python/DYToMuMuGenFilter_cfi.py
import FWCore.ParameterSet.Config as cms
dYToMuMuGenFilter = cms.EDFilter("DYToMuMuGenFilter",
inputTag = cms.InputTag("prunedGenParticles"))
|
models/vision/detection/awsdet/utils/image/transforms/__init__.py | piyushghai/deep-learning-models | 129 | 12700615 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, gray2bgr,
gray2rgb, hls2bgr, hsv2bgr, iminvert, posterize,
rgb2bgr, rgb2gray, solarize)
__all__ = [
'solarize', 'posterize', 'bgr2gray', 'rgb2gray', 'gray2bgr', 'gray2rgb',
'bgr2rgb', 'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 'bgr2hls', 'hls2bgr',]
|
python-sdk/nuimages/utils/test_nuimages.py | bjajoh/nuscenes-devkit | 1,284 | 12700628 | # nuScenes dev-kit.
# Code written by <NAME>, 2020.
import os
import unittest
from nuimages import NuImages
class TestNuImages(unittest.TestCase):
def test_load(self):
"""
Loads up NuImages.
This is intended to simply run the NuImages class to check for import errors, typos, etc.
"""
assert 'NUIMAGES' in os.environ, 'Set NUIMAGES env. variable to enable tests.'
nuim = NuImages(version='v1.0-mini', dataroot=os.environ['NUIMAGES'], verbose=False)
# Trivial assert statement
self.assertEqual(nuim.table_root, os.path.join(os.environ['NUIMAGES'], 'v1.0-mini'))
if __name__ == '__main__':
unittest.main()
|
deer/agent.py | jhardy0/deer | 373 | 12700635 | """
This module contains classes used to define the standard behavior of the agent.
It relies on the controllers, the chosen training/test policy and the learning algorithm
to specify its behavior in the environment.
"""
import os
import numpy as np
import copy
import sys
import joblib
from warnings import warn
from .experiment import base_controllers as controllers
from .helper import tree
from deer.policies import EpsilonGreedyPolicy
class NeuralAgent(object):
"""The NeuralAgent class wraps a learning algorithm (such as a deep Q-network) for training and testing in a given environment.
Attach controllers to it in order to conduct an experiment (when to train the agent, when to test,...).
Parameters
-----------
environment : object from class Environment
The environment in which the agent interacts
learning_algo : object from class LearningAlgo
The learning algorithm associated to the agent
replay_memory_size : int
Size of the replay memory. Default : 1000000
replay_start_size : int
Number of observations (=number of time steps taken) in the replay memory before starting learning.
Default: minimum possible according to environment.inputDimensions().
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
random_state : numpy random number generator
Default : random seed.
exp_priority : float
The exponent that determines how much prioritization is used, default is 0 (uniform priority).
One may check out Schaul et al. (2016) - Prioritized Experience Replay.
train_policy : object from class Policy
Policy followed when in training mode (mode -1)
test_policy : object from class Policy
Policy followed when in other modes than training (validation and test modes)
only_full_history : boolean
Whether we wish to train the neural network only on full histories or we wish to fill with zeroes the
observations before the beginning of the episode
"""
def __init__(self, environment, learning_algo, replay_memory_size=1000000, replay_start_size=None, batch_size=32, random_state=np.random.RandomState(), exp_priority=0, train_policy=None, test_policy=None, only_full_history=True):
inputDims = environment.inputDimensions()
if replay_start_size == None:
replay_start_size = max(inputDims[i][0] for i in range(len(inputDims)))
elif replay_start_size < max(inputDims[i][0] for i in range(len(inputDims))) :
raise AgentError("Replay_start_size should be greater than the biggest history of a state.")
self._controllers = []
self._environment = environment
self._learning_algo = learning_algo
self._replay_memory_size = replay_memory_size
self._replay_start_size = replay_start_size
self._batch_size = batch_size
self._random_state = random_state
self._exp_priority = exp_priority
self._only_full_history = only_full_history
self._dataset = DataSet(environment, max_size=replay_memory_size, random_state=random_state, use_priority=self._exp_priority, only_full_history=self._only_full_history)
self._tmp_dataset = None # Will be created by startTesting() when necessary
self._mode = -1
self._totalModeNbrEpisode = 0
self._total_mode_reward = 0
self._training_loss_averages = []
self._Vs_on_last_episode = []
self._in_episode = False
self._selected_action = -1
self._state = []
for i in range(len(inputDims)):
self._state.append(np.zeros(inputDims[i], dtype=float))
if (train_policy==None):
self._train_policy = EpsilonGreedyPolicy(learning_algo, environment.nActions(), random_state, 0.1)
else:
self._train_policy = train_policy
if (test_policy==None):
self._test_policy = EpsilonGreedyPolicy(learning_algo, environment.nActions(), random_state, 0.)
else:
self._test_policy = test_policy
self.gathering_data=True # Whether the agent is gathering data or not
self.sticky_action=1 # Number of times the agent is forced to take the same action as part of one actual time step
def setControllersActive(self, toDisable, active):
""" Activate controller
"""
for i in toDisable:
self._controllers[i].setActive(active)
def setLearningRate(self, lr):
""" Set the learning rate for the gradient descent
"""
self._learning_algo.setLearningRate(lr)
def learningRate(self):
""" Get the learning rate
"""
return self._learning_algo.learningRate()
def setDiscountFactor(self, df):
""" Set the discount factor
"""
self._learning_algo.setDiscountFactor(df)
def discountFactor(self):
""" Get the discount factor
"""
return self._learning_algo.discountFactor()
def overrideNextAction(self, action):
""" Possibility to override the chosen action. This possibility should be used on the signal OnActionChosen.
"""
self._selected_action = action
def avgBellmanResidual(self):
""" Returns the average training loss on the epoch
"""
if (len(self._training_loss_averages) == 0):
return -1
return np.average(self._training_loss_averages)
def avgEpisodeVValue(self):
""" Returns the average V value on the episode (on time steps where a non-random action has been taken)
"""
if (len(self._Vs_on_last_episode) == 0):
return -1
if(np.trim_zeros(self._Vs_on_last_episode)!=[]):
return np.average(np.trim_zeros(self._Vs_on_last_episode))
else:
return 0
def totalRewardOverLastTest(self):
""" Returns the average sum of rewards per episode and the number of episode
"""
return self._total_mode_reward/self._totalModeNbrEpisode, self._totalModeNbrEpisode
def attach(self, controller):
if (isinstance(controller, controllers.Controller)):
self._controllers.append(controller)
else:
raise TypeError("The object you try to attach is not a Controller.")
def detach(self, controllerIdx):
return self._controllers.pop(controllerIdx)
def mode(self):
return self._mode
def startMode(self, mode, epochLength):
if self._in_episode:
raise AgentError("Trying to start mode while current episode is not yet finished. This method can be "
"called only *between* episodes for testing and validation.")
elif mode == -1:
raise AgentError("Mode -1 is reserved and means 'training mode'; use resumeTrainingMode() instead.")
else:
self._mode = mode
self._total_mode_reward = 0.
del self._tmp_dataset
self._tmp_dataset = DataSet(self._environment, self._random_state, max_size=self._replay_memory_size, only_full_history=self._only_full_history)
def resumeTrainingMode(self):
self._mode = -1
def summarizeTestPerformance(self):
if self._mode == -1:
raise AgentError("Cannot summarize test performance outside test environment.")
self._environment.summarizePerformance(self._tmp_dataset, self._learning_algo, train_data_set=self._dataset)
def train(self):
"""
This function selects a random batch of data (with self._dataset.randomBatch) and performs a
Q-learning iteration (with self._learning_algo.train).
"""
# We make sure that the number of elements in the replay memory
# is strictly superior to self._replay_start_size before taking
# a random batch and perform training
if self._dataset.n_elems <= self._replay_start_size:
return
try:
if hasattr(self._learning_algo, 'nstep'):
observations, actions, rewards, terminals, rndValidIndices = self._dataset.randomBatch_nstep(self._batch_size, self._learning_algo.nstep, self._exp_priority)
loss, loss_ind = self._learning_algo.train(observations, actions, rewards, terminals)
else:
states, actions, rewards, next_states, terminals, rndValidIndices = self._dataset.randomBatch(self._batch_size, self._exp_priority)
loss, loss_ind = self._learning_algo.train(states, actions, rewards, next_states, terminals)
self._training_loss_averages.append(loss)
if (self._exp_priority):
self._dataset.updatePriorities(pow(loss_ind,self._exp_priority)+0.0001, rndValidIndices[1])
except SliceError as e:
warn("Training not done - " + str(e), AgentWarning)
def dumpNetwork(self, fname, nEpoch=-1):
""" Dump the network
Parameters
-----------
fname : string
Name of the file where the network will be dumped
nEpoch : int
Epoch number (Optional)
"""
try:
os.mkdir("nnets")
except Exception:
pass
basename = "nnets/" + fname
for f in os.listdir("nnets/"):
if fname in f:
os.remove("nnets/" + f)
all_params = self._learning_algo.getAllParams()
if (nEpoch>=0):
joblib.dump(all_params, basename + ".epoch={}".format(nEpoch))
else:
joblib.dump(all_params, basename, compress=True)
def setNetwork(self, fname, nEpoch=-1):
""" Set values into the network
Parameters
-----------
fname : string
Name of the file where the values are
nEpoch : int
Epoch number (Optional)
"""
basename = "nnets/" + fname
if (nEpoch>=0):
all_params = joblib.load(basename + ".epoch={}".format(nEpoch))
else:
all_params = joblib.load(basename)
self._learning_algo.setAllParams(all_params)
def run(self, n_epochs, epoch_length):
"""
This function encapsulates the inference and the learning.
If the agent is in train mode (mode = -1):
        It starts by calling the controllers' "onStart" method,
        then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
        agent._runEpisode) and where an epoch ends after the number of steps reaches the argument "epoch_length".
        It ends by calling the controllers' "onEnd" method.
        If the agent is in non-train mode (mode > -1):
        This function runs a number of epochs in non-train mode (mode > -1), thus without controllers.
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
if(self._mode==-1):
self._run_train(n_epochs, epoch_length)
else:
self._run_non_train(n_epochs, epoch_length)
def _run_train(self, n_epochs, epoch_length):
"""
This function encapsulates the whole process of the learning.
        It starts by calling the controllers' "onStart" method,
        then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
        agent._runEpisode) and where an epoch ends after the number of steps reaches the argument "epoch_length".
        It ends by calling the controllers' "onEnd" method.
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
for c in self._controllers: c.onStart(self)
i = 0
while i < n_epochs:
nbr_steps_left=epoch_length
self._training_loss_averages = []
while nbr_steps_left > 0: # run new episodes until the number of steps left for the epoch has reached 0
nbr_steps_left = self._runEpisode(nbr_steps_left)
i += 1
for c in self._controllers: c.onEpochEnd(self)
self._environment.end()
for c in self._controllers: c.onEnd(self)
def _run_non_train(self, n_epochs, epoch_length):
"""
        This function runs a number of epochs in non-train mode (mode > -1).
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
for c in self._controllers: c.onStart(self)
i = 0
while i < n_epochs:
nbr_steps_left=epoch_length
self._totalModeNbrEpisode=0
while nbr_steps_left > 0:
self._totalModeNbrEpisode += 1
nbr_steps_left = self._runEpisode(nbr_steps_left)
i += 1
for c in self._controllers: c.onEpochEnd(self)
self._environment.end()
for c in self._controllers: c.onEnd(self)
def _runEpisode(self, maxSteps):
"""
This function runs an episode of learning. An episode ends up when the environment method "inTerminalState"
returns True (or when the number of steps reaches the argument "maxSteps")
Parameters
-----------
maxSteps : int
maximum number of steps before automatically ending the episode
"""
self._in_episode = True
initState = self._environment.reset(self._mode)
inputDims = self._environment.inputDimensions()
for i in range(len(inputDims)):
if inputDims[i][0] > 1:
self._state[i][1:] = initState[i][1:]
self._Vs_on_last_episode = []
is_terminal=False
reward=0
while maxSteps > 0:
maxSteps -= 1
if(self.gathering_data==True or self._mode!=-1):
obs = self._environment.observe()
for i in range(len(obs)):
self._state[i][0:-1] = self._state[i][1:]
self._state[i][-1] = obs[i]
V, action, reward = self._step()
self._Vs_on_last_episode.append(V)
if self._mode != -1:
self._total_mode_reward += reward
is_terminal = self._environment.inTerminalState() # If the transition ends up in a terminal state, mark transition as terminal
# Note that the new obs will not be stored, as it is unnecessary.
if(maxSteps>0):
self._addSample(obs, action, reward, is_terminal)
else:
self._addSample(obs, action, reward, True) # If the episode ends because max number of steps is reached, mark the transition as terminal
for c in self._controllers: c.onActionTaken(self)
if is_terminal:
break
self._in_episode = False
for c in self._controllers: c.onEpisodeEnd(self, is_terminal, reward)
return maxSteps
def _step(self):
"""
This method is called at each time step and performs one action in the environment.
Returns
-------
V : float
Estimated value function of current state.
action : int
The id of the action selected by the agent.
reward : float
Reward obtained for the transition
"""
action, V = self._chooseAction()
reward=0
for i in range(self.sticky_action):
reward += self._environment.act(action)
return V, action, reward
def _addSample(self, ponctualObs, action, reward, is_terminal):
if self._mode != -1:
self._tmp_dataset.addSample(ponctualObs, action, reward, is_terminal, priority=1)
else:
self._dataset.addSample(ponctualObs, action, reward, is_terminal, priority=1)
def _chooseAction(self):
if self._mode != -1:
# Act according to the test policy if not in training mode
action, V = self._test_policy.action(self._state, mode=self._mode, dataset=self._dataset)
else:
if self._dataset.n_elems > self._replay_start_size:
# follow the train policy
action, V = self._train_policy.action(self._state, mode=None, dataset=self._dataset) #is self._state the only way to store/pass the state?
else:
# Still gathering initial data: choose dummy action
action, V = self._train_policy.randomAction()
for c in self._controllers: c.onActionChosen(self, action)
return action, V
class AgentError(RuntimeError):
"""Exception raised for errors when calling the various Agent methods at wrong times.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AgentWarning(RuntimeWarning):
"""Warning issued of the various Agent methods.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
class DataSet(object):
"""A replay memory consisting of circular buffers for observations, actions, rewards and terminals."""
def __init__(self, env, random_state=None, max_size=1000000, use_priority=False, only_full_history=True):
"""Initializer.
Parameters
-----------
        env : object from class Environment
            The environment whose inputDimensions() describe the observations to be stored. Each tuple relates
            to one of the observations where the first value is the history size considered for this observation
            and the rest describes the shape of each punctual observation (e.g., scalar, vector or matrix).
            See base_classes.Environment.inputDimensions() documentation for more info.
random_state : Numpy random number generator
If None, a new one is created with default numpy seed.
max_size : float
The replay memory maximum size. Default : 1000000
"""
self._batch_dimensions = env.inputDimensions()
self._max_history_size = np.max([self._batch_dimensions[i][0] for i in range (len(self._batch_dimensions))])
self._size = max_size
self._use_priority = use_priority
self._only_full_history = only_full_history
if ( isinstance(env.nActions(),int) ):
self._actions = CircularBuffer(max_size, dtype="int8")
else:
self._actions = CircularBuffer(max_size, dtype='object')
self._rewards = CircularBuffer(max_size)
self._terminals = CircularBuffer(max_size, dtype="bool")
if (self._use_priority):
self._prioritiy_tree = tree.SumTree(max_size)
self._translation_array = np.zeros(max_size)
self._observations = np.zeros(len(self._batch_dimensions), dtype='object')
# Initialize the observations container if necessary
for i in range(len(self._batch_dimensions)):
self._observations[i] = CircularBuffer(max_size, elemShape=self._batch_dimensions[i][1:], dtype=env.observationType(i))
if (random_state == None):
self._random_state = np.random.RandomState()
else:
self._random_state = random_state
self.n_elems = 0
self.sticky_action=1 # Number of times the agent is forced to take the same action as part of one actual time step
def actions(self):
"""Get all actions currently in the replay memory, ordered by time where they were taken."""
return self._actions.getSlice(0)
def rewards(self):
"""Get all rewards currently in the replay memory, ordered by time where they were received."""
return self._rewards.getSlice(0)
def terminals(self):
"""Get all terminals currently in the replay memory, ordered by time where they were observed.
terminals[i] is True if actions()[i] lead to a terminal state (i.e. corresponded to a terminal
transition), and False otherwise.
"""
return self._terminals.getSlice(0)
def observations(self):
"""Get all observations currently in the replay memory, ordered by time where they were observed.
"""
ret = np.zeros_like(self._observations)
for input in range(len(self._observations)):
ret[input] = self._observations[input].getSlice(0)
return ret
def updatePriorities(self, priorities, rndValidIndices):
"""
"""
for i in range( len(rndValidIndices) ):
self._prioritiy_tree.update(rndValidIndices[i], priorities[i])
def randomBatch(self, batch_size, use_priority):
"""Returns a batch of states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this batch_size could not be built based on current data set (not enough data or all
trajectories are too short).
"""
if (self._max_history_size + self.sticky_action - 1 >= self.n_elems):
raise SliceError(
"Not enough elements in the dataset to create a "
"complete state. {} elements in dataset; requires {}"
.format(self.n_elems, self._max_history_size))
if (self._use_priority):
#FIXME : take into account the case where self._only_full_history is false
rndValidIndices, rndValidIndices_tree = self._randomPrioritizedBatch(batch_size)
if (rndValidIndices.size == 0):
raise SliceError("Could not find a state with full histories")
else:
rndValidIndices = np.zeros(batch_size, dtype='int32')
if (self._only_full_history):
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(self._max_history_size+self.sticky_action-1)
else:
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(minimum_without_terminal=self.sticky_action)
actions = self._actions.getSliceBySeq(rndValidIndices)
rewards = self._rewards.getSliceBySeq(rndValidIndices)
terminals = self._terminals.getSliceBySeq(rndValidIndices)
states = np.zeros(len(self._batch_dimensions), dtype='object')
next_states = np.zeros_like(states)
# We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]
for rndValidIndex in rndValidIndices:
first_terminal=1
while first_terminal<self._max_history_size+self.sticky_action-1:
if (self._terminals[rndValidIndex-first_terminal]==True or first_terminal>rndValidIndex):
break
first_terminal+=1
first_terminals.append(first_terminal)
for input in range(len(self._batch_dimensions)):
states[input] = np.zeros((batch_size,) + self._batch_dimensions[input], dtype=self._observations[input].dtype)
next_states[input] = np.zeros_like(states[input])
for i in range(batch_size):
slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action+2-min(self._batch_dimensions[input][0],first_terminals[i]+self.sticky_action-1), rndValidIndices[i]+1)
if (len(slice)==len(states[input][i])):
states[input][i] = slice
else:
for j in range(len(slice)):
states[input][i][-j-1]=slice[-j-1]
# If transition leads to terminal, we don't care about next state
if rndValidIndices[i] >= self.n_elems - 1 or terminals[i]:
next_states[input][i] = np.zeros_like(states[input][i])
else:
slice=self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0],first_terminals[i]+1), rndValidIndices[i]+2)
if (len(slice)==len(states[input][i])):
next_states[input][i] = slice
else:
for j in range(len(slice)):
next_states[input][i][-j-1]=slice[-j-1]
#next_states[input][i] = self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0],first_terminal), rndValidIndices[i]+2)
if (self._use_priority):
return states, actions, rewards, next_states, terminals, [rndValidIndices, rndValidIndices_tree]
else:
return states, actions, rewards, next_states, terminals, rndValidIndices
def randomBatch_nstep(self, batch_size, nstep, use_priority):
"""Return corresponding states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
nstep : int
Number of transitions to be considered for each element
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size, nstep]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size, nstep]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size, nstep]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this size could not be built based on current data set (not enough data or all
trajectories are too short).
"""
if (self._max_history_size + self.sticky_action - 1 >= self.n_elems):
raise SliceError(
"Not enough elements in the dataset to create a "
"complete state. {} elements in dataset; requires {}"
.format(self.n_elems, self._max_history_size))
if (self._use_priority):
#FIXME : take into account the case where self._only_full_history is false
rndValidIndices, rndValidIndices_tree = self._randomPrioritizedBatch(batch_size)
if (rndValidIndices.size == 0):
raise SliceError("Could not find a state with full histories")
else:
rndValidIndices = np.zeros(batch_size, dtype='int32')
if (self._only_full_history):
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(self._max_history_size+self.sticky_action*nstep-1)
else:
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(minimum_without_terminal=self.sticky_action*nstep)
actions=np.zeros((batch_size,(nstep)*self.sticky_action), dtype=int)
rewards=np.zeros((batch_size,(nstep)*self.sticky_action))
terminals=np.zeros((batch_size,(nstep)*self.sticky_action))
for i in range(batch_size):
actions[i] = self._actions.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
rewards[i] = self._rewards.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
terminals[i] = self._terminals.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
observations = np.zeros(len(self._batch_dimensions), dtype='object')
# We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]
for rndValidIndex in rndValidIndices:
first_terminal=1
while first_terminal<self._max_history_size+self.sticky_action*nstep-1:
if (self._terminals[rndValidIndex-first_terminal]==True or first_terminal>rndValidIndex):
break
first_terminal+=1
first_terminals.append(first_terminal)
batch_dimensions=copy.deepcopy(self._batch_dimensions)
for input in range(len(self._batch_dimensions)):
batch_dimensions[input]=tuple( x + y for x, y in zip(self._batch_dimensions[input],(self.sticky_action*(nstep+1)-1,0,0)) )
observations[input] = np.zeros((batch_size,) + batch_dimensions[input], dtype=self._observations[input].dtype)
for i in range(batch_size):
slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action*nstep+2-min(self._batch_dimensions[input][0],first_terminals[i]-self.sticky_action*nstep+1), rndValidIndices[i]+self.sticky_action+1)
if (len(slice)==len(observations[input][i])):
observations[input][i] = slice
else:
for j in range(len(slice)):
observations[input][i][-j-1]=slice[-j-1]
# If transition leads to terminal, we don't care about next state
if terminals[i][-1]:#rndValidIndices[i] >= self.n_elems - 1 or terminals[i]:
observations[input][rndValidIndices[i]:rndValidIndices[i]+self.sticky_action+1] = 0
if (self._use_priority):
return observations, actions, rewards, terminals, [rndValidIndices, rndValidIndices_tree]
else:
return observations, actions, rewards, terminals, rndValidIndices
def _randomValidStateIndex(self, minimum_without_terminal):
""" Returns the index corresponding to a timestep that is valid
"""
index_lowerBound = minimum_without_terminal - 1
# We try out an index in the acceptable range of the replay memory
index = self._random_state.randint(index_lowerBound, self.n_elems-1)
# Check if slice is valid wrt terminals
# The selected index may correspond to a terminal transition but not
# the previous minimum_without_terminal-1 transition
firstTry = index
startWrapped = False
while True:
i = index-1
processed = 0
for _ in range(minimum_without_terminal-1):
if (i < 0 or self._terminals[i]):
                    break
i -= 1
processed += 1
if (processed < minimum_without_terminal - 1):
# if we stopped prematurely, shift slice to the left and try again
index = i
if (index < index_lowerBound):
startWrapped = True
index = self.n_elems - 1
if (startWrapped and index <= firstTry):
raise SliceError("Could not find a state with full histories")
else:
# else index was ok according to terminals
return index
def _randomPrioritizedBatch(self, batch_size):
indices_tree = self._prioritiy_tree.getBatch(batch_size, self._random_state, self)
indices_replay_mem=np.zeros(indices_tree.size,dtype='int32')
for i in range(len(indices_tree)):
indices_replay_mem[i]= int(self._translation_array[indices_tree[i]] \
- self._actions.getLowerBound())
return indices_replay_mem, indices_tree
def addSample(self, obs, action, reward, is_terminal, priority):
"""Store the punctual observations, action, reward, is_terminal and priority in the dataset.
Parameters
-----------
obs : ndarray
An ndarray(dtype='object') where obs[s] corresponds to the punctual observation s before the
agent took action [action].
action : int
The action taken after having observed [obs].
reward : float
The reward associated to taking this [action].
is_terminal : bool
Tells whether [action] lead to a terminal state (i.e. corresponded to a terminal transition).
priority : float
The priority to be associated with the sample
"""
# Store observations
for i in range(len(self._batch_dimensions)):
self._observations[i].append(obs[i])
# Update tree and translation table
if (self._use_priority):
index = self._actions.getIndex()
if (index >= self._size):
ub = self._actions.getUpperBound()
true_size = self._actions.getTrueSize()
tree_ind = index%self._size
if (ub == true_size):
size_extension = true_size - self._size
# New index
index = self._size - 1
tree_ind = -1
# Shift translation array
self._translation_array -= size_extension + 1
tree_ind = np.where(self._translation_array==tree_ind)[0][0]
else:
tree_ind = index
self._prioritiy_tree.update(tree_ind)
self._translation_array[tree_ind] = index
# Store rest of sample
self._actions.append(action)
self._rewards.append(reward)
self._terminals.append(is_terminal)
if (self.n_elems < self._size):
self.n_elems += 1
class CircularBuffer(object):
def __init__(self, size, elemShape=(), extension=0.1, dtype="float32"):
self._size = size
self._data = np.zeros((int(size+extension*size),) + elemShape, dtype=dtype)
self._trueSize = self._data.shape[0]
self._lb = 0
self._ub = size
self._cur = 0
self.dtype = dtype
def append(self, obj):
if self._cur > self._size: #> instead of >=
self._lb += 1
self._ub += 1
if self._ub >= self._trueSize:
# Rolling array without copying whole array (for memory constraints)
# basic command: self._data[0:self._size-1] = self._data[self._lb:] OR NEW self._data[0:self._size] = self._data[self._lb-1:]
n_splits=10
for i in range(n_splits):
self._data[i*(self._size)//n_splits:(i+1)*(self._size)//n_splits] = self._data[(self._lb-1)+i*(self._size)//n_splits:(self._lb-1)+(i+1)*(self._size)//n_splits]
self._lb = 0
self._ub = self._size
self._cur = self._size #OLD self._size - 1
self._data[self._cur] = obj
self._cur += 1
def __getitem__(self, i):
return self._data[self._lb + i]
def getSliceBySeq(self, seq):
return self._data[seq + self._lb]
def getSlice(self, start, end=sys.maxsize):
if end == sys.maxsize:
return self._data[self._lb+start:self._cur]
else:
return self._data[self._lb+start:self._lb+end]
def getLowerBound(self):
return self._lb
def getUpperBound(self):
return self._ub
def getIndex(self):
return self._cur
def getTrueSize(self):
return self._trueSize
class SliceError(LookupError):
"""Exception raised for errors when getting slices from CircularBuffers.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
if __name__ == "__main__":
pass
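    # Illustrative sketch (not part of the original library): a tiny smoke test
    # of the CircularBuffer helper used by DataSet. The buffer keeps roughly
    # the last `size` appended elements; `extension` controls how much
    # head-room is allocated before the internal array is rolled. The sizes
    # below are arbitrary example values.
    buf = CircularBuffer(5, extension=1.0, dtype="int32")
    for value in range(12):
        buf.append(value)
    print(buf.getSlice(0))  # window of the most recently retained values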
|
packages/pegasus-python/src/Pegasus/service/ensembles/views.py | ahnitz/pegasus | 127 | 12700644 | import json
import logging
import os
import re
import subprocess
from pathlib import Path
from flask import g, make_response, request, url_for
from Pegasus.db import connection
from Pegasus.db.ensembles import (
EMError,
Ensembles,
EnsembleWorkflowStates,
Triggers,
TriggerType,
)
from Pegasus.service.ensembles import api, emapp
from Pegasus.service.lifecycle import authenticate
log = logging.getLogger(__name__)
def connect():
log.debug("Connecting to database")
g.master_db_url = g.user.get_master_db_url()
g.session = connection.connect(
g.master_db_url, connect_args={"check_same_thread": False}
)
def disconnect():
if "conn" in g:
log.debug("Disconnecting from database")
g.session.close()
@emapp.errorhandler(Exception)
def handle_error(e):
return api.json_api_error(e)
emapp.before_request(authenticate)
emapp.before_request(connect)
@emapp.teardown_request
def teardown_request(exception):
disconnect()
@emapp.route("/ensembles", methods=["GET"])
def route_list_ensembles():
dao = Ensembles(g.session)
ensembles = dao.list_ensembles(g.user.username)
result = [e.get_object() for e in ensembles]
return api.json_response(result)
@emapp.route("/ensembles", methods=["POST"])
def route_create_ensemble():
name = request.form.get("name", None)
if name is None:
raise EMError("Specify ensemble name")
max_running = request.form.get("max_running", 1)
max_planning = request.form.get("max_planning", 1)
dao = Ensembles(g.session)
dao.create_ensemble(g.user.username, name, max_running, max_planning)
g.session.commit()
return api.json_created(url_for("route_get_ensemble", name=name, _external=True))
@emapp.route("/ensembles/<string:name>", methods=["GET"])
def route_get_ensemble(name):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, name)
result = e.get_object()
return api.json_response(result)
@emapp.route("/ensembles/<string:name>", methods=["PUT", "POST"])
def route_update_ensemble(name):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, name)
max_running = request.form.get("max_running", None)
if max_running is not None:
e.set_max_running(max_running)
max_planning = request.form.get("max_planning", None)
if max_planning is not None:
e.set_max_planning(max_planning)
state = request.form.get("state", None)
if state is not None:
if state != e.state:
# TODO Do the necessary state transition
e.set_state(state)
e.set_updated()
g.session.commit()
return api.json_response(e.get_object())
@emapp.route("/ensembles/<string:name>/workflows", methods=["GET"])
def route_list_ensemble_workflows(name):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, name)
result = [w.get_object() for w in dao.list_ensemble_workflows(e.id)]
return api.json_response(result)
@emapp.route("/ensembles/<string:ensemble>/workflows", methods=["POST"])
def route_create_ensemble_workflow(ensemble):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, ensemble)
name = request.form.get("name", None)
if name is None:
raise EMError("Specify ensemble workflow 'name'")
priority = request.form.get("priority", 0)
basedir = request.form.get("basedir")
if basedir is None:
raise EMError("Specify 'basedir' where plan command should be executed")
plan_command = request.form.get("plan_command")
if plan_command is None:
raise EMError("Specify 'plan_command' that should be executed to plan workflow")
dao.create_ensemble_workflow(e.id, name, basedir, priority, plan_command)
g.session.commit()
return api.json_created(
url_for("route_get_ensemble_workflow", ensemble=ensemble, workflow=name)
)
@emapp.route(
"/ensembles/<string:ensemble>/workflows/<string:workflow>", methods=["GET"]
)
def route_get_ensemble_workflow(ensemble, workflow):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, ensemble)
w = dao.get_ensemble_workflow(e.id, workflow)
result = w.get_detail_object()
return api.json_response(result)
@emapp.route(
"/ensembles/<string:ensemble>/workflows/<string:workflow>", methods=["PUT", "POST"]
)
def route_update_ensemble_workflow(ensemble, workflow):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, ensemble)
w = dao.get_ensemble_workflow(e.id, workflow)
priority = request.form.get("priority", None)
if priority is not None:
w.set_priority(priority)
state = request.form.get("state", None)
if state is not None:
w.change_state(state)
w.set_updated()
g.session.commit()
return api.json_response(w.get_detail_object())
@emapp.route(
"/ensembles/<string:ensemble>/workflows/<string:workflow>/analyze", methods=["GET"]
)
def route_analyze_ensemble_workflow(ensemble, workflow):
dao = Ensembles(g.session)
e = dao.get_ensemble(g.user.username, ensemble)
w = dao.get_ensemble_workflow(e.id, workflow)
report = "".join(analyze(w))
resp = make_response(report, 200)
resp.headers["Content-Type"] = "text/plain"
return resp
def analyze(workflow):
w = workflow
yield "Workflow state is %s\n" % w.state
yield "Plan command is: %s\n" % w.plan_command
logfile = w.get_logfile()
if os.path.isfile(logfile):
yield "Workflow log:\n"
for l in open(w.get_logfile(), "rb"):
yield "LOG: %s" % l.decode()
else:
yield "No workflow log available\n"
if w.submitdir is None or not os.path.isdir(w.submitdir):
yield "No submit directory available\n"
else:
yield "pegasus-analyzer output is:\n"
p = subprocess.Popen(
["pegasus-analyzer", w.submitdir],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, err = p.communicate()
out = out.decode()
for l in out.split("\n"):
yield "ANALYZER: %s\n" % l
rc = p.wait()
yield "ANALYZER: Exited with code %d\n" % rc
if w.state == EnsembleWorkflowStates.PLAN_FAILED:
yield "Planner failure detected\n"
elif w.state == EnsembleWorkflowStates.RUN_FAILED:
yield "pegasus-run failure detected\n"
elif w.state == EnsembleWorkflowStates.FAILED:
yield "Workflow failure detected\n"
# --- trigger related routes ---------------------------------------------------
@emapp.route("/ensembles/<string:ensemble>/triggers", methods=["GET"])
def route_list_triggers(ensemble):
dao = Triggers(g.session)
triggers = dao.list_triggers_by_ensemble(g.user.username, ensemble)
return api.json_response([Triggers.get_object(t) for t in triggers])
@emapp.route("/ensembles/<string:ensemble>/triggers/<string:trigger>", methods=["GET"])
def route_get_trigger(ensemble, trigger):
raise NotImplementedError("TODO")
# TODO: checks for correct data should be done here on the backend
"""
# error response format
{
id: "id", <-- unique id to a request, it has been added as request.uid (use this when logging)
code: "UNPROCESSABLE_ENTITY", <-- capitalized versions of errors that json schema would return
"message": "Err description",
"errors": [
{
code: "MIN_LEN_ERR",
message": "Err description",
path: [ field_name ],
},
..
],
"warnings": [ .. ]
}
"""
@emapp.route("/ensembles/<string:ensemble>/triggers/cron", methods=["POST"])
def route_create_cron_trigger(ensemble):
# verify that ensemble exists for user
e_dao = Ensembles(g.session)
# raises EMError code 404 if does not exist
ensemble_id = e_dao.get_ensemble(g.user.username, ensemble).id
# validate trigger
trigger = request.form.get("trigger", type=str)
if not trigger or len(trigger) == 0:
raise EMError("trigger name must be a non-empty string")
# validate workflow_script
workflow_script = request.form.get("workflow_script", type=str)
if not workflow_script or len(workflow_script) == 0:
raise EMError("workflow_script name must be a non-empty string")
if not Path(workflow_script).is_absolute():
raise EMError("workflow_script must be given as an absolute path")
# validate workflow_args
can_decode = True
try:
workflow_args = json.loads(request.form.get("workflow_args"))
except json.JSONDecodeError:
can_decode = False
if not can_decode or not isinstance(workflow_args, list):
raise EMError("workflow_args must be given as a list serialized to json")
# validate interval
try:
interval = to_seconds(request.form.get("interval", type=str))
except ValueError:
raise EMError(
"interval must be given as `<int> <s|m|h|d>` and be greater than 0 seconds"
)
# validate timeout
try:
timeout = request.form.get("timeout", type=str, default=None)
if timeout is not None:
timeout = to_seconds(timeout)
except ValueError:
raise EMError(
"timeout must be given as `<int> <s|m|h|d>` and be greater than 0 seconds"
)
kwargs = {
"ensemble_id": ensemble_id,
"trigger": trigger,
"trigger_type": TriggerType.CRON.value,
"workflow_script": workflow_script,
"workflow_args": workflow_args,
"interval": interval,
"timeout": timeout,
}
# create trigger entry in db
t_dao = Triggers(g.session)
t_dao.insert_trigger(**kwargs)
# return response success
return api.json_created(
url_for("route_get_trigger", ensemble=ensemble, trigger=trigger)
)
@emapp.route("/ensembles/<string:ensemble>/triggers/file_pattern", methods=["POST"])
def route_create_file_pattern_trigger(ensemble):
# verify that ensemble exists for user
e_dao = Ensembles(g.session)
# raises EMError code 404 if does not exist
ensemble_id = e_dao.get_ensemble(g.user.username, ensemble).id
# validate trigger
trigger = request.form.get("trigger", type=str)
if not trigger or len(trigger) == 0:
raise EMError("trigger name must be a non-empty string")
# validate workflow_script
workflow_script = request.form.get("workflow_script", type=str)
if not workflow_script or len(workflow_script) == 0:
raise EMError("workflow_script name must be a non-empty string")
if not Path(workflow_script).is_absolute():
raise EMError("workflow_script must be given as an absolute path")
# validate workflow_args
can_decode = True
try:
workflow_args = json.loads(request.form.get("workflow_args"))
except json.JSONDecodeError:
can_decode = False
if not can_decode or not isinstance(workflow_args, list):
raise EMError("workflow_args must be given as a list serialized to json")
# validate interval
try:
interval = to_seconds(request.form.get("interval", type=str))
except ValueError:
raise EMError(
"interval must be given as `<int> <s|m|h|d>` and be greater than 0 seconds"
)
# validate timeout
try:
timeout = request.form.get("timeout", type=str, default=None)
if timeout is not None:
timeout = to_seconds(timeout)
except ValueError:
raise EMError(
"timeout must be given as `<int> <s|m|h|d>` and be greater than 0 seconds"
)
# validate file_patterns
can_decode = True
try:
file_patterns = json.loads(request.form.get("file_patterns"))
except json.JSONDecodeError:
can_decode = False
if not can_decode or not isinstance(file_patterns, list):
raise EMError("file_patterns must be given as a list serialized to json")
if len(file_patterns) < 1:
raise EMError("file_patterns must contain at least one file pattern")
for fp in file_patterns:
if not Path(fp).is_absolute():
raise EMError(
"each file pattern must be given as an absolute path (e.g. '/inputs/*.txt"
)
kwargs = {
"ensemble_id": ensemble_id,
"trigger": trigger,
"trigger_type": TriggerType.FILE_PATTERN.value,
"workflow_script": workflow_script,
"workflow_args": workflow_args,
"interval": interval,
"timeout": timeout,
"file_patterns": file_patterns,
}
# create trigger entry in db
t_dao = Triggers(g.session)
t_dao.insert_trigger(**kwargs)
# return response success
return api.json_created(
url_for("route_get_trigger", ensemble=ensemble, trigger=trigger)
)
@emapp.route(
"/ensembles/<string:ensemble>/triggers/<string:trigger>", methods=["DELETE"]
)
def route_delete_trigger(ensemble, trigger):
# verify that ensemble exists for user
e_dao = Ensembles(g.session)
# raises EMError code 404 if does not exist
ensemble_id = e_dao.get_ensemble(g.user.username, ensemble).id
# update trigger state to be STOPPED so that the TriggerManager can
# handle it appropriately
t_dao = Triggers(g.session)
# make sure get_trigger raises 404 if nothing found
trigger_id = t_dao.get_trigger(ensemble_id, trigger)._id
t_dao.update_state(ensemble_id, trigger_id, "STOPPED")
return api.json_response(
{
"message": "ensemble: {}, trigger: {} marked for deletion".format(
ensemble, trigger
)
},
status_code=202,
)
def to_seconds(value: str) -> int:
"""Convert time unit given as '<int> <s|m|h|d>` to seconds.
:param value: input str
:type value: str
:raises ValueError: value must be given as '<int> <s|m|h|d>
:raises ValueError: value must be > 0s
:return: value given in seconds
:rtype: int
"""
value = value.strip()
pattern = re.compile(r"\d+ *[sSmMhHdD]")
if not pattern.fullmatch(value):
raise ValueError(
"invalid interval: {}, interval must be given as '<int> <s|m|h|d>'".format(
value
)
)
num = int(value[0 : len(value) - 1])
unit = value[-1].lower()
as_seconds = {"s": 1, "m": 60, "h": 60 * 60, "d": 60 * 60 * 24}
result = as_seconds[unit] * num
if result <= 0:
raise ValueError(
"invalid interval: {}, interval must be greater than 0 seconds".format(
result
)
)
return result
|
Alignment/APEEstimation/test/apeTreeCreateDefault_cfg.py | ckamtsikis/cmssw | 852 | 12700661 |
########################################################################
###
### Read out APEs from .db files and convert them to trees
### that can be read by the APE validation plot tools.
###
### Intended to provide a straightforward comparison of
### measured APE values to values stored in .db files
###
########################################################################
###
### HOW TO USE:
### 1. Run the default setup procedure for the APE
### tool (including creation of a TrackerTree)
### 2. Configure the apeTreeCreateDefault tool below
### and run it with cmsRun
### 3. Use output file in validation, for example in
### macros/commandsDrawComparison.C
###
########################################################################
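### Editorial note (not part of the original header): step 2 above is typically
### executed as
###   cmsRun Alignment/APEEstimation/test/apeTreeCreateDefault_cfg.py
### after adjusting the user options below.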
import FWCore.ParameterSet.Config as cms
from Alignment.APEEstimation.SectorBuilder_cff import *
import os
##
## User options
##
# Run number to use for data in case one uses a multi-IOV object
theFirstRun = 1
# Which global tag to use
theGlobalTag = 'auto:phase1_2017_realistic'
# Source from which to get the APE object
theSource = 'frontier://FrontierProd/CMS_CONDITIONS'
# Tag to extract the APE object
theTag = 'TrackerAlignmentExtendedErrors_Upgrade2017_pseudoAsymptotic_v3'
# Name and path of output File
theOutputFile = 'defaultAPE.root'
# Sector definitions, RecentSectors is the typical granularity
theSectors = RecentSectors
##
## Process definition
##
process = cms.Process("ApeTreeCreateDefault")
##
## Message Logger
##
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.DefaultAPETree=dict()
process.MessageLogger.SectorBuilder=dict()
process.MessageLogger.cerr.INFO.limit = 0
process.MessageLogger.cerr.default.limit = 0
process.MessageLogger.cerr.DefaultAPETree = cms.untracked.PSet(limit = cms.untracked.int32(-1))
process.MessageLogger.cerr.SectorBuilder = cms.untracked.PSet(limit = cms.untracked.int32(-1))
##
## Process options
##
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
)
##
## Input Files
##
process.source = cms.Source("EmptySource", firstRun = cms.untracked.uint32(theFirstRun))
##
## Number of Events
##
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
### Load desired default APEs
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, theGlobalTag, '')
from CondCore.CondDB.CondDB_cfi import *
CondDBAlignmentError = CondDB.clone(connect = cms.string(theSource))
process.myTrackerAlignmentErr = cms.ESSource("PoolDBESSource",
CondDBAlignmentError,
timetype = cms.string("runnumber"),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('TrackerAlignmentErrorExtendedRcd'),
tag = cms.string(theTag)
)
)
)
process.es_prefer_trackerAlignmentErr = cms.ESPrefer("PoolDBESSource","myTrackerAlignmentErr")
##
## Define Sequence
##
process.ApeTreeCreateDefaultSequence = cms.Sequence()
process.ApeTreeCreateDefault = cms.EDAnalyzer('ApeTreeCreateDefault',
resultFile = cms.string(theOutputFile),
trackerTreeFile = cms.string(os.environ['CMSSW_BASE'] + '/src/Alignment/TrackerAlignment/hists/TrackerTree.root'),
sectors = theSectors,
)
process.ApeTreeCreateDefaultSequence *= process.ApeTreeCreateDefault
##
## Path
##
process.p = cms.Path(
process.ApeTreeCreateDefaultSequence
)
|
samples/openapi3/server/petstore/python-flask-python2/openapi_server/models/upload_form.py | MalcolmScoffable/openapi-generator | 11,868 | 12700673 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class UploadForm(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, additional_metadata=None, file=None): # noqa: E501
"""UploadForm - a model defined in OpenAPI
:param additional_metadata: The additional_metadata of this UploadForm. # noqa: E501
:type additional_metadata: str
:param file: The file of this UploadForm. # noqa: E501
:type file: file
"""
self.openapi_types = {
'additional_metadata': str,
'file': file
}
self.attribute_map = {
'additional_metadata': 'additionalMetadata',
'file': 'file'
}
self._additional_metadata = additional_metadata
self._file = file
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UploadForm of this UploadForm. # noqa: E501
:rtype: UploadForm
"""
return util.deserialize_model(dikt, cls)
@property
def additional_metadata(self):
"""Gets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:return: The additional_metadata of this UploadForm.
:rtype: str
"""
return self._additional_metadata
@additional_metadata.setter
def additional_metadata(self, additional_metadata):
"""Sets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:param additional_metadata: The additional_metadata of this UploadForm.
:type additional_metadata: str
"""
self._additional_metadata = additional_metadata
@property
def file(self):
"""Gets the file of this UploadForm.
file to upload # noqa: E501
:return: The file of this UploadForm.
:rtype: file
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this UploadForm.
file to upload # noqa: E501
:param file: The file of this UploadForm.
:type file: file
"""
if file is None:
raise ValueError("Invalid value for `file`, must not be `None`") # noqa: E501
self._file = file
|
test/kernel/integration/grub/test.py | jaeh/IncludeOS | 3,673 | 12700731 |
#!/usr/bin/env python3
from builtins import str
import sys
import os
import subprocess
from vmrunner import vmrunner
vm = vmrunner.vms[0];
if len(sys.argv) == 1:
# Build, run and clean
vm.cmake()
# Cmake changes to build dir
os.chdir("..")
# Use grubify-script
grubify = "grubiso.sh"
#TODO MOVE to cmake ?
# Boot the image
if len(sys.argv) > 1:
# Create the GRUB image
subprocess.check_call(["bash",grubify,str(sys.argv[1])])
else:
# Create the GRUB image
subprocess.check_call(["bash",grubify,"build/service"])
vm.boot(multiboot = False)
|
gallery/seismic/convolutional_model.py | XuesongDing/fatiando | 179 | 12700743 |
r"""
Synthetic seismograms using the convolutional model
---------------------------------------------------
The simplest way to get a seismogram (in time x offset) is through the
convolutional model
.. math::
trace(t) = wavelet(t) \ast reflectivity(t)
Module :mod:`fatiando.seismic.conv` defines functions for doing this
convolution, calculating the required reflectivity, and converting a depth
model into time.
"""
import numpy as np
import matplotlib.pyplot as plt
from fatiando.seismic import conv
from fatiando.vis import mpl
# Define the parameters of our depth model
n_samples, n_traces = [600, 100]
velocity = 1500*np.ones((n_samples, n_traces))
# We'll put two interfaces in depth
velocity[150:, :] = 2000
velocity[400:, :] = 3500
dt = 2e-3
# We need to convert the depth model we made above into time
vel_l = conv.depth_2_time(velocity, velocity, dt=dt, dz=1)
# and we'll assume the density is homogeneous
rho_l = 2200*np.ones(np.shape(vel_l))
# With that, we can calculate the reflectivity model in time
rc = conv.reflectivity(vel_l, rho_l)
# and finally perform our convolution
synt = conv.convolutional_model(rc, 30, conv.rickerwave, dt=dt)
# We can use the utility function in fatiando.vis.mpl to plot the seismogram
fig, axes = plt.subplots(1, 2, figsize=(8, 5))
ax = axes[0]
ax.set_title("Velocity model (in depth)")
tmp = ax.imshow(velocity, extent=[0, n_traces, n_samples, 0],
cmap="copper", aspect='auto', origin='upper')
fig.colorbar(tmp, ax=ax, pad=0, aspect=50)
ax.set_xlabel('Trace')
ax.set_ylabel('Depth (m)')
ax = axes[1]
ax.set_title("Synthetic seismogram")
mpl.seismic_wiggle(synt[:, ::20], dt, scale=1)
mpl.seismic_image(synt, dt, cmap="RdBu_r", aspect='auto')
ax.set_xlabel('Trace')
ax.set_ylabel('Time (s)')
plt.tight_layout()
plt.show()
|
tests/simple_page/test_link.py | kejkz/webium | 152 | 12700768 | from nose.tools import eq_
from tests import get_url
from tests.simple_page import SimplePageTest
class TestClick(SimplePageTest):
def test_link(self):
eq_(self.page.icon_link.get_href(), get_url('icon.gif'))
|
examples/api/python/quickstart.py | zyedidia/boolector | 209 | 12700774 |
import os
import pyboolector
from pyboolector import Boolector, BoolectorException
if __name__ == "__main__":
try:
# Create Boolector instance
btor = Boolector()
# Enable model generation
btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True)
# Create bit-vector sort of size 8
bvsort8 = btor.BitVecSort(8)
# Create expressions
x = btor.Var(bvsort8, "x")
y = btor.Var(bvsort8, "y")
zero = btor.Const(0, 8)
hundred = btor.Const(100, 8)
# 0 < x
ult_x = btor.Ult(zero, x)
btor.Assert(ult_x)
# x <= 100
ulte_x = btor.Ulte(x, hundred)
btor.Assert(ulte_x)
# 0 < y
ult_y = btor.Ult(zero, y)
btor.Assert(ult_y)
# y <= 100
ulte_y = btor.Ulte(y, hundred)
btor.Assert(ulte_y)
# x * y
mul = btor.Mul(x, y)
# x * y < 100
ult = btor.Ult(mul, hundred)
btor.Assert(ult)
umulo = btor.Umulo(x, y)
numulo = btor.Not(umulo) # prevent overflow
btor.Assert(numulo)
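# Note (editorial): without this no-overflow constraint, x * y is an 8-bit
# product computed modulo 2^8, so large x and y could wrap around and still
# satisfy x * y < 100.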
res = btor.Sat()
print("Expect: sat")
print("Boolector: ", end='')
if res == btor.SAT:
print("sat")
elif res == btor.UNSAT:
print("unsat")
else:
print("unknown")
print("")
# prints "x: 00000100"
print("assignment of {}: {}".format(x.symbol, x.assignment))
# prints: "y: 00010101"
print("assignment of {}: {}".format(y.symbol, y.assignment))
print("")
print("Print model in BTOR format:")
btor.Print_model("btor")
print("")
print("Print model in SMT-LIBv2 format:")
btor.Print_model("smt2")
print("")
except BoolectorException as e:
print("Caught exception: " + str(e))
|
tests/unittests/test_dataloader.py | mj-kh/speechbrain | 3,913 | 12700786 | import torch
import pytest
def test_saveable_dataloader(tmpdir, device):
from speechbrain.dataio.dataloader import SaveableDataLoader
save_file = tmpdir + "/dataloader.ckpt"
dataset = torch.randn(10, 1, device=device)
dataloader = SaveableDataLoader(dataset, collate_fn=None)
data_iterator = iter(dataloader)
first_item = next(data_iterator)
assert first_item == dataset[0]
# Save here:
dataloader._speechbrain_save(save_file)
second_item = next(data_iterator)
assert second_item == dataset[1]
# Now make a new dataloader and recover:
new_dataloader = SaveableDataLoader(dataset, collate_fn=None)
new_dataloader._speechbrain_load(save_file, end_of_epoch=False, device=None)
new_data_iterator = iter(new_dataloader)
second_second_item = next(new_data_iterator)
assert second_second_item == second_item
def test_saveable_dataloader_multiprocess(tmpdir):
# Same test as above, but with multiprocess dataloading
from speechbrain.dataio.dataloader import SaveableDataLoader
save_file = tmpdir + "/dataloader.ckpt"
dataset = torch.randn(10, 1)
for num_parallel in [1, 2, 3, 4]:
dataloader = SaveableDataLoader(
dataset, num_workers=num_parallel, collate_fn=None
) # Note num_workers
data_iterator = iter(dataloader)
first_item = next(data_iterator)
assert first_item == dataset[0]
# Save here, note that this overwrites.
dataloader._speechbrain_save(save_file)
second_item = next(data_iterator)
assert second_item == dataset[1]
# Cleanup needed for MacOS (open file limit)
del data_iterator
del dataloader
# Now make a new dataloader and recover:
new_dataloader = SaveableDataLoader(
dataset, num_workers=num_parallel, collate_fn=None
)
new_dataloader._speechbrain_load(
save_file, end_of_epoch=False, device=None
)
new_data_iterator = iter(new_dataloader)
second_second_item = next(new_data_iterator)
assert second_second_item == second_item
del new_data_iterator
del new_dataloader
def test_looped_loader(tmpdir):
# Tests that LoopedLoader will raise StopIteration appropriately
# And that it can recover and keep the place.
from speechbrain.dataio.dataloader import LoopedLoader
save_file = tmpdir + "/loopedloader.ckpt"
data = range(3)
dataloader = LoopedLoader(data, epoch_length=2)
data_iterator = iter(dataloader)
assert next(data_iterator) == 0
# Save here, 1 to go:
dataloader.save(save_file)
assert next(data_iterator) == 1
with pytest.raises(StopIteration):
next(data_iterator)
# And it can be continued past the range:
assert next(data_iterator) == 2
assert next(data_iterator) == 0
# And again it raises:
with pytest.raises(StopIteration):
next(data_iterator)
# Now make a new dataloader and recover:
new_dataloader = LoopedLoader(data, epoch_length=2)
new_dataloader.load(save_file, end_of_epoch=False, device=None)
new_data_iterator = iter(new_dataloader)
next(new_data_iterator)
with pytest.raises(StopIteration):
next(new_data_iterator)
|
flack/api/messages.py | Pythonian/flack | 533 | 12700796 | from flask import request, abort, jsonify, g
from .. import db
from ..auth import token_auth, token_optional_auth
from ..models import Message
from ..utils import timestamp, url_for
from ..tasks import async_task
from . import api
@api.route('/messages', methods=['POST'])
@token_auth.login_required
@async_task
def new_message():
"""
Post a new message.
This endpoint requires a valid user token.
"""
msg = Message.create(request.get_json() or {})
db.session.add(msg)
db.session.commit()
r = jsonify(msg.to_dict())
r.status_code = 201
r.headers['Location'] = url_for('api.get_message', id=msg.id)
return r
@api.route('/messages', methods=['GET'])
@token_optional_auth.login_required
def get_messages():
"""
Return list of messages.
This endpoint is publicly available, but if the client has a token it
should send it, as that indicates to the server that the user is online.
"""
since = int(request.args.get('updated_since', '0'))
day_ago = timestamp() - 24 * 60 * 60
if since < day_ago:
# do not return more than a day's worth of messages
since = day_ago
msgs = Message.query.filter(Message.updated_at > since).order_by(
Message.updated_at)
return jsonify({'messages': [msg.to_dict() for msg in msgs.all()]})
@api.route('/messages/<id>', methods=['GET'])
@token_optional_auth.login_required
def get_message(id):
"""
Return a message.
This endpoint is publicly available, but if the client has a token it
should send it, as that indicates to the server that the user is online.
"""
return jsonify(Message.query.get_or_404(id).to_dict())
@api.route('/messages/<id>', methods=['PUT'])
@token_auth.login_required
@async_task
def edit_message(id):
"""
Modify an existing message.
This endpoint requires a valid user token.
Note: users are only allowed to modify their own messages.
"""
msg = Message.query.get_or_404(id)
if msg.user != g.current_user:
abort(403)
msg.from_dict(request.get_json() or {})
db.session.add(msg)
db.session.commit()
return '', 204
|
tests/core/test_run_files.py | Mattlk13/dd-agent | 1,172 | 12700805 |
# stdlib
import os
import unittest
# 3p
import mock
# project
from checks.check_status import AgentStatus
class TestRunFiles(unittest.TestCase):
""" Tests that runfiles (.pid, .sock, .pickle etc.) are written to internal agent folders"""
# Mac run directory expected location
_my_dir = os.path.dirname(os.path.abspath(__file__))
_mac_run_dir = '/'.join(_my_dir.split('/')[:-4]) or '/'
_linux_run_dir = '/opt/datadog-agent/run'
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('checks.check_status._windows_commondata_path', return_value="C:\Windows\App Data")
@mock.patch('utils.platform.Platform.is_win32', return_value=True)
def test_agent_status_pickle_file_win32(self, *mocks):
''' Test pickle file location on win32 '''
expected_path = os.path.join('C:\Windows\App Data', 'Datadog', 'AgentStatus.pickle')
# check AgentStatus pickle created
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.pidfile.PidFile.get_dir', return_value=_mac_run_dir)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_dmg(self, *mocks):
''' Test pickle file location when running a Mac DMG install '''
expected_path = os.path.join(self._mac_run_dir, 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('utils.pidfile.tempfile.gettempdir', return_value='/a/test/tmp/dir')
@mock.patch('utils.pidfile.PidFile.get_dir', return_value='')
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=True)
def test_agent_status_pickle_file_mac_source(self, *mocks):
''' Test pickle file location when running a Mac source install '''
expected_path = os.path.join('/a/test/tmp/dir', 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('utils.pidfile.PidFile.get_dir', return_value=_linux_run_dir)
@mock.patch('utils.platform.Platform.is_win32', return_value=False)
@mock.patch('utils.platform.Platform.is_mac', return_value=False)
def test_agent_status_pickle_file_linux(self, *mocks):
''' Test pickle file location when running on Linux '''
expected_path = os.path.join('/opt/datadog-agent/run', 'AgentStatus.pickle')
self.assertEqual(AgentStatus._get_pickle_path(), expected_path)
|
pdaugment/midi_preprocess/steps/filter_and_merge.py | hongwen-sun/muzic | 1,903 | 12700819 | import os
import subprocess
from multiprocessing.pool import Pool
import miditoolkit
import pandas as pd
import pretty_midi
from tqdm import tqdm
import numpy as np
import pickle
from copy import deepcopy
from midi_preprocess.utils.hparams import hparams
import midi_preprocess.steps.track_separate as tc
def filter_and_merge(processed_data_dir, instru2program):
base_dir = 'midi_preprocess'
melody_model = pickle.load(open(f'{base_dir}/model/melody_model_new', 'rb'))
bass_model = pickle.load(open(f'{base_dir}/model/bass_model', 'rb'))
chord_model = pickle.load(open(f'{base_dir}/model/chord_model', 'rb'))
df = pd.read_csv(open(f'{processed_data_dir}/meta.csv'))
print(f"| load #midi infos: {df.shape[0]}.")
pool = Pool(int(os.getenv('N_PROC', os.cpu_count())))
save_dir = f'{processed_data_dir}/midi_recog_tracks'
subprocess.check_call(f'rm -rf "{save_dir}"', shell=True)
futures = [pool.apply_async(filter_recog_merge_job, args=[
midi_info['path'], midi_info, instru2program, save_dir, melody_model, bass_model, chord_model
]) for idx, midi_info in df.iterrows()]
pool.close()
merged_infos = []
for f, (idx, midi_info) in zip(tqdm(futures), df.iterrows()):
res = f.get()
merged_info = {}
merged_info.update(midi_info)
if isinstance(res, str):
merged_info['msg'] = res
else:
merged_info['msg'] = ''
merged_info.update(res)
merged_infos.append(merged_info)
df = pd.DataFrame(merged_infos)
df = df.set_index(['id'])
df.to_csv(f'{processed_data_dir}/meta.csv')
pool.join()
n_merged = len([x for x in merged_infos if x['msg'] == ''])
print(f"| merged #midi: {n_merged}")
def predict_track_with_model(midi_path, melody_model, bass_model, chord_model):
try:
ret = tc.cal_file_features(midi_path) # remove empty track and calculate the features
features, pm = ret
except Exception as e:
features = None
pm = pretty_midi.PrettyMIDI(midi_path)
if features is None and pm is None:
pm = pretty_midi.PrettyMIDI(midi_path)
if features is None or features.shape[0] == 0:
return pm, [], []
features = tc.add_labels(features) # add label
tc.remove_file_duplicate_tracks(features, pm) # delete duplicate track
features = tc.predict_labels(features, melody_model, bass_model, chord_model) # predict lead, bass, chord
predicted_melody_tracks_idx = np.where(features.melody_predict)[0]
predicted_bass_tracks_idx = np.where(features.bass_predict)[0]
melody_tracks_idx = np.concatenate((predicted_melody_tracks_idx, np.where(features.is_melody)[0]))
bass_tracks_idx = np.concatenate((predicted_bass_tracks_idx, np.where(features.is_bass)[0]))
return pm, melody_tracks_idx, bass_tracks_idx
def filter_recog_merge_job(midi_path, midi_info, instru2program, save_dir,
melody_model, bass_model, chord_model):
filter_msg = filter_tracks(midi_info)
if filter_msg != '':
return filter_msg
pm, melody_tracks_idx, bass_tracks_idx = predict_track_with_model(midi_path, melody_model, bass_model, chord_model)
if pm is None:
return 'pm is None'
pm_new = deepcopy(pm)
pm_new.instruments = []
for i, instru_old in enumerate(pm.instruments):
program_old = instru_old.program
instru = deepcopy(instru_old)
if i in melody_tracks_idx and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Lead':
instru.name = 'Lead'
elif i in bass_tracks_idx and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Bass':
instru.name = 'Bass'
elif instru_old.is_drum and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Drums': # drum
instru.name = 'Drums'
elif program_old // 8 == 0 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Piano': # piano
instru.name = 'Piano'
elif program_old // 8 == 3 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Guitar': # guitar
instru.name = 'Guitar'
elif 40 <= program_old <= 54 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Strings': # string
instru.name = 'Strings'
elif 73 <= program_old <= 88: # Lead
instru.name = 'Lead'
elif program_old // 8 == 4: # Bass
instru.name = 'Bass'
else:
instru.name = 'UnRec'
instru.program = instru_old.program
pm_new.instruments.append(instru)
os.makedirs(save_dir, exist_ok=True)
out_path = f"{save_dir}/{midi_info['id']}.mid"
pm_new.write(out_path)
merged_midi_info = get_merged_midi_info(out_path, instru2program)
filter_msg = filter_tracks(midi_info)
if filter_msg != '':
return '[merged]' + filter_msg
return merged_midi_info
def filter_tracks(midi_info):
# filter out too long n_beats > 10000, and too short n_beats < 16
if midi_info['n_beats'] > hparams['max_n_beats'] or midi_info['n_beats'] < hparams['min_n_beats']:
return 'invalid beats'
if midi_info['n_notes'] < hparams['min_n_notes']:
return 'invalid n_notes'
if midi_info['n_pitches'] < hparams['min_n_pitches']:
return 'Invalid pitches'
if midi_info['cross_bar_rate'] > hparams['max_cross_bar_rate']:
return 'Invalid cross_bar'
return ''
def get_merged_midi_info(midi_fn, instru2program):
try:
mf = miditoolkit.MidiFile(midi_fn)
except KeyboardInterrupt:
raise
except Exception as e:
return str(e)
# merge tracks
track_lists_to_merge = get_tracks_to_merge(mf, instru2program)
n_merge_track = [len(x) for x in track_lists_to_merge]
available_instrs = list(set([x2 for x in track_lists_to_merge for x2 in x])) # Important for 6 tracks
# notes
all_vels = [x1.velocity for i, x in enumerate(mf.instruments) if i in available_instrs for x1 in
x.notes] # velocities of all notes across the kept instruments, flattened into one list
all_pitches = [x1.pitch for i, x in enumerate(mf.instruments) if i in available_instrs for x1 in x.notes]
n_notes = len(all_vels) # number of notes
if n_notes == 0:
return 'empty tracks'
n_beats = max([x1.end for i, x in enumerate(mf.instruments)
if i in available_instrs for x1 in x.notes]) // mf.ticks_per_beat + 1
n_instru = len(mf.instruments)
n_pitches = len(set(all_pitches)) # pitch classes
vel_mean = np.mean(all_vels)
vel_std = np.std(all_vels)
n_cross_bar = 0
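# Editorial note: the check below counts a note as "cross bar" when it starts in
# one bar and ends in a later one (bars assumed to be 4 beats long), with a
# 0.25-beat tolerance at each end, and it ignores notes starting (almost)
# exactly on a bar line.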
for i, instru in enumerate(mf.instruments):
if i not in available_instrs:
continue
for n in instru.notes:
start_beat = n.start / mf.ticks_per_beat
end_beat = n.end / mf.ticks_per_beat
if (start_beat + 0.25) // 4 < (end_beat - 0.25) // 4 and start_beat % 4 > 0.125:
n_cross_bar += 1
return {
'path_recog_tracks': midi_fn,
# velocity
'vel_mean': vel_mean,
'vel_std': vel_std,
# stats
'n_notes': n_notes,
'n_instru': n_instru,
'n_beats': n_beats,
'n_pitches': n_pitches,
'n_cross_bar': n_cross_bar,
# tracks
'n_tracks': n_merge_track,
'track_lists_to_merge': track_lists_to_merge,
}
def get_tracks_to_merge(mf, instru2program):
track_lists_to_merge = [[] for _ in range(6)]
instru_order = {v: k for k, v in enumerate(instru2program.keys())}
for idx, instr in enumerate(mf.instruments):
instr_name = instr.name
if instr_name in instru_order:
track_lists_to_merge[instru_order[instr_name]].append(idx)
return track_lists_to_merge |
challenge_4/python/bryantpq/tree.py | rchicoli/2017-challenges | 271 | 12700832 |
class Node:
def __init__(self, value):
self.value = value
self.l = None
self.r = None
class BinaryTree:
def __init__(self):
self.root = None
def draw(self):
'''Prints a preorder traversal of the tree'''
self._draw(self.root)
print()
def _draw(self, node):
print("(", end="")
if node != None:
print(str(node.value) + ", ", end="")
self._draw(node.l)
print(", ", end="")
self._draw(node.r)
print(")", end="")
def invert(self):
self.root = self._invert(self.root)
def _invert(self, node):
# recursively invert the left and right subtrees,
# then swap them at this node
if node:
node.l = self._invert(node.l)
node.r = self._invert(node.r)
temp = node.l
node.l = node.r
node.r = temp
return node
def add(self, vals):
for val in vals:
if self.root == None:
self.root = Node(val)
else:
self._add(self.root, val)
def _add(self, node, val):
if val < node.value:
if node.l == None:
node.l = Node(val)
else:
self._add(node.l, val)
else:
if node.r == None:
node.r = Node(val)
else:
self._add(node.r, val)
def main():
t = BinaryTree()
t.add([4, 2, 7, 1, 3, 6, 9, 11])
t.draw()
t.invert()
t.draw()
if __name__ == "__main__":
main()
|
tests/test_guard.py | tjensen/steam | 727 | 12700853 |
import unittest
import mock
from steam import guard as g
class TCguard(unittest.TestCase):
def test_generate_twofactor_code_for_time(self):
code = g.generate_twofactor_code_for_time(b'superdupersecret', timestamp=3000030)
self.assertEqual(code, 'YRGQJ')
code = g.generate_twofactor_code_for_time(b'superdupersecret', timestamp=3000029)
self.assertEqual(code, '94R9D')
def test_generate_confirmation_key(self):
key = g.generate_confirmation_key(b'itsmemario', '', 100000)
self.assertEqual(key, b'\<KEY>')
key = g.generate_confirmation_key(b'itsmemario', 'allow', 100000)
self.assertEqual(key, b"<KEY>")
|
tools/face/vis_html_det.py | AruniRC/detectron-self-train | 128 | 12700870 | import os.path as osp
from html4vision import Col, imagetable
# table description
cols = [
Col('id1', 'ID'), # make a column of 1-based indices
Col('img', 'vgg16', 'det_vgg16_Flickr_vis-0.70/*.jpg'), # specify image content for column 2
]
imagetable(cols, outfile='det_vgg16_Flickr_vis-0.70.html', title='Flickr conf pert-adapt',
style=None)
|
tests/nlu_core_tests/component_tests/classifier_tests/question_tests.py | milyiyo/nlu | 480 | 12700880 |
import unittest
from nlu import *
class TestQuestions(unittest.TestCase):
def test_questions_model(self):
pipe = nlu.load('questions',verbose=True)
data = ['I love pancaces. I hate Mondays', 'I love Fridays']
df = pipe.predict(data, output_level='sentence')
for c in df.columns: print(df[c])
df = pipe.predict(['I love pancaces. I hate Mondays', 'I love Fridays'], output_level='document')
for c in df.columns: print(df[c])
if __name__ == '__main__':
unittest.main()
|
contrib/linux/actions/dig.py | momokuri-3/st2 | 4,920 | 12700895 | #! /usr/bin/python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import locale
import subprocess
import random
import sys
from st2common.runners.base_action import Action
class DigAction(Action):
def run(self, rand, count, nameserver, hostname, queryopts):
opt_list = []
output = []
cmd_args = ["dig"]
if nameserver:
nameserver = "@" + nameserver
cmd_args.append(nameserver)
if isinstance(queryopts, str) and "," in queryopts:
opt_list = queryopts.split(",")
else:
opt_list.append(queryopts)
cmd_args.extend(["+" + option for option in opt_list])
cmd_args.append(hostname)
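# At this point cmd_args looks like, e.g. (illustrative values only):
#   ['dig', '@8.8.8.8', '+short', 'example.com']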
try:
raw_result = subprocess.Popen(
cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE
).communicate()[0]
if sys.version_info >= (3,):
# This function might call getpreferred encoding unless we pass
# do_setlocale=False.
encoding = locale.getpreferredencoding(do_setlocale=False)
result_list_str = raw_result.decode(encoding)
else:
result_list_str = str(raw_result)
result_list = list(filter(None, result_list_str.split("\n")))
# NOTE: Python3 supports FileNotFoundError; the errno.ENOENT check is for py2 compat
# for Python3:
# except FileNotFoundError as e:
except OSError as e:
if e.errno == errno.ENOENT:
return (
False,
"Can't find dig installed in the path (usually /usr/bin/dig). If "
"dig isn't installed, you can install it with 'sudo yum install "
"bind-utils' or 'sudo apt install dnsutils'",
)
else:
raise e
if int(count) > len(result_list) or count <= 0:
count = len(result_list)
output = result_list[0:count]
if rand is True:
random.shuffle(output)
return output
|
tests/autoscaling/test_pause_service_autoscaler.py | sobolevn/paasta | 1,711 | 12700901 | import mock
import paasta_tools.paastaapi.models as paastamodels
from paasta_tools.autoscaling.pause_service_autoscaler import (
delete_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
get_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
update_service_autoscale_pause_time,
)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_error(mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = get_service_autoscale_pause_time("cluster1")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = (
None,
500,
None,
)
return_code = get_service_autoscale_pause_time("cluster1")
assert return_code == 2
@mock.patch("builtins.print", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_not(mock_client, mock_time, mock_print):
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = ("3", 200, None)
mock_time.time.return_value = 4
return_code = get_service_autoscale_pause_time("cluster1")
mock_print.assert_called_with("Service autoscaler is not paused")
assert return_code == 0
@mock.patch(
"paasta_tools.autoscaling.pause_service_autoscaler.print_paused_message",
autospec=True,
)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time", autospec=True)
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_get_service_autoscale_pause_time_paused(
mock_client, mock_time, mock_print_paused_message
):
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.get_service_autoscaler_pause.return_value = ("3", 200, None)
mock_time.time.return_value = 2
return_code = get_service_autoscale_pause_time("cluster1")
mock_print_paused_message.assert_called_with(3.0)
assert return_code == 0
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
def test_update_service_autoscale_pause_time(mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = update_service_autoscale_pause_time("cluster1", "2")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.update_service_autoscaler_pause = mock_update = mock.Mock()
mock_update.return_value = (None, 500, None)
return_code = update_service_autoscale_pause_time("cluster1", "3")
mock_update.assert_called_once_with(
paastamodels.InlineObject(minutes=3), _return_http_data_only=False
)
assert return_code == 2
mock_update.return_value = (None, 200, None)
return_code = update_service_autoscale_pause_time("cluster1", "2")
assert return_code == 0
@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client", autospec=True)
@mock.patch("paasta_tools.paastaapi.apis.DefaultApi", autospec=True)
def test_delete_service_autoscale_pause_time(mock_default_api, mock_client):
mock_client.get_paasta_oapi_client.return_value = None
return_code = delete_service_autoscale_pause_time("cluster1")
assert return_code == 1
mock_client.get_paasta_oapi_client.assert_called_with(
cluster="cluster1", http_res=True
)
mock_api = mock.Mock()
mock_client.get_paasta_oapi_client.return_value = mock.Mock(default=mock_api)
mock_api.delete_service_autoscaler_pause = mock_delete = mock.Mock()
mock_delete.return_value = (None, 500, None)
return_code = delete_service_autoscale_pause_time("cluster1")
mock_delete.assert_called_once_with(_return_http_data_only=False)
assert return_code == 2
mock_delete.return_value = (None, 200, None)
return_code = delete_service_autoscale_pause_time("cluster1")
assert return_code == 0
|
ConvKB/train.py | MedyG/kg-reeval | 104 | 12700908 | import tensorflow as tf
import numpy as np
np.random.seed(1234)
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata import *
from model import ConvKB
# Parameters
# ==================================================
parser = ArgumentParser("ConvKB", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="../data/", help="Data sources.")
parser.add_argument("--run_folder", default="../", help="Data sources.")
parser.add_argument("--name", default="WN18RR", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=50, type=int, help="Dimensionality of character embedding")
parser.add_argument("--filter_sizes", default="1", help="Comma-separated filter sizes")
parser.add_argument("--num_filters", default=500, type=int, help="Number of filters per filter size")
parser.add_argument("--dropout_keep_prob", default=1.0, type=float, help="Dropout keep probability")
parser.add_argument("--l2_reg_lambda", default=0.001, type=float, help="L2 regularization lambda")
parser.add_argument("--learning_rate", default=0.0001, type=float, help="Learning rate")
parser.add_argument("--is_trainable", default=True, type=bool, help="")
parser.add_argument("--batch_size", default=128, type=int, help="Batch Size")
parser.add_argument("--neg_ratio", default=1.0, type=float, help="Number of negative triples generated by positive")
parser.add_argument("--num_epochs", default=201, type=int, help="Number of training epochs")
parser.add_argument("--saveStep", default=200, type=int, help="")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='wn18rr', help="")
parser.add_argument("--useConstantInit", action='store_true')
parser.add_argument("--model_index", default='200', help="")
parser.add_argument("--seed", default=1234, type=int, help="")
parser.add_argument("--num_splits", default=8, type=int, help="Split the validation set into 8 parts for a faster evaluation")
parser.add_argument("--testIdx", default=1, type=int, help="From 0 to 7. Index of one of 8 parts")
parser.add_argument("--decode", action='store_false')
args = parser.parse_args()
print(args)
# Load data
print("Loading data...")
train, valid, test, words_indexes, indexes_words, \
headTailSelector, entity2id, id2entity, relation2id, id2relation = build_data(path=args.data, name=args.name)
data_size = len(train)
train_batch = Batch_Loader(train, words_indexes, indexes_words, headTailSelector, \
entity2id, id2entity, relation2id, id2relation, batch_size=args.batch_size,
neg_ratio=args.neg_ratio)
entity_array = np.array(list(train_batch.indexes_ents.keys()))
lstEmbed = []
#Using the pre-trained embeddings.
print("Using pre-trained model.")
lstEmbed = np.empty([len(words_indexes), args.embedding_dim]).astype(np.float32)
initEnt, initRel = init_norm_Vector(args.data + args.name + '/relation2vec' + str(args.embedding_dim) + '.init',
args.data + args.name + '/entity2vec' + str(args.embedding_dim) + '.init',
args.embedding_dim)
for _word in words_indexes:
if _word in relation2id:
index = relation2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initRel[index]
elif _word in entity2id:
index = entity2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initEnt[index]
else:
print('*****************Error********************!')
break
lstEmbed = np.array(lstEmbed, dtype=np.float32)
assert len(words_indexes) % (len(entity2id) + len(relation2id)) == 0
print("Loading data... finished!")
x_valid = np.array(list(valid.keys())).astype(np.int32)
y_valid = np.array(list(valid.values())).astype(np.float32)
x_test = np.array(list(test.keys())).astype(np.int32)
y_test = np.array(list(test.values())).astype(np.float32)
# Training
# ==================================================
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
session_conf = tf.ConfigProto(allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
global_step = tf.Variable(0, name="global_step", trainable=False)
cnn = ConvKB(
sequence_length=x_valid.shape[1], # 3
num_classes=y_valid.shape[1], # 1
pre_trained=lstEmbed,
embedding_size=args.embedding_dim,
filter_sizes=list(map(int, args.filter_sizes.split(","))),
num_filters=args.num_filters,
vocab_size=len(words_indexes),
l2_reg_lambda=args.l2_reg_lambda,
is_trainable=args.is_trainable,
useConstantInit=args.useConstantInit)
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
# optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
out_dir = os.path.abspath(os.path.join(args.run_folder, "runs", args.model_name))
print("Writing to {}\n".format(out_dir))
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: args.dropout_keep_prob,
}
_, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)
if step % 10000 == 0: print(step)
num_batches_per_epoch = int((data_size - 1) / args.batch_size) + 1
for epoch in range(args.num_epochs):
for batch_num in range(num_batches_per_epoch):
x_batch, y_batch = train_batch()
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if epoch >= 0:
if epoch % args.saveStep == 0:
path = cnn.saver.save(sess, checkpoint_prefix, global_step=epoch)
print("Saved model checkpoint to {}\n".format(path))
|
aztk/spark/models/plugins/hdfs/configuration.py | Geims83/aztk | 161 | 12700948 | import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile
dir_path = os.path.dirname(os.path.realpath(__file__))
class HDFSPlugin(PluginConfiguration):
def __init__(self):
super().__init__(
name="hdfs",
ports=[
PluginPort(name="File system metadata operations", internal=8020),
PluginPort(name="File system metadata operations(Backup)", internal=9000),
PluginPort(name="Datanode data transfer", internal=50010),
PluginPort(name="Datanode IPC metadata operations", internal=50020),
PluginPort(name="Namenode", internal=50070, public=True),
PluginPort(name="Datanodes", internal=50075, public=True),
],
target_role=PluginTargetRole.All,
execute="hdfs.sh",
files=[PluginFile("hdfs.sh", os.path.join(dir_path, "hdfs.sh"))],
)
|
config/config.py | LegionChang/CoTNet | 360 | 12700976 |
import os
from yacs.config import CfgNode as CN
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
_C = CN()
_C.root_dir = os.getcwd() # root dir
_C.seed = -1.0 # random seed (default: 42)
_C.logger_name = 'log' # log name
_C.amp = False # use NVIDIA amp for mixed precision training
_C.num_gpus = 1
_C.distributed = False
# data
_C.data_loader = CN()
_C.data_loader.data_path = '' # path to dataset, data_dir
_C.data_loader.batch_size = 32 # input batch size for training (default: 32)
_C.data_loader.vbatch_size = 32 # validation batch size
_C.data_loader.workers = 0 # how many training processes to use (default: 1)
_C.data_loader.pin_mem = False # Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.data_loader.prefetcher = True # enable fast prefetcher
_C.data_loader.use_multi_epochs_loader = False # use the multi-epochs-loader to save time at the beginning of every epoch
_C.data_loader.dataset = 'imagenet' # imagenet, cifar10, cifar100
# model
_C.model = CN()
_C.model.name = 'resnet50' # Name of model to train
_C.model.pretrained = False # Start with pretrained version of specified network (if avail)
_C.model.initial_checkpoint = '' # Initialize model from this checkpoint (default: none)
_C.model.resume = '' # Resume full model and optimizer state from checkpoint (default: none)
_C.model.no_resume_opt = False # prevent resume of optimizer state when resuming model
_C.model.num_classes = 1000 # number of label classes (default: 1000)
_C.model.gp = 'avg' # Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")
_C.model.drop = 0.0 # Dropout rate (default: 0.)
_C.model.drop_path = 0.0 # Drop path rate (default None)
_C.model.drop_block = 0.0 # Drop block rate (default None)
_C.model.model_ema = False # Enable tracking moving average of model weights
_C.model.model_ema_force_cpu = False # Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.
_C.model.model_ema_decay = 0.9998 # decay factor for model weights moving average (default: 0.9998)
_C.model.block_name = 'type1'
# BN
_C.BN = CN()
_C.BN.bn_tf = False # Use Tensorflow BatchNorm defaults for models that support it (default: False)
_C.BN.bn_momentum = -1.0 # BatchNorm momentum override (if not None) default None
_C.BN.bn_eps = -1.0 # BatchNorm epsilon override (if not None) default None
_C.BN.sync_bn = False # Enable NVIDIA Apex or Torch synchronized BatchNorm.
_C.BN.dist_bn = '' # Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")
_C.BN.split_bn = False # Enable separate BN layers per augmentation split.
# augmentation
_C.augmentation = CN()
_C.augmentation.no_aug = False
_C.augmentation.scale = [0.08, 1.0]
_C.augmentation.ratio = [0.75, 1.333333333333]
_C.augmentation.hflip = 0.5
_C.augmentation.vflip = 0.0
_C.augmentation.interpolation = '' # Image resize interpolation type (overrides model)
_C.augmentation.color_jitter = 0.4 # Color jitter factor (default: 0.4)
_C.augmentation.aa = '' # Use AutoAugment policy. "v0" or "original". (default None)
_C.augmentation.aug_splits = 0 # Number of augmentation splits (default: 0, valid: 0 or >=2)
_C.augmentation.reprob = 0.0 # Random erase prob (default: 0.)
_C.augmentation.remode = 'const' # Random erase mode (default: "const")
_C.augmentation.recount = 1 # Random erase count (default: 1)
_C.augmentation.resplit = False # Do not random erase first (clean) augmentation split
_C.augmentation.mixup = 0.0 # mixup alpha, mixup enabled if > 0. (default: 0.)
_C.augmentation.mixup_off_epoch = 0 # turn off mixup after this epoch, disabled if 0 (default: 0)
_C.augmentation.cutmix = 0.0
_C.augmentation.cutmix_minmax = []
_C.augmentation.mixup_prob = 1.0
_C.augmentation.mixup_switch_prob = 0.5
_C.augmentation.mixup_mode = 'batch'
_C.augmentation.train_interpolation = 'random' # Training interpolation (random, bilinear, bicubic default: "random")
_C.augmentation.tta = 0 # Test/inference time augmentation (oversampling) factor. 0=None (default: 0)
_C.augmentation.img_size = -1 # Image patch size (default: None => model default)
_C.augmentation.crop_pct = -1.0 # Input image center crop percent (for validation only)
_C.augmentation.mean = [] # Override mean pixel value of dataset
_C.augmentation.std = [] # Override std deviation of the dataset
# loss
_C.loss = CN()
_C.loss.jsd = False # Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.
_C.loss.smoothing = 0.1 # label smoothing (default: 0.1)
# solver
_C.solver = CN()
_C.solver.opt = 'sgd' # Optimizer (default: "sgd")
_C.solver.opt_eps = 1e-8 # Optimizer Epsilon (default: 1e-8)
_C.solver.momentum = 0.9 # SGD momentum (default: 0.9)
_C.solver.weight_decay = 0.0001 # weight decay (default: 0.0001)
_C.solver.sched = 'step' # LR scheduler (default: "step")
_C.solver.lr = 0.01 # learning rate (default: 0.01)
_C.solver.lr_noise = [] # learning rate noise on/off epoch percentages default None
_C.solver.lr_noise_pct = 0.67 # learning rate noise limit percent (default: 0.67)
_C.solver.lr_noise_std = 1.0 # learning rate noise std-dev (default: 1.0)
_C.solver.lr_cycle_mul = 1.0 # learning rate cycle len multiplier (default: 1.0)
_C.solver.lr_cycle_limit = 1 # learning rate cycle limit
_C.solver.warmup_lr = 0.0001 # warmup learning rate (default: 0.0001)
_C.solver.min_lr = 1e-5 # lower lr bound for cyclic schedulers that hit 0 (1e-5)
_C.solver.epochs = 200 # number of epochs to train (default: 200)
_C.solver.start_epoch = -1 # manual epoch number (useful on restarts) default None
_C.solver.decay_epochs = 30 # epoch interval to decay LR
_C.solver.warmup_epochs = 3 # epochs to warmup LR, if scheduler supports
_C.solver.cooldown_epochs = 10 # epochs to cooldown LR at min_lr, after cyclic schedule ends
_C.solver.patience_epochs = 10 # patience epochs for Plateau LR scheduler (default: 10)
_C.solver.decay_rate = 0.1 # LR decay rate (default: 0.1)
_C.solver.log_interval = 50 # how many batches to wait before logging training status
_C.solver.recovery_interval = 0 # how many batches to wait before writing recovery checkpoint
_C.solver.clip_grad = -1.0
_C.solver.clip_mode = 'norm'
_C.solver.use_swa = False
_C.solver.swa_start = 75
_C.solver.swa_freq = 1
# eval
_C.eval = CN()
_C.eval.eval_metric = 'top1' # Best metric (default: "top1")
def pop_unused_value(cfg):
if cfg.BN.bn_momentum < 0:
cfg.BN.pop('bn_momentum')
if cfg.BN.bn_eps < 0:
cfg.BN.pop('bn_eps')
if len(cfg.solver.lr_noise) == 0:
cfg.solver.pop('lr_noise')
if cfg.solver.start_epoch < 0:
cfg.solver.pop('start_epoch')
if cfg.model.drop_path == 0:
cfg.model.pop('drop_path')
if cfg.model.drop_block == 0:
cfg.model.pop('drop_block')
if len(cfg.augmentation.aa) == 0:
cfg.augmentation.pop('aa')
if cfg.augmentation.img_size <= 0:
cfg.augmentation.pop('img_size')
if cfg.augmentation.crop_pct <= 0:
cfg.augmentation.pop('crop_pct')
if len(cfg.augmentation.mean) == 0:
cfg.augmentation.pop('mean')
if len(cfg.augmentation.std) == 0:
cfg.augmentation.pop('std')
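# --- Illustrative usage (added sketch, not part of the original config file) ---
# Typical flow, assuming yacs is installed and `_C` is the CfgNode built above:
# clone the defaults, override a few keys, strip the sentinel values with
# `pop_unused_value`, then feed the result to `resolve_data_config` (defined
# below) to obtain the input-pipeline settings. The override values here are
# placeholders chosen only for the example.
#
#   cfg = _C.clone()
#   cfg.merge_from_list(['model.name', 'resnet18', 'augmentation.img_size', 224])
#   pop_unused_value(cfg)
#   data_config = resolve_data_config(cfg)   # e.g. {'input_size': (3, 224, 224), ...}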
def resolve_data_config(cfg, default_cfg={}, model=None):
new_config = {}
default_cfg = default_cfg
if not default_cfg and model is not None and hasattr(model, 'default_cfg'):
default_cfg = model.default_cfg
# Resolve input/image size
in_chans = 3
input_size = (in_chans, 224, 224)
if 'img_size' in cfg.augmentation and cfg.augmentation.img_size > 0:
assert isinstance(cfg.augmentation.img_size, int)
input_size = (in_chans, cfg.augmentation.img_size, cfg.augmentation.img_size)
elif 'input_size' in default_cfg:
input_size = default_cfg['input_size']
new_config['input_size'] = input_size
# resolve interpolation method
new_config['interpolation'] = 'bicubic'
if 'interpolation' in cfg.augmentation and len(cfg.augmentation.interpolation) > 0:
new_config['interpolation'] = cfg.augmentation.interpolation
elif 'interpolation' in default_cfg:
new_config['interpolation'] = default_cfg['interpolation']
# resolve dataset + model mean for normalization
new_config['mean'] = IMAGENET_DEFAULT_MEAN
if 'mean' in cfg.augmentation and len(cfg.augmentation.mean) > 0:
mean = tuple(cfg.augmentation.mean)
if len(mean) == 1:
mean = tuple(list(mean) * in_chans)
else:
assert len(mean) == in_chans
new_config['mean'] = mean
elif 'mean' in default_cfg:
new_config['mean'] = default_cfg['mean']
# resolve dataset + model std deviation for normalization
new_config['std'] = IMAGENET_DEFAULT_STD
if 'std' in cfg.augmentation and len(cfg.augmentation.std) > 0:
std = tuple(cfg.augmentation.std)
if len(std) == 1:
std = tuple(list(std) * in_chans)
else:
assert len(std) == in_chans
new_config['std'] = std
elif 'std' in default_cfg:
new_config['std'] = default_cfg['std']
# resolve default crop percentage
new_config['crop_pct'] = DEFAULT_CROP_PCT
if 'crop_pct' in cfg.augmentation and cfg.augmentation.crop_pct > 0:
new_config['crop_pct'] = cfg.augmentation.crop_pct
elif 'crop_pct' in default_cfg:
new_config['crop_pct'] = default_cfg['crop_pct']
return new_config |
tests/test_sampling/test_graph_samplers.py | Rodrigo-A-Pereira/pykeen | 750 | 12700987 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Tests for graph samplers."""
import unittest
import torch
from pykeen.datasets import Nations
from pykeen.training.schlichtkrull_sampler import GraphSampler, _compute_compressed_adjacency_list
class GraphSamplerTest(unittest.TestCase):
"""Test the GraphSampler."""
def setUp(self) -> None:
"""Set up the test case with a triples factory."""
self.triples_factory = Nations().training
self.num_samples = 20
self.num_epochs = 10
self.graph_sampler = GraphSampler(triples_factory=self.triples_factory, num_samples=self.num_samples)
def test_sample(self) -> None:
"""Test drawing samples from GraphSampler."""
for e in range(self.num_epochs):
# sample a batch
batch_indices = []
for j in self.graph_sampler:
batch_indices.append(torch.as_tensor(j))
batch = torch.stack(batch_indices)
# check shape
assert batch.shape == (self.num_samples,)
# get triples
triples_batch = self.triples_factory.mapped_triples[batch]
# check connected components
# super inefficient
components = [{int(e)} for e in torch.cat([triples_batch[:, i] for i in (0, 2)]).unique()]
for h, _, t in triples_batch:
h, t = int(h), int(t)
s_comp_ind = [i for i, c in enumerate(components) if h in c][0]
o_comp_ind = [i for i, c in enumerate(components) if t in c][0]
# join
if s_comp_ind != o_comp_ind:
s_comp = components.pop(max(s_comp_ind, o_comp_ind))
o_comp = components.pop(min(s_comp_ind, o_comp_ind))
so_comp = s_comp.union(o_comp)
components.append(so_comp)
else:
pass
# already joined
if len(components) < 2:
break
# check that there is only a single component
assert len(components) == 1
class AdjacencyListCompressionTest(unittest.TestCase):
"""Unittest for utility method."""
def setUp(self) -> None:
"""Set up the test case with a triples factory."""
self.triples_factory = Nations().training
def test_compute_compressed_adjacency_list(self):
"""Test method _compute_compressed_adjacency_list ."""
degrees, offsets, comp_adj_lists = _compute_compressed_adjacency_list(triples_factory=self.triples_factory)
triples = self.triples_factory.mapped_triples
uniq, cnt = torch.unique(torch.cat([triples[:, i] for i in (0, 2)]), return_counts=True)
assert (degrees == cnt).all()
assert (offsets[1:] == torch.cumsum(cnt, dim=0)[:-1]).all()
assert (offsets < comp_adj_lists.shape[0]).all()
# check content of comp_adj_lists
for i in range(self.triples_factory.num_entities):
start = offsets[i]
stop = start + degrees[i]
adj_list = comp_adj_lists[start:stop]
# check edge ids
edge_ids = adj_list[:, 0]
adjacent_edges = set(
int(a) for a in ((triples[:, 0] == i) | (triples[:, 2] == i)).nonzero(as_tuple=False).flatten()
)
assert adjacent_edges == set(map(int, edge_ids))
|
mlcomp/migration/versions/009_dag_tag.py | lightforever/kaggler | 166 | 12701015 | from migrate import ForeignKeyConstraint
from sqlalchemy import Table, Column, MetaData, String, Integer
meta = MetaData()
table = Table(
'dag_tag', meta,
Column('dag', Integer, primary_key=True),
Column('tag', String(100), primary_key=True),
)
def upgrade(migrate_engine):
conn = migrate_engine.connect()
trans = conn.begin()
try:
meta.bind = conn
table.create()
dag = Table('dag', meta, autoload=True)
ForeignKeyConstraint([table.c.dag], [dag.c.id],
ondelete='CASCADE').create()
except Exception:
trans.rollback()
raise
else:
trans.commit()
def downgrade(migrate_engine):
conn = migrate_engine.connect()
trans = conn.begin()
try:
meta.bind = conn
table.drop()
except Exception:
trans.rollback()
raise
else:
trans.commit()
|
src/base/environment.py | AbhinavGopal/ts_tutorial | 290 | 12701037 | """ Environment determines the underlying law of the system.
All bandit problems should inherit from environment.
"""
import numpy as np
##############################################################################
class Environment(object):
"""Base class for all bandit environments."""
def __init__(self):
"""Initialize the environment."""
pass
def get_observation(self):
"""Returns an observation from the environment."""
pass
def get_optimal_reward(self):
"""Returns the optimal possible reward for the environment at that point."""
pass
def get_expected_reward(self, action):
"""Gets the expected reward of an action."""
pass
def get_stochastic_reward(self, action):
"""Gets a stochastic reward for the action."""
pass
def advance(self, action, reward):
"""Updating the environment (useful for nonstationary bandit)."""
pass
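##############################################################################


class BernoulliBandit(Environment):
  """Illustrative subclass (added sketch, not part of the original file).

  A K-armed Bernoulli bandit, assuming the conventions documented above:
  actions are arm indices and rewards are 0/1 draws. The class name and the
  `probs` argument are hypothetical and only show how the interface is
  filled in.
  """

  def __init__(self, probs):
    self.probs = np.asarray(probs)

  def get_observation(self):
    return None  # stationary bandit: nothing to observe

  def get_optimal_reward(self):
    return np.max(self.probs)

  def get_expected_reward(self, action):
    return self.probs[action]

  def get_stochastic_reward(self, action):
    return np.random.binomial(1, self.probs[action])

  def advance(self, action, reward):
    pass  # stationary environment: nothing to update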
|
scripts/init_markdown.py | xofolowski/atc-react | 366 | 12701044 | <filename>scripts/init_markdown.py
#!/usr/bin/env python3
try:
from scripts.reactutils import REACTutils
except:
from response.atc_react.scripts.reactutils import REACTutils
from pathlib import Path
def react_create_markdown_dirs():
REACTConfig = REACTutils.load_config('config.yml')
base_dir = Path(REACTConfig.get(
'md_name_of_root_directory',
'../docs'
))
target_dir_list = ['Response_Actions', 'Response_Playbooks' , 'Response_Stages']
for item in target_dir_list:
(base_dir / item).mkdir(parents=True, exist_ok=True)
if __name__ == '__main__':
react_create_markdown_dirs()
|
tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial2_Solution_76573dcd.py | eduardojdiniz/CompNeuro | 2,294 | 12701064 | <gh_stars>1000+
def create_HMM(switch_prob=0.1, noise_level=1e-1, startprob=[1.0, 0.0]):
"""Create an HMM with binary state variable and 1D Gaussian measurements
The probability to switch to the other state is `switch_prob`. Two
measurement models have mean 1.0 and -1.0 respectively. `noise_level`
specifies the standard deviation of the measurement models.
Args:
switch_prob (float): probability to jump to the other state
noise_level (float): standard deviation of measurement models. Same for
two components
Returns:
model (GaussianHMM instance): the described HMM
"""
n_components = 2
startprob_vec = np.asarray(startprob)
# STEP 1: Transition probabilities
transmat_mat = np.array([[1. - switch_prob, switch_prob], [switch_prob, 1. - switch_prob]]) # # np.array([[...], [...]])
# STEP 2: Measurement probabilities
# Mean measurements for each state
means_vec = np.array([-1.0, 1.0])
# Noise for each state
vars_vec = np.ones(2) * noise_level * noise_level
# Initialize model
model = GaussianHMM1D(
startprob = startprob_vec,
transmat = transmat_mat,
means = means_vec,
vars = vars_vec,
n_components = n_components
)
return model
def sample(model, T):
"""Generate samples from the given HMM
Args:
model (GaussianHMM1D): the HMM with Gaussian measurement
T (int): number of time steps to sample
Returns:
M (numpy vector): the series of measurements
S (numpy vector): the series of latent states
"""
# Initialize S and M
S = np.zeros((T,),dtype=int)
M = np.zeros((T,))
# Calculate initial state
S[0] = np.random.choice([0,1],p=model.startprob)
# Latent state at time `t` depends on `t-1` and the corresponding transition probabilities to other states
for t in range(1,T):
# STEP 3: Get vector of probabilities for all possible `S[t]` given a particular `S[t-1]`
transition_vector = model.transmat[S[t-1],:]
# Calculate latent state at time `t`
S[t] = np.random.choice([0,1],p=transition_vector)
# Calculate measurements conditioned on the latent states
# Since measurements are independent of each other given the latent states, we could calculate them as a batch
means = model.means[S]
scales = np.sqrt(model.vars[S])
M = np.random.normal(loc=means, scale=scales, size=(T,))
return M, S
# Set random seed
np.random.seed(101)
# Set parameters of HMM
T = 100
switch_prob = 0.1
noise_level = 2.0
# Create HMM
model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)
# Sample from HMM
M, S = sample(model,T)
assert M.shape==(T,)
assert S.shape==(T,)
# Print values
print(M[:5])
print(S[:5]) |
contrib/marat/python/Residue.py | dinisAbranches/nwchem | 317 | 12701083 | <gh_stars>100-1000
'''
Created on Feb 5, 2012
@author: marat
'''
import sys
from ResAtom import *
class Residue:
'''
classdocs
'''
def __init__(self,params={}):
'''
Default constructor for Residue class
atoms list of atoms in the residue
name residue name
'''
        if "atoms" in params:
            self.atoms = params["atoms"]
else:
self.atoms=[]
        if "name" in params:
self.name = params["name"]
else:
self.name = ""
def __str__(self):
output = ""
for a in self.atoms:
output=output + str(a)+"\n"
return output
def toPDBrecord(self,id_atom=1,id_res=1):
output = ""
i=id_atom-1
for a in self.atoms:
i = i + 1
output=output + a.toPDBrecord(i) +"\n"
return output
def AddAtom(self,a):
if self.name =="" :
self.name = a.resname
else:
if a.resname != self.name:
print("different names for the same residue index")
sys.exit(1)
self.atoms.append(a)
if __name__ == '__main__':
aline1 = "ATOM 3 O2 IO3 1 -1.182 1.410 0.573 -0.80 O"
aline2 = "ATOM 1 I1 IO3 1 -1.555 -0.350 0.333 1.39 I"
res0 = Residue()
a = ResAtom.fromPDBrecord(aline2)
b = ResAtom.fromPDBrecord(aline1)
res0.AddAtom(a)
res0.AddAtom(b)
print(res0.toPDBrecord(id_atom=4))
|
common/nlp/roberta/utils.py | xiling42/VL-BERT | 671 | 12701149 | <filename>common/nlp/roberta/utils.py
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import os
try:
from functools import lru_cache
except ImportError:
# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
def lru_cache():
return lambda func: func
import logging
import json
import six
from io import open
from functools import wraps
import boto3
import requests
from botocore.exceptions import ClientError
import shutil
from hashlib import sha256
import fnmatch
import tempfile
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_TRANSFORMERS_CACHE', os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE',
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path))
PYTORCH_TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE # Kept for backward compatibility
logger = logging.getLogger(__name__)
SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json'
ADDED_TOKENS_FILE = 'added_tokens.json'
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
_chr = unichr if sys.version_info[0] == 2 else chr
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [_chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
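def _byte_bpe_demo():
    """Illustrative helper (added sketch, not part of the original module).

    Shows, under Python 3, how the two functions above interact: raw UTF-8
    bytes are first mapped to printable unicode symbols, then `get_pairs`
    enumerates the adjacent symbol pairs a BPE merge loop would rank.
    The word 'low' is an arbitrary example.
    """
    byte_encoder = bytes_to_unicode()
    word = tuple(byte_encoder[b] for b in 'low'.encode('utf-8'))
    return get_pairs(word)  # {('l', 'o'), ('o', 'w')}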
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
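# --- Illustrative usage (added sketch, not part of the original module) ---
# `cached_path` is transparent for existing local files and downloads/caches
# anything that parses as an http/https/s3 URL. The URL and paths below are
# placeholders chosen for illustration only, not real vocabulary files.
#
#   vocab_file = cached_path('https://example.com/roberta-vocab.json',
#                            cache_dir='/tmp/transformers_cache')
#   merges_file = cached_path('./local/merges.txt')  # returned unchanged if it exists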
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_TRANSFORMERS_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
class PreTrainedTokenizer(object):
""" Base class for all tokenizers.
    Handles all the shared methods for tokenization and special tokens, as well as methods for downloading/caching/loading pretrained tokenizers and adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers, so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
Parameters:
- ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token``
- ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token``
- ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token``
- ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens``
"""
vocab_files_names = {}
pretrained_vocab_files_map = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token",
"pad_token", "cls_token", "mask_token",
"additional_special_tokens"]
@property
def bos_token(self):
""" Beginning of sentence token (string). Log an error if used while not having been set. """
if self._bos_token is None:
logger.error("Using bos_token, but it is not set yet.")
return self._bos_token
@property
def eos_token(self):
""" End of sentence token (string). Log an error if used while not having been set. """
if self._eos_token is None:
logger.error("Using eos_token, but it is not set yet.")
return self._eos_token
@property
def unk_token(self):
""" Unknown token (string). Log an error if used while not having been set. """
if self._unk_token is None:
logger.error("Using unk_token, but it is not set yet.")
return self._unk_token
@property
def sep_token(self):
""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """
if self._sep_token is None:
logger.error("Using sep_token, but it is not set yet.")
return self._sep_token
@property
def pad_token(self):
""" Padding token (string). Log an error if used while not having been set. """
if self._pad_token is None:
logger.error("Using pad_token, but it is not set yet.")
return self._pad_token
@property
def cls_token(self):
""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
if self._cls_token is None:
logger.error("Using cls_token, but it is not set yet.")
return self._cls_token
@property
def mask_token(self):
""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
if self._mask_token is None:
logger.error("Using mask_token, but it is not set yet.")
return self._mask_token
@property
def additional_special_tokens(self):
""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
if self._additional_special_tokens is None:
logger.error("Using additional_special_tokens, but it is not set yet.")
return self._additional_special_tokens
@bos_token.setter
def bos_token(self, value):
self._bos_token = value
@eos_token.setter
def eos_token(self, value):
self._eos_token = value
@unk_token.setter
def unk_token(self, value):
self._unk_token = value
@sep_token.setter
def sep_token(self, value):
self._sep_token = value
@pad_token.setter
def pad_token(self, value):
self._pad_token = value
@cls_token.setter
def cls_token(self, value):
self._cls_token = value
@mask_token.setter
def mask_token(self, value):
self._mask_token = value
@additional_special_tokens.setter
def additional_special_tokens(self, value):
self._additional_special_tokens = value
def __init__(self, max_len=None, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._additional_special_tokens = []
self.max_len = max_len if max_len is not None else int(1e12)
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
for key, value in kwargs.items():
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(
isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
setattr(self, key, value)
@classmethod
def from_pretrained(cls, *inputs, **kwargs):
r"""
Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""
return cls._from_pretrained(*inputs, **kwargs)
@classmethod
def _from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
if pretrained_model_name_or_path in s3_models:
# Get the vocabulary from AWS S3 bucket
for file_id, map_list in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
else:
# Get the vocabulary from local files
logger.info(
"Model name '{}' not found in model shortcut name list ({}). "
"Assuming '{}' is a path or url to a directory containing tokenizer files.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path))
# Look for the tokenizer main vocabulary files
for file_id, file_name in cls.vocab_files_names.items():
if os.path.isdir(pretrained_model_name_or_path):
# If a directory is provided we look for the standard filenames
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
else:
# If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
full_file_name = pretrained_model_name_or_path
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
# Look for the additional tokens files
all_vocab_files_names = {'added_tokens_file': ADDED_TOKENS_FILE,
'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE}
# If a path to a file was provided, get the parent directory
saved_directory = pretrained_model_name_or_path
if os.path.exists(saved_directory) and not os.path.isdir(saved_directory):
saved_directory = os.path.dirname(saved_directory)
for file_id, file_name in all_vocab_files_names.items():
full_file_name = os.path.join(saved_directory, file_name)
if not os.path.exists(full_file_name):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
if all(full_file_name is None for full_file_name in vocab_files.values()):
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find tokenizer files"
"at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path, ))
return None
# Get files from url, cache, or disk depending on the case
try:
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None:
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in s3_models:
logger.error("Couldn't reach server to download vocabulary.")
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path, ', '.join(s3_models),
pretrained_model_name_or_path, str(vocab_files.keys())))
return None
for file_id, file_path in vocab_files.items():
if file_path == resolved_vocab_files[file_id]:
logger.info("loading file {}".format(file_path))
else:
logger.info("loading file {} from cache at {}".format(
file_path, resolved_vocab_files[file_id]))
# Set max length if needed
if pretrained_model_name_or_path in cls.max_model_input_sizes:
# if we're using a pretrained model, ensure the tokenizer
            # won't index sequences longer than the number of positional embeddings
max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
if max_len is not None and isinstance(max_len, (int, float)):
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Merge resolved_vocab_files arguments in kwargs.
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in kwargs:
kwargs[args_name] = file_path
if special_tokens_map_file is not None:
special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8"))
for key, value in special_tokens_map.items():
if key not in kwargs:
kwargs[key] = value
# Instantiate tokenizer.
tokenizer = cls(*inputs, **kwargs)
# Add supplementary tokens.
if added_tokens_file is not None:
added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8"))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
return tokenizer
def save_pretrained(self, save_directory):
""" Save the tokenizer vocabulary files (with added tokens) and the
special-tokens-to-class-attributes-mapping to a directory.
        This method makes sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
if not os.path.isdir(save_directory):
logger.error("Saving directory ({}) should be a directory".format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
with open(added_tokens_file, 'w', encoding='utf-8') as f:
if self.added_tokens_encoder:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
else:
out_str = u"{}"
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
def save_vocabulary(self, save_directory):
""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""
raise NotImplementedError
def vocab_size(self):
""" Size of the base vocabulary (without the added tokens) """
raise NotImplementedError
def __len__(self):
""" Size of the full vocabulary with the added tokens """
return self.vocab_size + len(self.added_tokens_encoder)
def add_tokens(self, new_tokens):
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""
if not new_tokens:
return 0
to_add_tokens = []
for token in new_tokens:
assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
if token != self.unk_token and \
self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token):
to_add_tokens.append(token)
logger.info("Adding %s to the vocabulary", token)
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
return len(to_add_tokens)
def add_special_tokens(self, special_tokens_dict):
"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""
if not special_tokens_dict:
return 0
added_tokens = 0
for key, value in special_tokens_dict.items():
assert key in self.SPECIAL_TOKENS_ATTRIBUTES
if key == 'additional_special_tokens':
assert isinstance(value, (list, tuple)) and all(
isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value)
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode))
added_tokens += self.add_tokens([value])
logger.info("Assigning %s to the %s key of the tokenizer", value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
"""
def split_on_tokens(tok_list, text):
if not text:
return []
if not tok_list:
return self._tokenize(text, **kwargs)
tok = tok_list[0]
split_text = text.split(tok)
return sum((split_on_tokens(tok_list[1:], sub_text.strip()) + [tok] \
for sub_text in split_text), [])[:-1]
added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
def _tokenize(self, text, **kwargs):
""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
""" Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id
(resp. a sequence of ids), using the vocabulary.
"""
if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
if len(ids) > self.max_len:
logger.warning("Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(ids), self.max_len))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(self, text, text_pair=None, add_special_tokens=False):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded.
text_pair: Optional second sequence to be encoded.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
"""
if text_pair is None:
if add_special_tokens:
return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
else:
return self.convert_tokens_to_ids(self.tokenize(text))
first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair)]
if add_special_tokens:
return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
else:
return first_sentence_tokens, second_sentence_tokens
def add_special_tokens_single_sentence(self, token_ids):
raise NotImplementedError
def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
raise NotImplementedError
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str/unicode), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if index in self.all_special_ids and skip_special_tokens:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index):
raise NotImplementedError
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string.
The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""
return ' '.join(self.convert_ids_to_tokens(tokens))
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
"""
Converts a sequence of ids (integer) in a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
"""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
text = self.convert_tokens_to_string(filtered_tokens)
if self.sep_token is not None and self.sep_token in text:
text = text.replace(self.cls_token, self.sep_token)
split_text = list(filter(lambda sentence: len(sentence) > 0, text.split(self.sep_token)))
if clean_up_tokenization_spaces:
clean_text = [self.clean_up_tokenization(text) for text in split_text]
return clean_text
else:
return split_text
else:
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
@property
def special_tokens_map(self):
""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, "_" + attr)
if attr_value:
set_attr[attr] = attr_value
return set_attr
@property
def all_special_tokens(self):
""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (attr_value if isinstance(attr_value, (list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""
all_toks = self.all_special_tokens
all_ids = list(self._convert_token_to_id(t) for t in all_toks)
return all_ids
@staticmethod
    def clean_up_tokenization(out_string):
        """ Clean up a list of simple English tokenization artifacts like spaces before punctuation and abbreviated forms.
        """
        out_string = (out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!')
                      .replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't")
                      .replace(" 'm", "'m").replace(" do not", " don't").replace(" 's", "'s")
                      .replace(" 've", "'ve").replace(" 're", "'re"))
return out_string
|
tensorflow/contrib/data/python/ops/prefetching_ops.py | PaulWang1905/tensorflow | 848 | 12701151 | <reponame>PaulWang1905/tensorflow<gh_stars>100-1000
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(None,
"Use `tf.data.experimental.prefetch_to_device(...)`.")
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.prefetch_to_device(device, buffer_size)
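# Illustrative usage (added sketch, not part of the original file): the
# transformation is applied as the final step of an input pipeline. The device
# name and buffer size below are placeholders.
#
#   dataset = tf.data.Dataset.range(10)
#   dataset = dataset.apply(prefetch_to_device('/gpu:0', buffer_size=2))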
@deprecation.deprecated(None, "Use `tf.data.experimental.copy_to_device(...)`.")
def copy_to_device(target_device, source_device="/cpu:0"):
"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
return prefetching_ops.copy_to_device(target_device, source_device)
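# Illustrative usage (added sketch, not part of the original file): copy
# elements to the target device and then prefetch on that device; the device
# string below is a placeholder.
#
#   dataset = dataset.apply(copy_to_device('/gpu:0')).prefetch(1)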
|
lib/spack/external/py/_io/capture.py | kkauder/spack | 2,479 | 12701179 | import os
import sys
import py
import tempfile
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
if sys.version_info < (3,0):
class TextIO(StringIO):
def write(self, data):
if not isinstance(data, unicode):
data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
StringIO.write(self, data)
else:
TextIO = StringIO
try:
from io import BytesIO
except ImportError:
class BytesIO(StringIO):
def write(self, data):
if isinstance(data, unicode):
raise TypeError("not a byte value: %r" %(data,))
StringIO.write(self, data)
patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
class FDCapture:
""" Capture IO to/from a given os-level filedescriptor. """
def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
""" save targetfd descriptor, and open a new
temporary file there. If no tmpfile is
specified a tempfile.Tempfile() will be opened
in text mode.
"""
self.targetfd = targetfd
if tmpfile is None and targetfd != 0:
f = tempfile.TemporaryFile('wb+')
tmpfile = dupfile(f, encoding="UTF-8")
f.close()
self.tmpfile = tmpfile
self._savefd = os.dup(self.targetfd)
if patchsys:
self._oldsys = getattr(sys, patchsysdict[targetfd])
if now:
self.start()
def start(self):
try:
os.fstat(self._savefd)
except OSError:
raise ValueError("saved filedescriptor not valid, "
"did you call start() twice?")
if self.targetfd == 0 and not self.tmpfile:
fd = os.open(devnullpath, os.O_RDONLY)
os.dup2(fd, 0)
os.close(fd)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
else:
os.dup2(self.tmpfile.fileno(), self.targetfd)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
def done(self):
""" unpatch and clean up, returns the self.tmpfile (file object)
"""
os.dup2(self._savefd, self.targetfd)
os.close(self._savefd)
if self.targetfd != 0:
self.tmpfile.seek(0)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], self._oldsys)
return self.tmpfile
def writeorg(self, data):
""" write a string to the original file descriptor
"""
tempfp = tempfile.TemporaryFile()
try:
os.dup2(self._savefd, tempfp.fileno())
tempfp.write(data)
finally:
tempfp.close()
def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
""" return a new open file object that's a duplicate of f
mode is duplicated if not given, 'buffering' controls
buffer size (defaulting to no buffering) and 'raising'
defines whether an exception is raised when an incompatible
file object is passed in (if raising is False, the file
object itself will be returned)
"""
try:
fd = f.fileno()
mode = mode or f.mode
except AttributeError:
if raising:
raise
return f
newfd = os.dup(fd)
if sys.version_info >= (3,0):
if encoding is not None:
mode = mode.replace("b", "")
buffering = True
return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
else:
f = os.fdopen(newfd, mode, buffering)
if encoding is not None:
return EncodedFile(f, encoding)
return f
class EncodedFile(object):
def __init__(self, _stream, encoding):
self._stream = _stream
self.encoding = encoding
def write(self, obj):
if isinstance(obj, unicode):
obj = obj.encode(self.encoding)
elif isinstance(obj, str):
pass
else:
obj = str(obj)
self._stream.write(obj)
def writelines(self, linelist):
data = ''.join(linelist)
self.write(data)
def __getattr__(self, name):
return getattr(self._stream, name)
class Capture(object):
def call(cls, func, *args, **kwargs):
""" return a (res, out, err) tuple where
out and err represent the output/error output
during function execution.
call the given function with args/kwargs
and capture output/error during its execution.
"""
so = cls()
try:
res = func(*args, **kwargs)
finally:
out, err = so.reset()
return res, out, err
call = classmethod(call)
def reset(self):
""" reset sys.stdout/stderr and return captured output as strings. """
if hasattr(self, '_reset'):
raise ValueError("was already reset")
self._reset = True
outfile, errfile = self.done(save=False)
out, err = "", ""
if outfile and not outfile.closed:
out = outfile.read()
outfile.close()
if errfile and errfile != outfile and not errfile.closed:
err = errfile.read()
errfile.close()
return out, err
def suspend(self):
""" return current snapshot captures, memorize tempfiles. """
outerr = self.readouterr()
outfile, errfile = self.done()
return outerr
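# Illustrative usage (added sketch, not part of the original module): the
# classmethod `call` above wraps a callable and returns its result together
# with whatever it wrote to stdout/stderr, e.g. with the in-memory StdCapture
# subclass defined below:
#
#   res, out, err = StdCapture.call(lambda: sys.stdout.write("hello\n"))
#   assert out == "hello\n"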
class StdCaptureFD(Capture):
""" This class allows to capture writes to FD1 and FD2
and may connect a NULL file to FD0 (and prevent
reads from sys.stdin). If any of the 0,1,2 file descriptors
is invalid it will not be captured.
"""
def __init__(self, out=True, err=True, mixed=False,
in_=True, patchsys=True, now=True):
self._options = {
"out": out,
"err": err,
"mixed": mixed,
"in_": in_,
"patchsys": patchsys,
"now": now,
}
self._save()
if now:
self.startall()
def _save(self):
in_ = self._options['in_']
out = self._options['out']
err = self._options['err']
mixed = self._options['mixed']
patchsys = self._options['patchsys']
if in_:
try:
self.in_ = FDCapture(0, tmpfile=None, now=False,
patchsys=patchsys)
except OSError:
pass
if out:
tmpfile = None
if hasattr(out, 'write'):
tmpfile = out
try:
self.out = FDCapture(1, tmpfile=tmpfile,
now=False, patchsys=patchsys)
self._options['out'] = self.out.tmpfile
except OSError:
pass
if err:
if out and mixed:
tmpfile = self.out.tmpfile
elif hasattr(err, 'write'):
tmpfile = err
else:
tmpfile = None
try:
self.err = FDCapture(2, tmpfile=tmpfile,
now=False, patchsys=patchsys)
self._options['err'] = self.err.tmpfile
except OSError:
pass
def startall(self):
if hasattr(self, 'in_'):
self.in_.start()
if hasattr(self, 'out'):
self.out.start()
if hasattr(self, 'err'):
self.err.start()
def resume(self):
""" resume capturing with original temp files. """
self.startall()
def done(self, save=True):
""" return (outfile, errfile) and stop capturing. """
outfile = errfile = None
if hasattr(self, 'out') and not self.out.tmpfile.closed:
outfile = self.out.done()
if hasattr(self, 'err') and not self.err.tmpfile.closed:
errfile = self.err.done()
if hasattr(self, 'in_'):
tmpfile = self.in_.done()
if save:
self._save()
return outfile, errfile
def readouterr(self):
""" return snapshot value of stdout/stderr capturings. """
if hasattr(self, "out"):
out = self._readsnapshot(self.out.tmpfile)
else:
out = ""
if hasattr(self, "err"):
err = self._readsnapshot(self.err.tmpfile)
else:
err = ""
return [out, err]
def _readsnapshot(self, f):
f.seek(0)
res = f.read()
enc = getattr(f, "encoding", None)
if enc:
res = py.builtin._totext(res, enc, "replace")
f.truncate(0)
f.seek(0)
return res
class StdCapture(Capture):
""" This class allows to capture writes to sys.stdout|stderr "in-memory"
and will raise errors on tries to read from sys.stdin. It only
modifies sys.stdout|stderr|stdin attributes and does not
touch underlying File Descriptors (use StdCaptureFD for that).
"""
def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
self._oldout = sys.stdout
self._olderr = sys.stderr
self._oldin = sys.stdin
if out and not hasattr(out, 'file'):
out = TextIO()
self.out = out
if err:
if mixed:
err = out
elif not hasattr(err, 'write'):
err = TextIO()
self.err = err
self.in_ = in_
if now:
self.startall()
def startall(self):
if self.out:
sys.stdout = self.out
if self.err:
sys.stderr = self.err
if self.in_:
sys.stdin = self.in_ = DontReadFromInput()
def done(self, save=True):
""" return (outfile, errfile) and stop capturing. """
outfile = errfile = None
if self.out and not self.out.closed:
sys.stdout = self._oldout
outfile = self.out
outfile.seek(0)
if self.err and not self.err.closed:
sys.stderr = self._olderr
errfile = self.err
errfile.seek(0)
if self.in_:
sys.stdin = self._oldin
return outfile, errfile
def resume(self):
""" resume capturing with original temp files. """
self.startall()
def readouterr(self):
""" return snapshot value of stdout/stderr capturings. """
out = err = ""
if self.out:
out = self.out.getvalue()
self.out.truncate(0)
self.out.seek(0)
if self.err:
err = self.err.getvalue()
self.err.truncate(0)
self.err.seek(0)
return out, err
class DontReadFromInput:
"""Temporary stub class. Ideally when stdin is accessed, the
capturing should be turned off, with possibly all data captured
so far sent to the screen. This should be configurable, though,
because in automated test runs it is better to crash than
hang indefinitely.
"""
def read(self, *args):
raise IOError("reading from stdin while output is captured")
readline = read
readlines = read
__iter__ = read
def fileno(self):
raise ValueError("redirected Stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
try:
devnullpath = os.devnull
except AttributeError:
if os.name == 'nt':
devnullpath = 'NUL'
else:
devnullpath = '/dev/null'
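# Usage sketch (illustrative only; mirrors how the classes above are typically used):
#
#     cap = StdCapture()                 # replaces sys.stdout/sys.stderr in-memory
#     print("hello")
#     out, err = cap.reset()             # -> ("hello\n", "")
#
#     res, out, err = StdCapture.call(sorted, [3, 1, 2])   # capture around one call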
|
complexPyTorch/complexFunctions.py | jackhwalters/complexPyTorch | 270 | 12701203 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: spopoff
"""
from torch.nn.functional import relu, max_pool2d, avg_pool2d, dropout, dropout2d, interpolate, sigmoid, tanh
import torch
def complex_matmul(A, B):
'''
    Performs the matrix product between two complex matrices
'''
outp_real = torch.matmul(A.real, B.real) - torch.matmul(A.imag, B.imag)
outp_imag = torch.matmul(A.real, B.imag) + torch.matmul(A.imag, B.real)
return outp_real.type(torch.complex64) + 1j * outp_imag.type(torch.complex64)
def complex_avg_pool2d(input, *args, **kwargs):
'''
Perform complex average pooling.
'''
absolute_value_real = avg_pool2d(input.real, *args, **kwargs)
absolute_value_imag = avg_pool2d(input.imag, *args, **kwargs)
return absolute_value_real.type(torch.complex64)+1j*absolute_value_imag.type(torch.complex64)
def complex_normalize(input):
'''
Perform complex normalization
'''
real_value, imag_value = input.real, input.imag
real_norm = (real_value - real_value.mean()) / real_value.std()
imag_norm = (imag_value - imag_value.mean()) / imag_value.std()
return real_norm.type(torch.complex64) + 1j*imag_norm.type(torch.complex64)
def complex_relu(input):
return relu(input.real).type(torch.complex64)+1j*relu(input.imag).type(torch.complex64)
def complex_sigmoid(input):
return sigmoid(input.real).type(torch.complex64)+1j*sigmoid(input.imag).type(torch.complex64)
def complex_tanh(input):
return tanh(input.real).type(torch.complex64)+1j*tanh(input.imag).type(torch.complex64)
def complex_opposite(input):
return -(input.real).type(torch.complex64)+1j*(-(input.imag).type(torch.complex64))
def complex_stack(input, dim):
input_real = [x.real for x in input]
input_imag = [x.imag for x in input]
return torch.stack(input_real, dim).type(torch.complex64)+1j*torch.stack(input_imag, dim).type(torch.complex64)
def _retrieve_elements_from_indices(tensor, indices):
flattened_tensor = tensor.flatten(start_dim=-2)
output = flattened_tensor.gather(dim=-1, index=indices.flatten(start_dim=-2)).view_as(indices)
return output
def complex_upsample(input, size=None, scale_factor=None, mode='nearest',
align_corners=None, recompute_scale_factor=None):
'''
Performs upsampling by separately interpolating the real and imaginary part and recombining
'''
outp_real = interpolate(input.real, size=size, scale_factor=scale_factor, mode=mode,
align_corners=align_corners, recompute_scale_factor=recompute_scale_factor)
outp_imag = interpolate(input.imag, size=size, scale_factor=scale_factor, mode=mode,
align_corners=align_corners, recompute_scale_factor=recompute_scale_factor)
return outp_real.type(torch.complex64) + 1j * outp_imag.type(torch.complex64)
def complex_upsample2(input, size=None, scale_factor=None, mode='nearest',
align_corners=None, recompute_scale_factor=None):
'''
Performs upsampling by separately interpolating the amplitude and phase part and recombining
'''
outp_abs = interpolate(input.abs(), size=size, scale_factor=scale_factor, mode=mode,
align_corners=align_corners, recompute_scale_factor=recompute_scale_factor)
angle = torch.atan2(input.imag,input.real)
outp_angle = interpolate(angle, size=size, scale_factor=scale_factor, mode=mode,
align_corners=align_corners, recompute_scale_factor=recompute_scale_factor)
    return outp_abs \
        * (torch.cos(outp_angle).type(torch.complex64)+1j*torch.sin(outp_angle).type(torch.complex64))
def complex_max_pool2d(input,kernel_size, stride=None, padding=0,
dilation=1, ceil_mode=False, return_indices=False):
'''
    Perform complex max pooling by selecting on the absolute value of the complex values.
'''
absolute_value, indices = max_pool2d(
input.abs(),
kernel_size = kernel_size,
stride = stride,
padding = padding,
dilation = dilation,
ceil_mode = ceil_mode,
return_indices = True
)
# performs the selection on the absolute values
absolute_value = absolute_value.type(torch.complex64)
    # retrieve the corresponding phase value using the indices
# unfortunately, the derivative for 'angle' is not implemented
angle = torch.atan2(input.imag,input.real)
# get only the phase values selected by max pool
angle = _retrieve_elements_from_indices(angle, indices)
return absolute_value \
* (torch.cos(angle).type(torch.complex64)+1j*torch.sin(angle).type(torch.complex64))
def complex_dropout(input, p=0.5, training=True):
# need to have the same dropout mask for real and imaginary part,
# this not a clean solution!
#mask = torch.ones_like(input).type(torch.float32)
mask = torch.ones(*input.shape, dtype = torch.float32)
    # note: dropout() already rescales kept elements by 1/(1-p), so the extra
    # *1/(1-p) factor below scales a second time
    mask = dropout(mask, p, training)*1/(1-p)
    mask = mask.type(input.dtype)
return mask*input
def complex_dropout2d(input, p=0.5, training=True):
# need to have the same dropout mask for real and imaginary part,
# this not a clean solution!
mask = torch.ones(*input.shape, dtype = torch.float32)
mask = dropout2d(mask, p, training)*1/(1-p)
    mask = mask.type(input.dtype)
return mask*input
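# Minimal self-check sketch (illustrative; assumes a PyTorch build with complex
# tensor support, which the functions above already require):
if __name__ == "__main__":
    z = torch.randn(1, 1, 4, 4, dtype=torch.complex64)
    print(complex_relu(z).shape)            # ReLU applied to real and imaginary parts separately
    print(complex_max_pool2d(z, 2).shape)   # pooling by magnitude, phase carried over
    print(complex_dropout(z, p=0.5).shape)  # same dropout mask for real and imaginary parts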
|
docs/conf.py | hbrodin/polytracker | 304 | 12701206 | <gh_stars>100-1000
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import subprocess
from polytracker import version as version_string
# -- Project information -----------------------------------------------------
project = "PolyTracker"
copyright = "2019–2021, Trail of Bits"
author = "<NAME> and <NAME>"
# The full version, including alpha/beta/rc tags
release = version_string()
version = release
github_url = f"https://github.com/trailofbits/polytracker"
# Has this version been released yet?
if subprocess.call(["git", "rev-list" f"v{version}"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0:
# There is a tag associated with this release
github_url = f"{github_url}releases/tag/v{version}"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.autosectionlabel',
'sphinx_rtd_theme',
#'sphinxcontrib.fulltoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'classic'
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'canonical_url': f'https://trailofbits.github.io/polytracker/latest/',
'logo_only': False,
'display_version': False, # This manually configured in our custom templates
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
#'vcs_pageview_mode': '',
#'style_nav_header_background': 'white',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_context = {
'github_url': github_url
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_js_files = [
# 'localtoc.js',
#]
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
def docstring_callback(app, what, name, obj, options, lines: list):
if what == 'class' or what == 'function':
if lines and lines[0].strip():
lines.insert(1, '')
lines.insert(2, name)
lines.insert(3, '*' * len(name))
if len(lines) == 4:
lines.append('')
def setup(app):
app.connect("autodoc-skip-member", skip)
#app.connect('autodoc-process-docstring', docstring_callback)
add_package_names = False
# prefix each section label with the name of the document it is in, followed by a colon
autosectionlabel_prefix_document = True
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
todo_include_todos = True
#autodoc_default_options = {
# 'inherited-members': True
#}
|
src/modules/catalog/module.py | Ermlab/python-ddd | 308 | 12701216 | from seedwork.application.modules import BusinessModule
from .domain.repositories import ListingRepository
from modules.catalog.application.query.get_all_listings import (
GetAllListings,
get_all_listings,
)
from modules.catalog.application.query.get_listings_of_seller import (
GetListingsOfSeller,
get_listings_of_seller,
)
from modules.catalog.application.query.get_listing_details import (
GetListingDetails,
get_listing_details,
)
from modules.catalog.application.command.create_listing_draft import (
CreateListingDraftCommand,
create_listing_draft,
)
class CatalogModule(BusinessModule):
query_handlers = {
        GetAllListings: lambda self, q: get_all_listings(q, self.listing_repository),
GetListingDetails: lambda self, q: get_listing_details(
q, self.listing_repository
),
GetListingsOfSeller: lambda self, q: get_listings_of_seller(
q, self.listing_repository
),
}
command_handlers = {
CreateListingDraftCommand: lambda self, c: create_listing_draft(
c, self.listing_repository
),
}
def __init__(
self,
listing_repository: ListingRepository,
) -> None:
self.listing_repository = listing_repository
@staticmethod
def create(container):
"""Factory method for creating a module by using dependencies from a DI container"""
        return CatalogModule(
            listing_repository=container.listing_repository(),
        )
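# Illustrative wiring sketch (InMemoryListingRepository and the empty query
# constructor are hypothetical; real dispatch goes through the BusinessModule base):
#
#     module = CatalogModule(listing_repository=InMemoryListingRepository())
#     handler = CatalogModule.query_handlers[GetAllListings]
#     listings = handler(module, GetAllListings())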
|
fedot/sensitivity/pipeline_sensitivity.py | rozlana-g/FEDOT | 358 | 12701231 | import json
from os.path import join
from typing import List, Type, Optional
from fedot.core.data.data import InputData
from fedot.core.log import Log, default_log
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.utils import default_fedot_data_dir
from fedot.sensitivity.operations_hp_sensitivity.multi_operations_sensitivity import MultiOperationsHPAnalyze
from fedot.sensitivity.sa_requirements import SensitivityAnalysisRequirements
class PipelineAnalysis:
"""
This class is for analyzing the Pipeline as the black-box model,
using analysis approaches defined for whole pipeline perturbation,
i.e. MultiOperationsHPAnalyze.
:param pipeline: pipeline object to analyze
:param train_data: data used for Pipeline training
:param test_data: data used for Pipeline validation
:param approaches: methods applied to pipeline \
Default: [MultiOperationsHPAnalyze]
:param requirements: extra requirements to define specific details for different approaches.\
See SensitivityAnalysisRequirements class documentation.
    :param path_to_save: path to save results to. Default: ~home/Fedot/sensitivity/pipeline_sa
:param log: log: Log object to record messages
"""
def __init__(self, pipeline: Pipeline, train_data: InputData, test_data: InputData,
approaches: Optional[List[Type[MultiOperationsHPAnalyze]]] = None,
requirements: SensitivityAnalysisRequirements = None,
path_to_save=None, log: Log = None):
self.pipeline = pipeline
self.train_data = train_data
self.test_data = test_data
self.requirements = \
SensitivityAnalysisRequirements() if requirements is None else requirements
self.approaches = [MultiOperationsHPAnalyze] if approaches is None else approaches
self.path_to_save = \
join(default_fedot_data_dir(), 'sensitivity', 'pipeline_sa') if path_to_save is None else path_to_save
self.log = default_log(__name__) if log is None else log
def analyze(self) -> dict:
"""
Apply defined approaches for the black-box pipeline analysis
:return:
"""
all_approaches_results = dict()
for approach in self.approaches:
analyze_result = approach(pipeline=self.pipeline,
train_data=self.train_data,
test_data=self.test_data,
requirements=self.requirements).analyze()
all_approaches_results[f'{approach.__name__}'] = analyze_result
if self.requirements.is_save:
self._save_results_to_json(all_approaches_results)
return all_approaches_results
def _save_results_to_json(self, result: dict):
result_file = join(self.path_to_save, 'pipeline_SA_results.json')
with open(result_file, 'w', encoding='utf-8') as file:
file.write(json.dumps(result, indent=4))
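# Usage sketch (illustrative; `pipeline`, `train_data` and `test_data` are assumed
# to be prepared with FEDOT's regular Pipeline / InputData APIs):
#
#     analysis = PipelineAnalysis(pipeline, train_data, test_data)
#     results = analysis.analyze()   # {'MultiOperationsHPAnalyze': ...}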
|
examples/particles.py | DrewMQ/pygame_tutorials | 544 | 12701256 | # Particles for pygame
# by KidsCanCode 2015
# For educational purposes only
import pygame
import random
# TODO: particle rotations
# TODO: test with varied particle images
# TODO: more particle paths
def interpolate(v1, v2, range):
return pygame.math.Vector2(v1.x + (v2.x - v1.x) * range,
v1.y + (v2.y - v1.y) * range)
class Particle(pygame.sprite.Sprite):
def __init__(self, game, image, pos, vel, life, lifetime,
fade_start, dorotate):
pygame.sprite.Sprite.__init__(self)
self.game = game
self.pos = pos
self.vel = vel
self.rot_cache = {}
self.base_image = image
        self.dorotate = dorotate
        self.rot = 0  # rotation angle in degrees; used below when dorotate is True
        if dorotate:
self.image = pygame.transform.rotate(self.base_image, -self.rot)
else:
self.image = self.base_image.copy()
self.rect = self.image.get_rect()
self.lifetime = lifetime
self.life = life
self.fade_start = fade_start
self.duration = lifetime - fade_start
self.update()
def update(self):
# if self.dorotate:
# old_center = self.rect.center
# if self.rot in self.rot_cache:
# self.image = self.rot_cache[self.rot]
# else:
# self.image = pygame.transform.rotate(self.base_image, -self.rot)
# self.rot_cache[self.rot] = self.image
# self.rect = self.image.get_rect()
# self.rect.center = old_center
self.life += self.game.dt
self.fade()
self.pos += self.vel
self.rect.centerx = self.pos.x # + self.game.OFFSET.x
self.rect.centery = self.pos.y # + self.game.OFFSET.y
def blit(self):
return self.game.screen.blit(self.image, self.rect, special_flags=pygame.BLEND_ADD)
def fade(self):
if self.life > self.fade_start:
try:
ratio = (self.life - self.fade_start) / self.duration
except ZeroDivisionError:
ratio = 1.0
if ratio > 1.0:
ratio = 1.0
mask = int(255 * (1 - ratio))
self.image.fill([mask, mask, mask], special_flags=pygame.BLEND_MIN)
def is_dead(self):
if self.life > self.lifetime:
return True
return False
class ParticleEmitter:
def __init__(self, game, parent, offset, vel, image, count, lifetime,
fade_start, size, angle_range, dorotate=False):
self.game = game
self.parent = parent
self.offset = offset
self.particle_vel = vel
self.pos = self.parent.pos + self.game.OFFSET + self.offset.rotate(self.parent.rot)
self.base_image = image
self.size = size
self.angle_range = angle_range
self.image = pygame.transform.scale(self.base_image, (self.size, self.size))
self.count = count
self.lifetime = lifetime
self.fade_start = fade_start
self.particles = []
self.timer = 0
self.prevcurve = [self.pos for x in range(3)]
self.active = True
def print_state(self):
print("c:{}, p:{}".format(self.count, len(self.particles)))
def update(self):
self.pos = self.parent.pos + self.game.OFFSET + self.offset.rotate(-self.parent.rot)
self.rand_angle = random.randint(-self.angle_range, self.angle_range)
# update all particles
for part in self.particles:
part.update()
if part.is_dead():
self.particles.remove(part)
# print("p.kill")
# create a new particle
if self.count != 0 and self.active:
self.timer += self.game.dt
newparticles = self.count * self.timer
if newparticles > 1:
for i in range(int(newparticles)):
t = i / newparticles
time_elapsed = (1.0 - t) * self.game.dt
vel = self.particle_vel.rotate(-self.parent.rot + self.rand_angle)
pos = interpolate(self.prevcurve[0], self.pos, t)
pos += (self.parent.vel + vel) * time_elapsed
# pos += vel * time_elapsed
init_life = time_elapsed
self.timer = 0
# print("new part: pos: {} vel: {}".format(pos, vel))
self.particles.append(Particle(self.game, self.image, pos,
vel, init_life, self.lifetime,
self.fade_start, False))
self.prevcurve[2] = self.prevcurve[1]
self.prevcurve[1] = self.prevcurve[0]
self.prevcurve[0] = self.pos
def draw(self):
rects = []
for part in self.particles:
rects.append(part.blit())
return rects
def kill_all(self):
self.count = 0
self.active = False
self.particles = []
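# The `game` object passed in above is expected to provide (as read by this module):
# `game.dt` (seconds since last frame), `game.screen` (the display surface) and
# `game.OFFSET` (a camera offset vector); `parent` needs `pos`, `vel` and `rot`.
# A minimal wiring sketch (illustrative names such as `ship` and `flame_img`):
#
#     emitter = ParticleEmitter(game, ship, pygame.math.Vector2(0, 10),
#                               pygame.math.Vector2(0, 3), flame_img,
#                               count=60, lifetime=0.5, fade_start=0.2,
#                               size=8, angle_range=15)
#     emitter.update()   # once per frame
#     emitter.draw()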
|
Anacoref/py/extract_anaphora.py | Koziev/NLP_Datasets | 257 | 12701259 | <reponame>Koziev/NLP_Datasets<filename>Anacoref/py/extract_anaphora.py
"""
Parsing of the datasets from the Rucoref-2015 shared task (anaphora resolution etc.)
Description of the source dataset and task: http://www.dialog-21.ru/evaluation/2014/anaphora/
"""
import io
import os
import pandas as pd
import csv
rucoref_folder = '../../../data/rucoref_2015/rucoref_29.10.2015'
output_file = '../../../tmp/ruanaphora_corpus.dat'
df_tokens = pd.read_csv(os.path.join(rucoref_folder, 'Tokens.txt'), encoding='utf-8', delimiter='\t', quoting=3)
df_groups = pd.read_csv(os.path.join(rucoref_folder, 'Groups.txt'), encoding='utf-8', delimiter='\t', quoting=3)
groupid2content = dict(zip(df_groups['group_id'].values, df_groups['content']))
groupid2link = dict(zip(df_groups['group_id'].values, df_groups['link']))
token2refcontent = dict()
for i, r in df_groups.iterrows():
doc_id = r['doc_id']
shift = r['shift']
link = r['link']
attr = r['attributes']
if attr in ['ref:def|str:pron|type:anaph', 'ref:def|str:pron|type:coref']:
token_id = (doc_id, shift)
if link != 0:
new_groupid = link
njump = 0
while njump < 5:
link2 = groupid2link[new_groupid]
if link2 != 0:
new_groupid = groupid2link[new_groupid]
njump += 1
else:
break
token2refcontent[token_id] = groupid2content[new_groupid]
df_res = pd.DataFrame(columns='doc_id shift token lemma gram refcontent'.split(), index=None)
n_discovered = 0
for i, r in df_tokens.iterrows():
doc_id = r['doc_id']
shift = r['shift']
token_id = (doc_id, shift)
token = r['token']
lemma = r['lemma']
gram = r['gram']
refcontent = token2refcontent.get(token_id, '')
n_discovered += refcontent != ''
df_res = df_res.append({'doc_id': doc_id, 'shift': shift, 'token': token, 'lemma': lemma, 'gram': gram, 'refcontent': refcontent}, ignore_index=True)
df_res.to_csv(output_file, quoting=csv.QUOTE_MINIMAL, index=False, sep='\t')
print(u'anaphora resolved={}'.format(n_discovered))
|
src/fava/serialisation.py | psimonyi/fava | 1,224 | 12701264 | <gh_stars>1000+
"""(De)serialisation of entries.
When adding entries, these are saved via the JSON API - using the functionality
of this module to obtain the appropriate data structures from
`beancount.core.data`. Similarly, for the full entry completion, a JSON
representation of the entry is provided.
This is not intended to work well enough for full roundtrips yet.
"""
import datetime
import functools
import re
from typing import Any
from typing import FrozenSet
from typing import Optional
from typing import Tuple
from beancount.core.amount import Amount
from beancount.core.data import Balance
from beancount.core.data import Directive
from beancount.core.data import EMPTY_SET
from beancount.core.data import Note
from beancount.core.data import Posting
from beancount.core.data import Transaction
from beancount.core.number import D
from beancount.core.position import to_string as position_to_string
from beancount.parser.parser import parse_string
from fava.helpers import FavaAPIException
from fava.util.date import parse_date
def extract_tags_links(
    string: Optional[str],
) -> Tuple[Optional[str], FrozenSet[str], FrozenSet[str]]:
"""Extract tags and links from a narration string.
Args:
string: A string, possibly containing tags (`#tag`) and links
(`^link`).
Returns:
A triple (new_string, tags, links) where `new_string` is `string`
stripped of tags and links.
"""
if string is None:
return None, EMPTY_SET, EMPTY_SET
tags = re.findall(r"(?:^|\s)#([A-Za-z0-9\-_/.]+)", string)
links = re.findall(r"(?:^|\s)\^([A-Za-z0-9\-_/.]+)", string)
new_string = re.sub(r"(?:^|\s)[#^]([A-Za-z0-9\-_/.]+)", "", string).strip()
return new_string, frozenset(tags), frozenset(links)
@functools.singledispatch
def serialise(entry: Directive) -> Any:
"""Serialise an entry."""
if not entry:
return None
ret = entry._asdict()
ret["type"] = entry.__class__.__name__
if isinstance(entry, Transaction):
ret["payee"] = entry.payee or ""
if entry.tags:
ret["narration"] += " " + " ".join(["#" + t for t in entry.tags])
if entry.links:
ret["narration"] += " " + " ".join(
["^" + link for link in entry.links]
)
del ret["links"]
del ret["tags"]
ret["postings"] = [serialise(pos) for pos in entry.postings]
elif ret["type"] == "Balance":
amt = ret["amount"]
ret["amount"] = {"number": str(amt.number), "currency": amt.currency}
return ret
@serialise.register(Posting)
def _serialise_posting(posting: Posting) -> Any:
"""Serialise a posting."""
if isinstance(posting.units, Amount):
position_str = position_to_string(posting)
else:
position_str = ""
if posting.price is not None:
position_str += f" @ {posting.price.to_string()}"
return {"account": posting.account, "amount": position_str}
def deserialise_posting(posting: Any) -> Posting:
"""Parse JSON to a Beancount Posting."""
amount = posting.get("amount", "")
entries, errors, _ = parse_string(
f'2000-01-01 * "" ""\n Assets:Account {amount}'
)
if errors:
raise FavaAPIException(f"Invalid amount: {amount}")
txn = entries[0]
assert isinstance(txn, Transaction)
pos = txn.postings[0]
return pos._replace(account=posting["account"], meta=None)
def deserialise(json_entry: Any) -> Directive:
"""Parse JSON to a Beancount entry.
Args:
json_entry: The entry.
Raises:
KeyError: if one of the required entry fields is missing.
FavaAPIException: if the type of the given entry is not supported.
"""
date = parse_date(json_entry.get("date", ""))[0]
if not isinstance(date, datetime.date):
raise FavaAPIException("Invalid entry date.")
if json_entry["type"] == "Transaction":
narration, tags, links = extract_tags_links(json_entry["narration"])
postings = [deserialise_posting(pos) for pos in json_entry["postings"]]
return Transaction(
json_entry["meta"],
date,
json_entry.get("flag", ""),
json_entry.get("payee", ""),
narration,
tags,
links,
postings,
)
if json_entry["type"] == "Balance":
raw_amount = json_entry["amount"]
amount = Amount(D(str(raw_amount["number"])), raw_amount["currency"])
return Balance(
json_entry["meta"], date, json_entry["account"], amount, None, None
)
if json_entry["type"] == "Note":
comment = json_entry["comment"].replace('"', "")
return Note(json_entry["meta"], date, json_entry["account"], comment)
raise FavaAPIException("Unsupported entry type.")
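# Example of the tag/link extraction above (illustrative):
#
#     extract_tags_links("Dinner #food ^invoice-1")
#     # -> ("Dinner", frozenset({"food"}), frozenset({"invoice-1"}))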
|
dl/dl-python/ipython_config.py | ReDeiPirati/dockerfiles | 168 | 12701297 | <filename>dl/dl-python/ipython_config.py
# Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = ['%load_ext autoreload', '%autoreload 2']
## A list of dotted module names of IPython extensions to load.
c.InteractiveShellApp.extensions = ['autoreload']
|
OnlineStudy/rbac/middlewares/rbac.py | NanRenTeam-9/MongoMicroCourse | 132 | 12701325 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import re
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.shortcuts import HttpResponse
class RbacMiddleware(MiddlewareMixin):
def process_request(self, request):
current_url = request.path_info
        # Whitelist: URLs exempt from permission checks
for valid in settings.VALID_URL:
if re.match(valid, current_url):
                return None  # returning None lets the middleware pass the request through to the view
        # Fetch the user's permissions from the session
permission_dict = request.session.get(settings.INIT_PERMISSION)
if not permission_dict:
            return HttpResponse('User data not found in session, please log in!')
        # Breadcrumb navigation
        url_navigation = [
            {'title': 'Home', 'url': '/index/'}
]
        # Handle URLs such as /logout and /index here
        for url in settings.NO_PERMISSION_LIST:
            if re.match(url, request.path_info):
                # Login is required, but no permission check is needed
request.current_menu_selected = 0
request.url_navigation = url_navigation
return None
flag = False
for item in permission_dict.values():
reg = '^%s$' % item['url']
if re.match(reg, current_url):
                # Determine the currently selected menu id: check pid first, then id
                # Non-menu permissions are attached to a menu: if pid is set, this is a
                # non-menu permission and pid points to the parent menu; if only id is
                # set, this is a second-level menu permission
                # Note: the order of item['pid'] or item['id'] below matters
request.current_menu_selected = item['pid'] or item['id']
flag = True
                # Build the breadcrumb trail
if item['pid']:
url_navigation.extend([
{'title': item['p_title'], 'url': item['p_url']},
{'title': item['title'], 'url': item['url'], 'class': 'active'}
])
else:
url_navigation.extend([
{'title': item['title'], 'url': item['url'], 'class': 'active'},
])
request.url_navigation = url_navigation
break
if not flag:
            return HttpResponse('Access denied')
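# The settings consulted above are expected to look roughly like this
# (illustrative values; the real patterns live in the project's settings module):
#
#     VALID_URL = [r'^/login/$', r'^/admin/.*']          # no login or permission check
#     NO_PERMISSION_LIST = [r'^/logout/', r'^/index/']   # login required, no permission check
#     INIT_PERMISSION = 'permission_url_dict'            # session key holding the permission dict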
|
tests/test_tensor/test_hybrid_device.py | hpcaitech/ColossalAI | 1,630 | 12701389 | <filename>tests/test_tensor/test_hybrid_device.py<gh_stars>1000+
from colossalai.utils import free_port, get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.tensor import ComputePattern, ParallelAction
from functools import partial
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.nn.parallel.layers import init_colo_module
from colossalai.nn.parallel.data_parallel import ColoDDP
from colossalai.nn.optimizer import ColoOptimizer
import colossalai
import torch
import torch.multiprocessing as mp
import pytest
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.embed = torch.nn.Embedding(20, 4)
self.proj = torch.nn.Linear(4, 8)
def forward(self, x):
# move input to cpu and restore output
current_dev = x.device
x = x.to('cpu')
x = self.embed(x)
x = x.to(current_dev)
x = self.proj(x)
return x
def run_hybrid_device(use_ddp):
with ColoInitContext(device=get_current_device()):
model = Net()
real_model = model
if use_ddp:
model = ColoDDP(model)
real_model = model.module
print(f'embedding weight size: {real_model.embed.weight.size()} | device: {real_model.embed.weight.device}')
#print(f'linear weight size: {real_model.proj.weight.size()} | device: {real_model.proj.weight.device}')
parallel_action = ParallelAction(ComputePattern.TP1D)
init_colo_module(model, parallel_action, recursive=True, mode='col')
# use cpu gloo to handle embedding
real_model.embed.to('cpu')
gloo_group_tp = gpc.get_cpu_group(ParallelMode.PARALLEL_1D)
real_model.embed.weight.spec.dist_spec.process_group = gloo_group_tp
print(f'embedding weight size: {real_model.embed.weight.size()} | new device: {real_model.embed.weight.device}')
#print(f'linear weight size: {real_model.proj.weight.size()} | new device: {real_model.proj.weight.device}')
optimizer = ColoOptimizer(dict(model.named_parameters()), torch.optim.SGD, lr=0.1)
data = torch.randint(low=0, high=20, size=(16,), device=get_current_device())
out = model(data)
out.sum().backward()
optimizer.step()
def run_dist(rank, world_size, port, use_ddp):
if use_ddp and world_size == 1:
return
tp_world_size = world_size // 2 if use_ddp else world_size
config = dict(parallel=dict(tensor=dict(mode="1d", size=tp_world_size),))
colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_hybrid_device(use_ddp)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@pytest.mark.parametrize('use_ddp', [False, True])
@rerun_if_address_is_in_use()
# Simulates an embedding on CPU (DP+TP) feeding a linear layer on GPU (DP+TP)
def _test_hybrid_device(world_size, use_ddp):
run_func = partial(run_dist, world_size=world_size, port=free_port(), use_ddp=use_ddp)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
_test_hybrid_device(4, True)
|
train.py | mikito0011/Chainer_Mask_R-CNN | 153 | 12701394 | import chainer
from chainer import training
from chainer.training import extensions, ParallelUpdater
from chainer.training.triggers import ManualScheduleTrigger
from chainer.datasets import TransformDataset
from chainercv.datasets import VOCBboxDataset, voc_bbox_label_names
from chainercv import transforms
from chainercv.transforms.image.resize import resize
import argparse
import numpy as np
import time
#from mask_rcnn_vgg import MaskRCNNVGG16
from mask_rcnn_resnet import MaskRCNNResNet
from coco_dataset import COCODataset
from mask_rcnn_train_chain import MaskRCNNTrainChain
from utils.bn_utils import freeze_bn, bn_to_affine
from utils.cocoapi_evaluator import COCOAPIEvaluator
from utils.detection_coco_evaluator import DetectionCOCOEvaluator
import logging
import traceback
from utils.updater import SubDivisionUpdater
import cv2
def resize_bbox(bbox, in_size, out_size):
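    # Converts boxes from (x, y, w, h) layout (as in COCO annotations) to
    # (y_min, x_min, y_max, x_max) layout (as used by ChainerCV), while rescaling
    # them from the original in_size = (H, W) to the resized out_size = (H, W).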
bbox_o = bbox.copy()
y_scale = float(out_size[0]) / in_size[0]
x_scale = float(out_size[1]) / in_size[1]
bbox_o[:, 0] = y_scale * bbox[:, 1]
bbox_o[:, 2] = y_scale * (bbox[:, 1]+bbox[:, 3])
bbox_o[:, 1] = x_scale * bbox[:, 0]
bbox_o[:, 3] = x_scale * (bbox[:, 0]+bbox[:, 2])
return bbox_o
def parse():
parser = argparse.ArgumentParser(
description='Mask RCNN trainer')
parser.add_argument('--dataset', choices=('coco2017'),
default='coco2017')
parser.add_argument('--extractor', choices=('resnet50','resnet101'),
default='resnet50', help='extractor network')
parser.add_argument('--gpu', '-g', type=int, default=0)
parser.add_argument('--lr', '-l', type=float, default=1e-4)
parser.add_argument('--batchsize', '-b', type=int, default=8)
parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze batchnorm gamma/beta')
parser.add_argument('--bn2affine', action='store_true', default=False, help='batchnorm to affine')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--roialign', action='store_false', default=True, help='default: True')
parser.add_argument('--lr_step', '-ls', type=int, default=120000)
parser.add_argument('--lr_initialchange', '-li', type=int, default=400)
parser.add_argument('--pretrained', '-p', type=str, default='imagenet')
parser.add_argument('--snapshot', type=int, default=4000)
parser.add_argument('--validation', type=int, default=30000)
parser.add_argument('--resume', type=str)
parser.add_argument('--iteration', '-i', type=int, default=180000)
parser.add_argument('--roi_size', '-r', type=int, default=14, help='ROI size for mask head input')
parser.add_argument('--gamma', type=float, default=1, help='mask loss weight')
return parser.parse_args()
class Transform(object):
def __init__(self, net, labelids):
self.net = net
self.labelids = labelids
def __call__(self, in_data):
if len(in_data)==5:
img, label, bbox, mask, i = in_data
elif len(in_data)==4:
img, bbox, label, i= in_data
label = [self.labelids.index(l) for l in label]
_, H, W = img.shape
if chainer.config.train:
img = self.net.prepare(img)
_, o_H, o_W = img.shape
scale = o_H / H
if len(bbox)==0:
return img, [],[],1
bbox = resize_bbox(bbox, (H, W), (o_H, o_W))
mask = resize(mask,(o_H, o_W))
if chainer.config.train:
#horizontal flip
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
bbox = transforms.flip_bbox(
bbox, (o_H, o_W), x_flip=params['x_flip'])
mask = transforms.flip(mask, x_flip=params['x_flip'])
return img, bbox, label, scale, mask, i
def convert(batch, device):
return chainer.dataset.convert.concat_examples(batch, device, padding=-1)
def main():
args = parse()
np.random.seed(args.seed)
print('arguments: ', args)
# Model setup
if args.dataset == 'coco2017':
train_data = COCODataset()
test_data = COCODataset(json_file='instances_val2017.json', name='val2017', id_list_file='val2017.txt')
train_class_ids =train_data.class_ids
test_ids = test_data.ids
cocoanns = test_data.coco
if args.extractor=='vgg16':
mask_rcnn = MaskRCNNVGG16(n_fg_class=80, pretrained_model=args.pretrained, roi_size=args.roi_size, roi_align = args.roialign)
elif args.extractor=='resnet50':
mask_rcnn = MaskRCNNResNet(n_fg_class=80, pretrained_model=args.pretrained,roi_size=args.roi_size, n_layers=50, roi_align = args.roialign, class_ids=train_class_ids)
elif args.extractor=='resnet101':
mask_rcnn = MaskRCNNResNet(n_fg_class=80, pretrained_model=args.pretrained,roi_size=args.roi_size, n_layers=101, roi_align = args.roialign, class_ids=train_class_ids)
mask_rcnn.use_preset('evaluate')
model = MaskRCNNTrainChain(mask_rcnn, gamma=args.gamma, roi_size=args.roi_size)
# Trainer setup
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
#optimizer = chainer.optimizers.Adam()#alpha=0.001, beta1=0.9, beta2=0.999 , eps=0.00000001)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0001))
train_data=TransformDataset(train_data, Transform(mask_rcnn, train_class_ids))
test_data=TransformDataset(test_data, Transform(mask_rcnn, train_class_ids))
train_iter = chainer.iterators.SerialIterator(
train_data, batch_size=args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test_data, batch_size=1, repeat=False, shuffle=False)
updater = SubDivisionUpdater(train_iter, optimizer, device=args.gpu, subdivisions=args.batchsize)
#updater = ParallelUpdater(train_iter, optimizer, devices={"main": 0, "second": 1}, converter=convert ) #for training with multiple GPUs
trainer = training.Trainer(
updater, (args.iteration, 'iteration'), out=args.out)
# Extensions
trainer.extend(
extensions.snapshot_object(model.mask_rcnn, 'snapshot_model.npz'),
trigger=(args.snapshot, 'iteration'))
trainer.extend(extensions.ExponentialShift('lr', 10),
trigger=ManualScheduleTrigger(
[args.lr_initialchange], 'iteration'))
trainer.extend(extensions.ExponentialShift('lr', 0.1),
trigger=(args.lr_step, 'iteration'))
if args.resume is not None:
chainer.serializers.load_npz(args.resume, model.mask_rcnn)
if args.freeze_bn:
freeze_bn(model.mask_rcnn)
if args.bn2affine:
bn_to_affine(model.mask_rcnn)
log_interval = 40, 'iteration'
plot_interval = 160, 'iteration'
print_interval = 40, 'iteration'
#trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(args.validation, 'iteration'))
#trainer.extend(DetectionCOCOEvaluator(test_iter, model.mask_rcnn), trigger=(args.validation, 'iteration')) #COCO AP Evaluator with VOC metric
trainer.extend(COCOAPIEvaluator(test_iter, model.mask_rcnn, test_ids, cocoanns), trigger=(args.validation, 'iteration')) #COCO AP Evaluator
trainer.extend(chainer.training.extensions.observe_lr(),
trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
['iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/avg_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'validation/main/loss',
'validation/main/map',
]), trigger=print_interval)
trainer.extend(extensions.ProgressBar(update_interval=1000))
#trainer.extend(extensions.dump_graph('main/loss'))
try:
trainer.run()
except:
traceback.print_exc()
if __name__ == '__main__':
main()
|
clearly/server/streaming_dispatcher.py | lowercase00/clearly | 344 | 12701397 | import logging
import re
import signal
import threading
from contextlib import contextmanager
from enum import Enum
from queue import Empty, Queue
from typing import List, Optional, Pattern, Tuple, Union, Callable
from ..protos.clearly_pb2 import PatternFilter, TaskMessage, WorkerMessage
from ..utils.data import accept_task, accept_worker
logger = logging.getLogger(__name__)
BASE_THREAD_NAME = 'clearly-dispatcher'
class Role(Enum):
TASKS = (accept_task,)
WORKERS = (accept_worker,)
def __new__(cls, func_accept):
obj = object.__new__(cls)
obj._value_ = len(cls.__members__) + 1
obj.__func_accept = func_accept
return obj
@property
def thread_name(self) -> str:
return '{}-{}'.format(BASE_THREAD_NAME, self.name.lower())
@property
def func_accept(self) -> Callable[[Pattern, bool, Union[TaskMessage, WorkerMessage]], bool]:
return self.__func_accept
class StreamingDispatcher:
"""Dispatch events to connected clients.
    Server object, gets cleaned tasks and workers and sends them to interested parties.
    One instance takes care of only one of those, so two instances are needed.
Attributes:
queue_input: to receive from event listener
observers: currently connected clients, interested in real time worker events
role: current role this dispatcher is running
"""
def __init__(self, queue_input: Queue, role: Role):
"""Construct a client dispatcher instance.
Args:
queue_input: to receive from event listener
"""
logger.info('Creating %s', StreamingDispatcher.__name__)
self.queue_input, self.role = queue_input, role
self.observers: List[Tuple[Queue, Pattern, bool]] = []
# running engine (should be asyncio in the future)
self.dispatcher_thread: Optional[threading.Thread] = None
# detect shutdown.
def sigterm_handler(_signo, _stack_frame): # pragma: no cover
self.__stop()
signal.signal(signal.SIGTERM, sigterm_handler)
self.__start()
@contextmanager
def streaming_capture(self, capture: PatternFilter, queue: Queue) -> None:
"""Put a connected client in streaming capture mode, filtering all
incoming events in real time.
Args:
capture: the criteria for desired events
queue: where to put the matching events
"""
observer = queue, re.compile(capture.pattern), capture.negate
# should not need any locks, thanks to GIL
self.observers.append(observer)
try:
yield
finally:
self.observers.remove(observer)
def __start(self) -> None: # pragma: no cover
"""Start the real time engine that captures tasks."""
assert not self.dispatcher_thread
self.dispatcher_thread = threading.Thread(target=self.__run, name=self.role.thread_name)
self.dispatcher_thread.daemon = True
self.running = True # graceful shutdown
self.dispatcher_thread.start()
def __stop(self) -> None: # pragma: no cover
"""Stop the background engine."""
if not self.dispatcher_thread:
return
logger.info('Stopping %s', self.role.thread_name)
self.running = False # graceful shutdown
self.dispatcher_thread.join(1)
self.dispatcher_thread = None
def __run(self) -> None: # pragma: no cover
logger.info('Starting: %r', threading.current_thread())
while self.running:
try:
message = self.queue_input.get(timeout=1)
except Empty:
continue
self._dispatch(message)
logger.info('Stopped: %r', threading.current_thread())
def _dispatch(self, message: Union[TaskMessage, WorkerMessage]) -> None:
# let's see who's interested.
for q, pattern, negate in self.observers:
if self.role.func_accept(pattern, negate, message):
q.put(message)
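# Usage sketch (illustrative; assumes the PatternFilter protobuf exposes the
# `pattern` and `negate` fields used above, and that `queue_input` is fed by the
# event listener):
#
#     dispatcher = StreamingDispatcher(queue_input, Role.TASKS)
#     client_queue = Queue()
#     with dispatcher.streaming_capture(PatternFilter(pattern='.*', negate=False), client_queue):
#         task_message = client_queue.get(timeout=5)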
|
tests/test_3_mac_receive_jpg.py | fjolublar/imagezmq | 823 | 12701435 | """test_3_mac_receive_jpg.py -- receive & display jpg stream.
A simple test program that uses imagezmq to receive an image jpg stream from a
Raspberry Pi and display it as a video stream.
1. Run this program in its own terminal window on the mac:
python test_3_mac_receive_jpg.py
This "receive and display images" program must be running before starting the
RPi sending program.
2. Run the jpg sending program on the RPi:
python test_3_rpi_send_jpg.py
A cv2.imshow() window will appear on the Mac showing the transmitted images as
a video stream. You can repeat Step 2 and start the test_3_rpi_send_jpg.py on
multiple RPis and each one will cause a new cv2.imshow() window to open.
To end the programs, press Ctrl-C in the terminal window of the RPi first.
Then press Ctrl-C in the terminal window of the receiving proram. You may
have to press Ctrl-C in the display window as well.
"""
import sys
import numpy as np
import cv2
import imagezmq
image_hub = imagezmq.ImageHub()
while True: # show streamed images until Ctrl-C
rpi_name, jpg_buffer = image_hub.recv_jpg()
image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
# see opencv docs for info on -1 parameter
cv2.imshow(rpi_name, image) # 1 window for each RPi
cv2.waitKey(1)
image_hub.send_reply(b'OK')
|
siteroot/settings/dev.py | voidfun/linkding | 1,312 | 12701450 | <filename>siteroot/settings/dev.py
"""
Development settings for linkding webapp
"""
# Start from the base settings
# noinspection PyUnresolvedReferences
from .base import *
# Turn on debug mode
DEBUG = True
# Turn on SASS compilation
SASS_PROCESSOR_ENABLED = True
# Enable debug toolbar
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
INTERNAL_IPS = [
'127.0.0.1',
]
# Enable debug logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '{levelname} {message}',
'style': '{',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'root': {
'handlers': ['console'],
'level': 'WARNING',
},
'loggers': {
'django.db.backends': {
'level': 'ERROR', # Set to DEBUG to log all SQL calls
'handlers': ['console'],
},
'bookmarks.services.tasks': { # Log task output
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
}
}
}
# Import custom settings
# noinspection PyUnresolvedReferences
from .custom import *
|
services/ui_backend_service/api/flow.py | runsascoded/metaflow-service | 103 | 12701451 | <gh_stars>100-1000
from services.utils import handle_exceptions
from .utils import find_records
class FlowApi(object):
def __init__(self, app, db):
self.db = db
app.router.add_route("GET", "/flows", self.get_all_flows)
app.router.add_route("GET", "/flows/{flow_id}", self.get_flow)
self._async_table = self.db.flow_table_postgres
@handle_exceptions
async def get_flow(self, request):
"""
---
description: Get one flow
tags:
- Flow
parameters:
- $ref: '#/definitions/Params/Path/flow_id'
produces:
- application/json
responses:
"200":
description: Returns one flow
schema:
$ref: '#/definitions/ResponsesFlow'
"405":
description: invalid HTTP Method
schema:
$ref: '#/definitions/ResponsesError405'
"""
flow_name = request.match_info.get("flow_id")
return await find_records(request,
self._async_table,
fetch_single=True,
initial_conditions=["flow_id = %s"],
initial_values=[flow_name])
@handle_exceptions
async def get_all_flows(self, request):
"""
---
description: Get all flows
tags:
- Flow
parameters:
- $ref: '#/definitions/Params/Builtin/_page'
- $ref: '#/definitions/Params/Builtin/_limit'
- $ref: '#/definitions/Params/Builtin/_order'
- $ref: '#/definitions/Params/Builtin/_tags'
- $ref: '#/definitions/Params/Builtin/_group'
- $ref: '#/definitions/Params/Custom/flow_id'
- $ref: '#/definitions/Params/Custom/user_name'
- $ref: '#/definitions/Params/Custom/ts_epoch'
produces:
- application/json
responses:
"200":
description: Returns all flows
schema:
$ref: '#/definitions/ResponsesFlowList'
"405":
description: invalid HTTP Method
schema:
$ref: '#/definitions/ResponsesError405'
"""
return await find_records(request,
self._async_table,
initial_conditions=[],
initial_values=[],
allowed_order=self._async_table.keys,
allowed_group=self._async_table.keys,
allowed_filters=self._async_table.keys
)
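# Example requests served by the routes above (illustrative):
#   GET /flows?_limit=10            -> paginated list of flows
#   GET /flows/HelloFlow            -> a single flow record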
|
conans/test/unittests/util/files/test_dirty.py | matthiasng/conan | 6,205 | 12701469 | # coding=utf-8
import os
import unittest
from conans.test.utils.test_files import temp_folder
from conans.util.files import set_dirty, clean_dirty, set_dirty_context_manager, _DIRTY_FOLDER
class DirtyTest(unittest.TestCase):
def setUp(self):
""" Create temporary folder to save dirty state
"""
self.temp_folder = temp_folder()
self.dirty_folder = self.temp_folder + _DIRTY_FOLDER
def test_set_dirty(self):
""" Dirty flag must be created by set_dirty
"""
set_dirty(self.temp_folder)
self.assertTrue(os.path.exists(self.dirty_folder))
def test_clean_dirty(self):
""" Dirty flag must be cleaned by clean_dirty
"""
set_dirty(self.temp_folder)
self.assertTrue(os.path.exists(self.dirty_folder))
clean_dirty(self.temp_folder)
self.assertFalse(os.path.exists(self.dirty_folder))
def test_set_dirty_context(self):
""" Dirty context must remove lock before exiting
"""
with set_dirty_context_manager(self.temp_folder):
self.assertTrue(os.path.exists(self.dirty_folder))
self.assertFalse(os.path.exists(self.dirty_folder))
def test_interrupted_dirty_context(self):
""" Broken context must preserve dirty state
        Raise an exception in the middle of the context. By default,
        the dirty file is not removed.
"""
try:
with set_dirty_context_manager(self.temp_folder):
self.assertTrue(os.path.exists(self.dirty_folder))
raise RuntimeError()
except RuntimeError:
pass
self.assertTrue(os.path.exists(self.dirty_folder))
|
modoboa/policyd/handlers.py | HarshCasper/modoboa | 1,602 | 12701497 | """App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
"""Store message limit in Redis."""
old_message_limit = instance._loaded_values.get("message_limit")
if old_message_limit == instance.message_limit:
return
rclient = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_QUOTA_DB
)
if instance.message_limit is None:
# delete existing key
if rclient.hexists(constants.REDIS_HASHNAME, key):
rclient.hdel(constants.REDIS_HASHNAME, key)
return
if old_message_limit is not None:
diff = instance.message_limit - old_message_limit
else:
diff = instance.message_limit
rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
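# Illustrative effect: raising a domain's message_limit from 100 to 150 issues
# HINCRBY <REDIS_HASHNAME> <domain-or-mailbox key> 50, so the Redis hash always
# mirrors the currently configured limits.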
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
"""Store domain message limit in Redis."""
set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
"""Store mailbox message limit in Redis."""
set_message_limit(instance, instance.full_address)
|
REDSI_1160929_1161573/boost_1_67_0/tools/build/test/railsys.py | Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo | 198 | 12701518 | #!/usr/bin/python
# Copyright 2003 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()
t.set_tree("railsys")
t.run_build_system("--v2", subdir="program")
t.cleanup()
|
examples/pxScene2d/external/libnode-v6.9.0/deps/v8/tools/testrunner/server/presence_handler.py | madanagopaltcomcast/pxCore | 5,964 | 12701539 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import SocketServer
import threading
try:
import ujson as json
except:
import json
from . import constants
from ..objects import peer
STARTUP_REQUEST = "V8 test peer starting up"
STARTUP_RESPONSE = "Let's rock some tests!"
EXIT_REQUEST = "V8 testing peer going down"
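# Wire format (JSON-encoded lists, one per UDP datagram), as handled below:
#   [STARTUP_REQUEST, jobs, relative_perf, pubkey_fingerprint]
#   [STARTUP_RESPONSE, jobs, relative_perf, pubkey_fingerprint, trusted_by_sender]
#   [EXIT_REQUEST]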
def GetOwnIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
class PresenceHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = json.loads(self.request[0].strip())
if data[0] == STARTUP_REQUEST:
jobs = data[1]
relative_perf = data[2]
pubkey_fingerprint = data[3]
trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
response = [STARTUP_RESPONSE, self.server.daemon.jobs,
self.server.daemon.relative_perf,
self.server.daemon.pubkey_fingerprint, trusted]
response = json.dumps(response)
self.server.SendTo(self.client_address[0], response)
p = peer.Peer(self.client_address[0], jobs, relative_perf,
pubkey_fingerprint)
p.trusted = trusted
self.server.daemon.AddPeer(p)
elif data[0] == STARTUP_RESPONSE:
jobs = data[1]
perf = data[2]
pubkey_fingerprint = data[3]
p = peer.Peer(self.client_address[0], jobs, perf, pubkey_fingerprint)
p.trusted = self.server.daemon.IsTrusted(pubkey_fingerprint)
p.trusting_me = data[4]
self.server.daemon.AddPeer(p)
elif data[0] == EXIT_REQUEST:
self.server.daemon.DeletePeer(self.client_address[0])
if self.client_address[0] == self.server.daemon.ip:
self.server.shutdown_lock.release()
class PresenceDaemon(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
def __init__(self, daemon):
self.daemon = daemon
address = (daemon.ip, constants.PRESENCE_PORT)
SocketServer.UDPServer.__init__(self, address, PresenceHandler)
self.shutdown_lock = threading.Lock()
def shutdown(self):
self.shutdown_lock.acquire()
self.SendToAll(json.dumps([EXIT_REQUEST]))
self.shutdown_lock.acquire()
self.shutdown_lock.release()
SocketServer.UDPServer.shutdown(self)
def SendTo(self, target, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message, (target, constants.PRESENCE_PORT))
sock.close()
def SendToAll(self, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = self.daemon.ip.split(".")
for i in range(1, 254):
ip[-1] = str(i)
sock.sendto(message, (".".join(ip), constants.PRESENCE_PORT))
sock.close()
def FindPeers(self):
request = [STARTUP_REQUEST, self.daemon.jobs, self.daemon.relative_perf,
self.daemon.pubkey_fingerprint]
request = json.dumps(request)
self.SendToAll(request)
|
atcoder/agc001/a.py | Ashindustry007/competitive-programming | 506 | 12701549 | <reponame>Ashindustry007/competitive-programming
#!/usr/bin/env python3
# https://agc001.contest.atcoder.jp/tasks/agc001_a
n = int(input())
l = [int(x) for x in input().split()]
l.sort()
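# After sorting, neighbours pair up as (l[0], l[1]), (l[2], l[3]), ...; each pair
# contributes its smaller element (the even-indexed one), hence the sum below.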
print(sum(l[::2]))
|
tests/functional/checkout/__init__.py | QueoLda/django-oscar | 4,639 | 12701560 | from decimal import Decimal as D
from http import client as http_client
from unittest import mock
from django.urls import reverse
from oscar.apps.shipping import methods
from oscar.core.loading import get_class, get_classes, get_model
from oscar.test import factories
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Order = get_model('order', 'Order')
FailedPreCondition = get_class('checkout.exceptions', 'FailedPreCondition')
GatewayForm = get_class('checkout.forms', 'GatewayForm')
UnableToPlaceOrder = get_class('order.exceptions', 'UnableToPlaceOrder')
RedirectRequired, UnableToTakePayment, PaymentError = get_classes(
'payment.exceptions', ['RedirectRequired', 'UnableToTakePayment', 'PaymentError'])
NoShippingRequired = get_class('shipping.methods', 'NoShippingRequired')
class CheckoutMixin(object):
def create_digital_product(self):
product_class = factories.ProductClassFactory(
requires_shipping=False, track_stock=False)
product = factories.ProductFactory(product_class=product_class)
factories.StockRecordFactory(
num_in_stock=None, price=D('12.00'), product=product)
return product
def add_product_to_basket(self, product=None, **kwargs):
if product is None:
product = factories.ProductFactory()
factories.StockRecordFactory(
num_in_stock=10, price=D('12.00'), product=product)
detail_page = self.get(product.get_absolute_url(), user=kwargs.get('logged_in_user', self.user))
form = detail_page.forms['add_to_basket_form']
form.submit()
def add_voucher_to_basket(self, voucher=None):
if voucher is None:
voucher = factories.create_voucher()
basket_page = self.get(reverse('basket:summary'))
form = basket_page.forms['voucher_form']
form['code'] = voucher.code
form.submit()
def enter_guest_details(self, email='<EMAIL>'):
index_page = self.get(reverse('checkout:index'))
if index_page.status_code == 200:
index_page.form['username'] = email
index_page.form.select('options', GatewayForm.GUEST)
index_page.form.submit()
def create_shipping_country(self):
return factories.CountryFactory(
iso_3166_1_a2='GB', is_shipping_country=True)
def enter_shipping_address(self):
self.create_shipping_country()
address_page = self.get(reverse('checkout:shipping-address'))
if address_page.status_code == 200:
form = address_page.forms['new_shipping_address']
form['first_name'] = 'John'
form['last_name'] = 'Doe'
form['line1'] = '1 Egg Road'
form['line4'] = 'Shell City'
form['postcode'] = 'N12 9RT'
form.submit()
def enter_shipping_method(self):
self.get(reverse('checkout:shipping-method'))
def place_order(self):
payment_details = self.get(
reverse('checkout:shipping-method')).follow().follow()
preview = payment_details.click(linkid="view_preview")
return preview.forms['place_order_form'].submit().follow()
def reach_payment_details_page(self):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details('<EMAIL>')
self.enter_shipping_address()
return self.get(
reverse('checkout:shipping-method')).follow().follow()
def ready_to_place_an_order(self):
payment_details = self.reach_payment_details_page()
return payment_details.click(linkid="view_preview")
class IndexViewPreConditionsMixin:
view_name = None
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
def test_check_basket_is_not_empty(
self,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, 'basket:summary')
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
def test_check_basket_is_valid(
self,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
# Add product to basket but then remove its stock so it is not
# purchasable.
product = factories.ProductFactory()
self.add_product_to_basket(product)
product.stockrecords.all().update(num_in_stock=0)
if self.is_anonymous:
self.enter_guest_details()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, 'basket:summary')
class ShippingAddressViewSkipConditionsMixin:
view_name = None
next_view_name = None
def test_skip_unless_basket_requires_shipping(self):
product = self.create_digital_product()
self.add_product_to_basket(product)
if self.is_anonymous:
self.enter_guest_details()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, self.next_view_name)
class ShippingAddressViewPreConditionsMixin(IndexViewPreConditionsMixin):
view_name = None
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
def test_check_user_email_is_captured(
self,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
if self.is_anonymous:
self.add_product_to_basket()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, 'checkout:index')
class ShippingAddressViewMixin(ShippingAddressViewSkipConditionsMixin, ShippingAddressViewPreConditionsMixin):
def test_submitting_valid_form_adds_data_to_session(self):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.create_shipping_country()
page = self.get(reverse('checkout:shipping-address'))
form = page.forms['new_shipping_address']
form['first_name'] = 'Barry'
form['last_name'] = 'Chuckle'
form['line1'] = '1 King Street'
form['line4'] = 'Gotham City'
form['postcode'] = 'N1 7RR'
response = form.submit()
self.assertRedirectsTo(response, 'checkout:shipping-method')
session_data = self.app.session['checkout_data']
session_fields = session_data['shipping']['new_address_fields']
self.assertEqual('Barry', session_fields['first_name'])
self.assertEqual('Chuckle', session_fields['last_name'])
self.assertEqual('1 King Street', session_fields['line1'])
self.assertEqual('Gotham City', session_fields['line4'])
self.assertEqual('N1 7RR', session_fields['postcode'])
def test_shows_initial_data_if_the_form_has_already_been_submitted(self):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
page = self.get(reverse('checkout:shipping-address'), user=self.user)
form = page.forms['new_shipping_address']
self.assertEqual('John', form['first_name'].value)
self.assertEqual('Doe', form['last_name'].value)
self.assertEqual('1 Egg Road', form['line1'].value)
self.assertEqual('Shell City', form['line4'].value)
self.assertEqual('N12 9RT', form['postcode'].value)
class ShippingMethodViewSkipConditionsMixin:
view_name = None
next_view_name = None
def test_skip_unless_basket_requires_shipping(self):
# This skip condition is not a "normal" one, but is implemented in the
# view's "get" method
product = self.create_digital_product()
self.add_product_to_basket(product)
if self.is_anonymous:
self.enter_guest_details()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, self.next_view_name)
self.assertEqual(self.app.session['checkout_data']['shipping']['method_code'], NoShippingRequired.code)
@mock.patch('oscar.apps.checkout.views.Repository')
def test_skip_if_single_shipping_method_is_available(self, mock_repo):
# This skip condition is not a "normal" one, but is implemented in the
# view's "get" method
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
# Ensure one shipping method available
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = [methods.Free()]
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectsTo(response, 'checkout:payment-method')
class ShippingMethodViewPreConditionsMixin(ShippingAddressViewPreConditionsMixin):
view_name = None
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
@mock.patch('oscar.apps.checkout.views.Repository')
def test_check_shipping_methods_are_available(
self,
mock_repo,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
# This pre condition is not a "normal" one, but is implemented in the
# view's "get" method
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
# Ensure no shipping methods available
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = []
response = self.get(reverse('checkout:shipping-method'))
self.assertRedirectsTo(response, 'checkout:shipping-address')
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
def test_check_shipping_data_is_captured(
self,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
# This pre condition is not a "normal" one, but is implemented in the
# view's "get" method
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, 'checkout:shipping-address')
class ShippingMethodViewMixin(ShippingMethodViewSkipConditionsMixin, ShippingMethodViewPreConditionsMixin):
@mock.patch('oscar.apps.checkout.views.Repository')
def test_shows_form_when_multiple_shipping_methods_available(self, mock_repo):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
# Ensure multiple shipping methods available
method = mock.MagicMock()
method.code = 'm'
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = [methods.Free(), method]
form_page = self.get(reverse('checkout:shipping-method'))
self.assertIsOk(form_page)
response = form_page.forms[0].submit()
self.assertRedirectsTo(response, 'checkout:payment-method')
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
@mock.patch('oscar.apps.checkout.views.Repository')
def test_check_user_can_submit_only_valid_shipping_method(
self,
mock_repo,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
method = mock.MagicMock()
method.code = 'm'
instance = mock_repo.return_value
instance.get_shipping_methods.return_value = [methods.Free(), method]
form_page = self.get(reverse('checkout:shipping-method'))
# a malicious attempt?
form_page.forms[0]['method_code'].value = 'super-free-shipping'
response = form_page.forms[0].submit()
self.assertIsNotRedirect(response)
response.mustcontain('Your submitted shipping method is not permitted')
class PaymentMethodViewSkipConditionsMixin:
@mock.patch('oscar.apps.checkout.session.SurchargeApplicator.get_surcharges')
def test_skip_unless_payment_is_required(self, mock_get_surcharges):
mock_get_surcharges.return_value = []
product = factories.create_product(price=D('0.00'), num_in_stock=100)
self.add_product_to_basket(product)
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
# The shipping method is set automatically, as there is only one (free)
# available
response = self.get(reverse('checkout:payment-method'))
self.assertRedirectsTo(response, 'checkout:preview')
class PaymentMethodViewPreConditionsMixin(ShippingMethodViewPreConditionsMixin):
view_name = None
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
def test_check_shipping_data_is_captured(
self,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
super().test_check_shipping_data_is_captured()
self.enter_shipping_address()
response = self.get(reverse(self.view_name))
self.assertRedirectsTo(response, 'checkout:shipping-method')
class PaymentMethodViewMixin(PaymentMethodViewSkipConditionsMixin, PaymentMethodViewPreConditionsMixin):
pass
class PaymentDetailsViewSkipConditionsMixin:
@mock.patch('oscar.apps.checkout.session.SurchargeApplicator.get_surcharges')
def test_skip_unless_payment_is_required(self, mock_get_surcharges):
mock_get_surcharges.return_value = []
product = factories.create_product(price=D('0.00'), num_in_stock=100)
self.add_product_to_basket(product)
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
# The shipping method is set automatically, as there is only one (free)
# available
response = self.get(reverse('checkout:payment-details'))
self.assertRedirectsTo(response, 'checkout:preview')
class PaymentDetailsViewPreConditionsMixin(PaymentMethodViewPreConditionsMixin):
"""
Does not add any new pre conditions.
"""
class PaymentDetailsViewMixin(PaymentDetailsViewSkipConditionsMixin, PaymentDetailsViewPreConditionsMixin):
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')
def test_redirects_customers_when_using_bank_gateway(self, mock_method):
bank_url = 'https://bank-website.com'
e = RedirectRequired(url=bank_url)
mock_method.side_effect = e
preview = self.ready_to_place_an_order()
bank_redirect = preview.forms['place_order_form'].submit()
assert bank_redirect.status_code == 302
assert bank_redirect.url == bank_url
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')
def test_handles_anticipated_payments_errors_gracefully(self, mock_method):
msg = 'Submitted expiration date is wrong'
e = UnableToTakePayment(msg)
mock_method.side_effect = e
preview = self.ready_to_place_an_order()
response = preview.forms['place_order_form'].submit()
self.assertIsOk(response)
# check user is warned
response.mustcontain(msg)
# check basket is restored
basket = Basket.objects.get()
self.assertEqual(basket.status, Basket.OPEN)
@mock.patch('oscar.apps.checkout.views.logger')
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')
def test_handles_unexpected_payment_errors_gracefully(
self, mock_method, mock_logger):
msg = 'This gateway is down for maintenance'
e = PaymentError(msg)
mock_method.side_effect = e
preview = self.ready_to_place_an_order()
response = preview.forms['place_order_form'].submit()
self.assertIsOk(response)
# check user is warned with a generic error
response.mustcontain(
'A problem occurred while processing payment for this order',
no=[msg])
# admin should be warned
self.assertTrue(mock_logger.error.called)
# check basket is restored
basket = Basket.objects.get()
self.assertEqual(basket.status, Basket.OPEN)
@mock.patch('oscar.apps.checkout.views.logger')
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')
def test_handles_bad_errors_during_payments(
self, mock_method, mock_logger):
e = Exception()
mock_method.side_effect = e
preview = self.ready_to_place_an_order()
response = preview.forms['place_order_form'].submit()
self.assertIsOk(response)
self.assertTrue(mock_logger.exception.called)
basket = Basket.objects.get()
self.assertEqual(basket.status, Basket.OPEN)
@mock.patch('oscar.apps.checkout.views.logger')
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_order_placement')
def test_handles_unexpected_order_placement_errors_gracefully(
self, mock_method, mock_logger):
e = UnableToPlaceOrder()
mock_method.side_effect = e
preview = self.ready_to_place_an_order()
response = preview.forms['place_order_form'].submit()
self.assertIsOk(response)
self.assertTrue(mock_logger.error.called)
basket = Basket.objects.get()
self.assertEqual(basket.status, Basket.OPEN)
@mock.patch('oscar.apps.checkout.views.logger')
@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_order_placement')
def test_handles_all_other_exceptions_gracefully(self, mock_method, mock_logger):
mock_method.side_effect = Exception()
preview = self.ready_to_place_an_order()
response = preview.forms['place_order_form'].submit()
self.assertIsOk(response)
self.assertTrue(mock_logger.exception.called)
basket = Basket.objects.get()
self.assertEqual(basket.status, Basket.OPEN)
class PaymentDetailsPreviewViewPreConditionsMixin(PaymentDetailsViewPreConditionsMixin):
# Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.check_payment_data_is_captured')
def test_check_payment_data_is_captured(
self,
mock_check_payment_data_is_captured,
mock_skip_unless_basket_requires_shipping,
mock_skip_unless_payment_is_required,
):
mock_check_payment_data_is_captured.side_effect = FailedPreCondition(url=reverse('checkout:payment-details'))
response = self.ready_to_place_an_order()
self.assertRedirectsTo(response, 'checkout:payment-details')
class PaymentDetailsPreviewViewMixin(PaymentDetailsPreviewViewPreConditionsMixin):
def test_allows_order_to_be_placed(self):
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
payment_details = self.get(
reverse('checkout:shipping-method')).follow().follow()
preview = payment_details.click(linkid="view_preview")
preview.forms['place_order_form'].submit().follow()
self.assertEqual(1, Order.objects.all().count())
def test_payment_form_being_submitted_from_payment_details_view(self):
payment_details = self.reach_payment_details_page()
preview = payment_details.forms['sensible_data'].submit()
self.assertEqual(0, Order.objects.all().count())
preview.form.submit().follow()
self.assertEqual(1, Order.objects.all().count())
def test_handles_invalid_payment_forms(self):
payment_details = self.reach_payment_details_page()
form = payment_details.forms['sensible_data']
# payment forms should use the preview URL not the payment details URL
form.action = reverse('checkout:payment-details')
self.assertEqual(form.submit(status="*").status_code, http_client.BAD_REQUEST)
def test_placing_an_order_using_a_voucher_records_use(self):
self.add_product_to_basket()
self.add_voucher_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
thankyou = self.place_order()
order = thankyou.context['order']
self.assertEqual(1, order.discounts.all().count())
discount = order.discounts.all()[0]
voucher = discount.voucher
self.assertEqual(1, voucher.num_orders)
def test_placing_an_order_using_an_offer_records_use(self):
offer = factories.create_offer()
self.add_product_to_basket()
if self.is_anonymous:
self.enter_guest_details()
self.enter_shipping_address()
self.place_order()
# Reload offer
offer = ConditionalOffer.objects.get(id=offer.id)
self.assertEqual(1, offer.num_orders)
self.assertEqual(1, offer.num_applications)
|
lib/aws/s3/admin.py | goztrk/django-htk | 206 | 12701568 | # Django Imports
from django.contrib import admin
class S3MediaAssetAdmin(admin.ModelAdmin):
list_display = (
'id',
)
|
operational_analysis/toolkits/unit_conversion.py | NREL/wp3-precon | 123 | 12701585 | """
This module provides basic methods for unit conversion and calculation of basic wind plant variables
"""
def convert_power_to_energy(power_col, sample_rate_min="10T"):
"""
    Compute energy [kWh] from power [kW] and return the data column
    Args:
        power_col(:obj:`pandas.Series`): power data in kW
        sample_rate_min(:obj:`string`): pandas offset alias of the sampling rate to use for the
            conversion (one of "1T", "5T", "10T", "30T" or "1H"), if not ten minutes
    Returns:
        :obj:`pandas.Series`: energy in kWh, with the same length as the input 'power_col'
"""
time_conversion = {"1T": 1.0, "5T": 5.0, "10T": 10.0, "30T": 30.0, "1H": 60.0}
energy_kwh = power_col * time_conversion[sample_rate_min] / 60.0
return energy_kwh
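# A minimal usage sketch of the conversion above (the series values and variable name are
# illustrative assumptions, not part of this module):
#
#   import pandas as pd
#   power_kw = pd.Series([1500.0, 1480.0, 1510.0])         # 10-minute average power in kW
#   energy_kwh = convert_power_to_energy(power_kw, "10T")  # each value contributes power * 10 / 60 kWh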
def compute_gross_energy(
net_energy, avail_losses, curt_losses, avail_type="frac", curt_type="frac"
):
"""
    This function computes gross energy for a wind plant or turbine by adding reported availability and
    curtailment losses to reported net energy. It accounts for whether availability or curtailment loss data
    is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net
    energy, availability loss, and curtailment loss are all reported in the same units.
    Args:
        net_energy (numpy array or pandas Series): reported net energy for wind plant or turbine
        avail_losses (numpy array or pandas Series): reported availability losses for wind plant or turbine
        curt_losses (numpy array or pandas Series): reported curtailment losses for wind plant or turbine
        avail_type (str): whether availability losses are given as fractions ('frac') or in energy units ('energy')
        curt_type (str): whether curtailment losses are given as fractions ('frac') or in energy units ('energy')
    Returns:
        gross (numpy array or pandas Series): calculated gross energy for wind plant or turbine
"""
if (avail_type == "frac") & (curt_type == "frac"):
gross = net_energy / (1 - avail_losses - curt_losses)
elif (avail_type == "frac") & (curt_type == "energy"):
gross = net_energy / (1 - avail_losses) + curt_losses
elif (avail_type == "energy") & (curt_type == "frac"):
gross = net_energy / (1 - curt_losses) + avail_losses
elif (avail_type == "energy") & (curt_type == "energy"):
gross = net_energy + curt_losses + avail_losses
if len(gross[gross < net_energy]) > 0:
raise Exception("Gross energy cannot be less than net energy. Check your input values")
if (len(avail_losses[avail_losses < 0]) > 0) | (len(curt_losses[curt_losses < 0]) > 0):
raise Exception(
"Cannot have negative availability or curtailment input values. Check your data"
)
return gross
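# Hedged example of the all-fractional case handled above (the numbers are made up for illustration):
#
#   import numpy as np
#   net = np.array([900.0, 950.0])
#   gross = compute_gross_energy(net, avail_losses=np.array([0.05, 0.02]),
#                                curt_losses=np.array([0.05, 0.03]))
#   # with avail_type=curt_type='frac', gross = net / (1 - avail_losses - curt_losses)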
def convert_feet_to_meter(variable):
"""
Compute variable in [meter] from [feet] and return the data column
Args:
df(:obj:`pandas.Series`): the existing data frame to append to
variable(:obj:`string`): variable in feet
Returns:
:obj:`pandas.Series`: variable in meters of the input data frame 'df'
"""
out = variable * 0.3048
return out
|
tests/resolution/check/test_check_dns_server_ipv6_error.py | janiversen/supervisor | 597 | 12701606 | """Test check DNS Servers for IPv6 errors."""
from unittest.mock import AsyncMock, call, patch
from aiodns.error import DNSError
import pytest
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.resolution.checks.dns_server_ipv6_error import CheckDNSServerIPv6Errors
from supervisor.resolution.const import ContextType, IssueType
@pytest.fixture(name="dns_query")
async def fixture_dns_query() -> AsyncMock:
"""Mock aiodns query."""
with patch(
"supervisor.resolution.checks.dns_server_ipv6_error.DNSResolver.query",
new_callable=AsyncMock,
) as dns_query:
yield dns_query
async def test_base(coresys: CoreSys):
"""Test check basics."""
dns_server_ipv6_errors = CheckDNSServerIPv6Errors(coresys)
assert dns_server_ipv6_errors.slug == "dns_server_ipv6_error"
assert dns_server_ipv6_errors.enabled
async def test_check(coresys: CoreSys, dns_query: AsyncMock):
"""Test check for DNS server IPv6 errors."""
dns_server_ipv6_errors = CheckDNSServerIPv6Errors(coresys)
coresys.core.state = CoreState.RUNNING
coresys.plugins.dns.servers = ["dns://1.1.1.1"]
assert dns_server_ipv6_errors.dns_servers == [
"dns://1.1.1.1",
"dns://192.168.30.1",
]
assert len(coresys.resolution.issues) == 0
await dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)
assert dns_query.call_args_list == [
call("_checkdns.home-assistant.io", "AAAA"),
call("_checkdns.home-assistant.io", "AAAA"),
]
assert len(coresys.resolution.issues) == 0
dns_query.reset_mock()
coresys.plugins.dns.servers = []
assert dns_server_ipv6_errors.dns_servers == ["dns://192.168.30.1"]
dns_query.side_effect = DNSError(1, "DNS server returned answer with no data")
await dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)
dns_query.assert_called_once_with("_checkdns.home-assistant.io", "AAAA")
assert len(coresys.resolution.issues) == 0
dns_query.reset_mock()
dns_query.side_effect = DNSError(4, "Domain name not found")
await dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)
dns_query.assert_called_once_with("_checkdns.home-assistant.io", "AAAA")
assert len(coresys.resolution.issues) == 1
assert coresys.resolution.issues[0].type is IssueType.DNS_SERVER_IPV6_ERROR
assert coresys.resolution.issues[0].context is ContextType.DNS_SERVER
assert coresys.resolution.issues[0].reference == "dns://192.168.30.1"
async def test_approve(coresys: CoreSys, dns_query: AsyncMock):
"""Test approve existing DNS Server IPv6 error issues."""
dns_server_ipv6_errors = CheckDNSServerIPv6Errors(coresys)
coresys.core.state = CoreState.RUNNING
assert dns_server_ipv6_errors.dns_servers == ["dns://192.168.30.1"]
dns_query.side_effect = DNSError(4, "Domain name not found")
assert (
await dns_server_ipv6_errors.approve_check(reference="dns://1.1.1.1") is False
)
dns_query.assert_not_called()
assert (
await dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")
is True
)
dns_query.assert_called_once_with("_checkdns.home-assistant.io", "AAAA")
dns_query.reset_mock()
dns_query.side_effect = DNSError(1, "DNS server returned answer with no data")
assert (
await dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")
is False
)
dns_query.assert_called_once_with("_checkdns.home-assistant.io", "AAAA")
dns_query.reset_mock()
dns_query.side_effect = None
assert (
await dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")
is False
)
dns_query.assert_called_once_with("_checkdns.home-assistant.io", "AAAA")
async def test_did_run(coresys: CoreSys):
"""Test that the check ran as expected."""
dns_server_ipv6_errors = CheckDNSServerIPv6Errors(coresys)
should_run = dns_server_ipv6_errors.states
should_not_run = [state for state in CoreState if state not in should_run]
assert should_run == [CoreState.RUNNING]
assert len(should_not_run) != 0
with patch.object(
CheckDNSServerIPv6Errors, "run_check", return_value=None
) as check:
for state in should_run:
coresys.core.state = state
await dns_server_ipv6_errors()
check.assert_called_once()
check.reset_mock()
for state in should_not_run:
coresys.core.state = state
await dns_server_ipv6_errors()
check.assert_not_called()
check.reset_mock()
async def test_check_if_affected(coresys: CoreSys):
"""Test that check is still executed even if already affected."""
dns_server_ipv6_errors = CheckDNSServerIPv6Errors(coresys)
coresys.core.state = CoreState.RUNNING
coresys.resolution.create_issue(
IssueType.DNS_SERVER_IPV6_ERROR,
ContextType.DNS_SERVER,
reference="dns://192.168.30.1",
)
assert len(coresys.resolution.issues) == 1
with patch.object(
CheckDNSServerIPv6Errors, "approve_check", return_value=True
) as approve, patch.object(
CheckDNSServerIPv6Errors, "run_check", return_value=None
) as check:
await dns_server_ipv6_errors()
approve.assert_called_once()
check.assert_called_once()
|
fix_old_file_list.py | hexahedria/gated-graph-transformer-network | 160 | 12701613 | import pickle
import os
import argparse
import sys
def main(task_dir, dry_run=False):
with open(os.path.join(task_dir,'file_list.p'),'rb') as f:
bucketed = pickle.load(f)
if dry_run:
print("Got {} (for example)".format(bucketed[0][0]))
bucketed = [['./bucket_' + x.split('bucket_')[1] for x in b] for b in bucketed]
if dry_run:
print("Converting to {} (for example)".format(bucketed[0][0]))
print("Will resolve to {} (for example)".format(os.path.normpath(os.path.join(task_dir,bucketed[0][0]))))
else:
with open(os.path.join(task_dir,'file_list.p'),'wb') as f:
pickle.dump(bucketed, f)
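# Example invocation (the directory path is a placeholder):
#
#   python fix_old_file_list.py /path/to/task_dir --dry-run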
parser = argparse.ArgumentParser(description='Fix the file list of a parsed directory.')
parser.add_argument('task_dir', help="Directory of parsed files")
parser.add_argument('--dry-run', action="store_true", help="Don't overwrite files")
if __name__ == '__main__':
args = vars(parser.parse_args())
main(**args)
|
ssseg/modules/models/segmentors/gcnet/__init__.py | zhizhangxian/sssegmentation | 411 | 12701664 | '''initialize'''
from .gcnet import GCNet
from .contextblock import ContextBlock |
evaluation/print_scores.py | sedrickkeh/dart | 107 | 12701699 | import statistics
import sys
if __name__ == '__main__':
print('##################### Summary ##########################')
with open('bleu.txt') as f:
bleu = float(f.read().strip().split()[2].replace(',',''))
print("BLEU: {:.2f}".format(bleu))
with open('meteor.txt') as f:
meteor = float(f.readlines()[-1].strip().split()[-1])
print("METEOR: {:.2f}".format(meteor))
with open('ter.txt') as f:
ter = float(f.readlines()[-4].strip().split()[2])
print("TER: {:.2f}".format(ter))
with open('moverscore.txt') as f:
moverscore = float(f.readlines()[-1].strip())
print("MoverScore: {:.2f}".format(moverscore))
with open('bertscore.txt') as f:
bertscore = float(f.read().strip().split()[-1])
print("BERTScore F1: {:.2f}".format(bertscore))
with open('bleurt.txt') as f:
scores = [float(s) for s in f.readlines()]
bleurt = statistics.mean(scores)
print("BLEURT: {:.2f}".format(bleurt))
print(' & '.join(["{:.2f}".format(bleu), "{:.2f}".format(meteor), "{:.2f}".format(ter), "{:.2f}".format(moverscore), "{:.2f}".format(bertscore), "{:.2f}".format(bleurt)])) |
tests/utils/test_field_coverage.py | zanachka/spidermon | 405 | 12701705 | from spidermon.utils.field_coverage import calculate_field_coverage
def test_calculate_field_coverage_from_stats():
spider_stats = {
"finish_reason": "finished",
"spidermon_item_scraped_count": 100,
"spidermon_item_scraped_count/dict": 100,
"spidermon_item_scraped_count/dict/author": 100,
"spidermon_item_scraped_count/dict/author/author_url": 64,
"spidermon_item_scraped_count/dict/author/name": 100,
"spidermon_item_scraped_count/dict/quote": 50,
"spidermon_item_scraped_count/dict/tags": 100,
}
expected_coverage = {
"spidermon_field_coverage/dict/author": 1.0,
"spidermon_field_coverage/dict/author/author_url": 0.64,
"spidermon_field_coverage/dict/author/name": 1.0,
"spidermon_field_coverage/dict/quote": 0.5,
"spidermon_field_coverage/dict/tags": 1.0,
}
coverage = calculate_field_coverage(spider_stats)
assert coverage == expected_coverage
|
pixielib/models/FLAME.py | YuliangXiu/PIXIE | 196 | 12701734 | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at <EMAIL>
# For commercial licensing contact, please contact <EMAIL>
import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
class FLAMETex(nn.Module):
"""
FLAME texture:
https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64
FLAME texture converted from BFM:
https://github.com/TimoBolkart/BFM_to_FLAME
"""
def __init__(self, config):
super(FLAMETex, self).__init__()
if config.tex_type == 'BFM':
mu_key = 'MU'
pc_key = 'PC'
n_pc = 199
tex_path = config.tex_path
tex_space = np.load(tex_path)
texture_mean = tex_space[mu_key].reshape(1, -1)
texture_basis = tex_space[pc_key].reshape(-1, n_pc)
elif config.tex_type == 'FLAME':
mu_key = 'mean'
pc_key = 'tex_dir'
n_pc = 200
tex_path = config.flame_tex_path
tex_space = np.load(tex_path)
texture_mean = tex_space[mu_key].reshape(1, -1)/255.
texture_basis = tex_space[pc_key].reshape(-1, n_pc)/255.
else:
print('texture type ', config.tex_type, 'not exist!')
raise NotImplementedError
n_tex = config.n_tex
num_components = texture_basis.shape[1]
texture_mean = torch.from_numpy(texture_mean).float()[None,...]
texture_basis = torch.from_numpy(texture_basis[:,:n_tex]).float()[None,...]
self.register_buffer('texture_mean', texture_mean)
self.register_buffer('texture_basis', texture_basis)
def forward(self, texcode=None):
'''
texcode: [batchsize, n_tex]
texture: [bz, 3, 256, 256], range: 0-1
'''
texture = self.texture_mean + (self.texture_basis*texcode[:,None,:]).sum(-1)
texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0,3,1,2)
texture = F.interpolate(texture, [256, 256])
texture = texture[:,[2,1,0], :,:]
return texture
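# Rough usage sketch for FLAMETex above (the config object and the zero texture code are
# assumptions for illustration; config must provide tex_type, tex_path/flame_tex_path and n_tex):
#
#   flametex = FLAMETex(config)
#   texcode = torch.zeros(1, config.n_tex)   # zero code returns (approximately) the mean texture
#   albedo = flametex(texcode)               # [1, 3, 256, 256], values in 0-1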
def texture_flame2smplx(cached_data, flame_texture, smplx_texture):
''' Convert flame texture map (face-only) into smplx texture map (includes body texture)
TODO: pytorch version ==> grid sample
'''
if smplx_texture.shape[0] != smplx_texture.shape[1]:
        print('SMPL-X texture not squared (%d != %d)' % (smplx_texture.shape[0], smplx_texture.shape[1]))
return
if smplx_texture.shape[0] != cached_data['target_resolution']:
print('SMPL-X texture size does not match cached image resolution (%d != %d)' % (smplx_texture.shape[0], cached_data['target_resolution']))
return
x_coords = cached_data['x_coords']
y_coords = cached_data['y_coords']
target_pixel_ids = cached_data['target_pixel_ids']
source_uv_points = cached_data['source_uv_points']
source_tex_coords = np.zeros_like((source_uv_points)).astype(int)
source_tex_coords[:, 0] = np.clip(flame_texture.shape[0]*(1.0-source_uv_points[:,1]), 0.0, flame_texture.shape[0]).astype(int)
source_tex_coords[:, 1] = np.clip(flame_texture.shape[1]*(source_uv_points[:,0]), 0.0, flame_texture.shape[1]).astype(int)
smplx_texture[y_coords[target_pixel_ids].astype(int), x_coords[target_pixel_ids].astype(int), :] = flame_texture[source_tex_coords[:,0], source_tex_coords[:,1]]
return smplx_texture |
models/BERT/utils/__init__.py | hansheng0512/LateTemporalModeling3DCNN | 144 | 12701765 | from .feed_forward import PositionwiseFeedForward
from .layer_norm import LayerNorm
from .sublayer import *
from .gelu import GELU
|
tests/test_linked.py | allanon/hyp | 105 | 12701776 | from hyp.schematics import Responder as SchematicsResponder
from fixtures import (
PostResponder,
PersonResponder,
PostSerializer,
)
class TestLinked(object):
def test_single(self):
author = {'id': 1, 'name': 'John'}
comments = [
{'id': 1, 'content': 'My comment'},
{'id': 2, 'content': 'Another comment'},
]
post = {'id': 1, 'title': 'My title', 'comments': comments, 'author': author}
response = PostResponder.build(post, linked={
'comments': comments, 'author': [author]
})
assert response == {
'posts': {
'id': 1,
'title': 'My title',
'links': {
'author': 1,
'comments': [1, 2],
}
},
'links': {
'posts.author': {
'href': 'http://example.com/people/{posts.author}',
'type': 'people',
},
'posts.comments': {
'href': 'http://example.com/comments/{posts.comments}',
'type': 'comments',
}
},
'linked': {
'comments': [
{'id': 1, 'content': 'My comment'},
{'id': 2, 'content': 'Another comment'},
],
'people': [
{'id': 1, 'name': 'John'},
]
}
}
def test_multiple_same_type(self):
class MultipleAuthorsResponder(SchematicsResponder):
TYPE = 'posts'
SERIALIZER = PostSerializer
LINKS = {
'author': {
'responder': PersonResponder,
'href': 'http://example.com/people/{posts.author}',
},
'coauthor': {
'responder': PersonResponder,
'href': 'http://example.com/people/{posts.author}',
},
}
author = {'id': 1, 'name': 'John'}
coauthor = {'id': 2, 'name': 'Lisa'}
post = {'id': 1, 'title': 'My title', 'author': author, 'coauthor': coauthor}
response = MultipleAuthorsResponder.build(post, linked={
'author': [author], 'coauthor': [coauthor]
})
assert len(response['linked']['people']) == 2
ids = [person['id'] for person in response['linked']['people']]
assert 1 in ids
assert 2 in ids
def test_custom_linked_key(self):
class CustomPostResponder(SchematicsResponder):
TYPE = 'posts'
SERIALIZER = PostSerializer
LINKS = {
'author': {
'responder': PersonResponder,
'href': 'http://example.com/people/{posts.author}',
'key': 'writer',
},
}
author = {'id': 1, 'name': 'John'}
post = {'id': 1, 'title': 'My title', 'writer': author}
response = CustomPostResponder.build(post, linked={
'author': [author],
})
assert response == {
'posts': {
'id': 1,
'title': 'My title',
'links': {
'author': 1,
}
},
'links': {
'posts.author': {
'href': 'http://example.com/people/{posts.author}',
'type': 'people',
}
},
'linked': {
'people': [
{'id': 1, 'name': 'John'},
],
}
}
|
contrib/libs/python/gen_includes.py | ZhekehZ/catboost | 6,989 | 12701821 | import sys
import os
import errno
from os import listdir
from os.path import dirname, relpath, join
def ensure_dir_exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def make_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def files(directory):
for dirpath, dirnames, filenames in os.walk(directory):
for name in filenames:
yield relpath(join(dirpath, name), directory)
def headers_set(directory):
return {
f for f in files(directory)
if f.endswith('.h') and not f.startswith('internal/')
}
if __name__ == "__main__":
python2_path = sys.argv[1]
python3_path = sys.argv[2]
output_path = sys.argv[3]
ensure_dir_exists(join('.', python2_path))
ensure_dir_exists(join('.', python3_path))
only_headers2 = headers_set(python2_path)
only_headers3 = headers_set(python3_path)
all_headers = only_headers2 | only_headers3
for header in all_headers:
path = join(output_path, header)
make_dir(dirname(path))
        with open(path, 'w') as f:
            f.write('#pragma once\n\n')
            f.write('#ifdef USE_PYTHON3\n')
            if header in only_headers3:
                f.write('#include <' + join(python3_path, header) + '>\n')
            else:
                f.write('#error "No <' + header + '> in Python3"\n')
            f.write('#else\n')
            if header in only_headers2:
                f.write('#include <' + join(python2_path, header) + '>\n')
            else:
                f.write('#error "No <' + header + '> in Python2"\n')
            f.write('#endif\n')
|
.github/make_index.py | rafael-santiago/LIEF | 2,999 | 12701863 | """ Build index from directory listing
From: https://stackoverflow.com/questions/39048654/how-to-enable-directory-indexing-on-github-pages
make_index.py </path/to/directory>
"""
INDEX_TEMPLATE = r"""
<html>
<title>Links for lief</title>
<body>
<h1>Links for lief</h1>
% for name in names:
<a href="${base_url}/${base}/${name}">${name}</a><br />
% endfor
</body>
</html>
"""
EXCLUDED = ['index.html', '.gitkeep']
BASE_URL = "https://lief-project.github.io"
import os
import argparse
# May need to do "pip install mako"
from mako.template import Template
def main():
parser = argparse.ArgumentParser()
parser.add_argument("directory")
parser.add_argument("--base")
parser.add_argument("--output")
args = parser.parse_args()
fnames = [fname for fname in sorted(os.listdir(args.directory))
if fname not in EXCLUDED]
html = Template(INDEX_TEMPLATE).render(names=fnames, base_url=BASE_URL, base=args.base)
with open(args.output, "w") as f:
f.write(html)
if __name__ == '__main__':
main()
|
idaes/core/util/convergence/mpi_utils.py | carldlaird/idaes-pse | 112 | 12701881 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from collections import OrderedDict
import importlib
"""
This module is a collection of classes that provide a
friendlier interface to MPI (through mpi4py). They help
allocate local tasks/data from global tasks/data and gather
global data (from all processors).
Although general, this module was only implemented to
work with the convergence evaluation framework. More work
is needed to make this appropriate for general use.
"""
class MPIInterface:
__have_mpi__ = None
def __init__(self):
if MPIInterface.__have_mpi__ is None:
# This is trying to import mpy4py.MPI, and setting a flag to indicate
# if it succeeds or not.
# we do this here instead of at the module level, because we only want
# to do the import if an MPIInterface is ever requested.
try:
# try the import (the 'globals()' makes sure it is imported
# in the module space and not local to the __init__ method)
globals()['MPI'] = importlib.import_module('mpi4py.MPI')
# import succeeded
MPIInterface.__have_mpi__ = True
except:
# import failed (e.g., no mpi4py installed)
MPIInterface.__have_mpi__ = False
self._comm = None
self._size = None
self._rank = None
if self.have_mpi:
self._comm = MPI.COMM_WORLD # pylint: disable=undefined-variable
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
@property
def have_mpi(self):
assert MPIInterface.__have_mpi__ is not None
return MPIInterface.__have_mpi__
@property
def comm(self):
return self._comm
@property
def rank(self):
return self._rank
@property
def size(self):
return self._size
class ParallelTaskManager:
def __init__(self, n_total_tasks, mpi_interface=None):
if mpi_interface is None:
self._mpi_interface = MPIInterface()
else:
self._mpi_interface = mpi_interface
self._n_total_tasks = n_total_tasks
if not self._mpi_interface.have_mpi:
self._local_map = range(n_total_tasks)
else:
rank = self._mpi_interface.rank
size = self._mpi_interface.size
# there must be a better way to do this
# find which entries in global correspond
# to this process (want them to be contiguous
# for the MPI Allgather calls later
local_N = [0 for i in range(self._mpi_interface.size)]
for i in range(n_total_tasks):
process_i = i % size
local_N[process_i] += 1
start = 0
end = None
for i, v in enumerate(local_N):
if i == self._mpi_interface.rank:
end = start + v
break
else:
start += v
self._local_map = list(range(start, end))
def is_root(self):
if not self._mpi_interface.have_mpi or self._mpi_interface.rank == 0:
return True
return False
# ToDo: fix the parallel task manager to handle dictionaries as well as lists
def global_to_local_data(self, global_data):
if type(global_data) is list:
local_data = list()
assert (len(global_data) == self._n_total_tasks)
for i in self._local_map:
local_data.append(global_data[i])
return local_data
elif type(global_data) is OrderedDict:
local_data = OrderedDict()
assert (len(global_data) == self._n_total_tasks)
idx = 0
            for k, v in global_data.items():
                if idx in self._local_map:
                    local_data[k] = v
                idx += 1
return local_data
raise ValueError('Unknown type passed to global_to_local_data. Expected list or OrderedDict.')
def allgather_global_data(self, local_data):
assert (len(local_data) == len(self._local_map))
if not self._mpi_interface.have_mpi:
return list(local_data)
comm = self._mpi_interface.comm
        global_data_list_of_lists = comm.allgather(local_data)
        return self._stack_global_data(global_data_list_of_lists)
def gather_global_data(self, local_data):
assert (len(local_data) == len(self._local_map))
if not self._mpi_interface.have_mpi:
return list(local_data)
comm = self._mpi_interface.comm
global_data_list_of_lists = comm.gather(local_data)
if global_data_list_of_lists is not None:
return self._stack_global_data(global_data_list_of_lists)
assert self.is_root() == False
return None
def _stack_global_data(self, global_data_list_of_lists):
# stack the list of lists into one global data list
# ToDo: test that this is equivalent to [d for sublist in global_data_list_of_lists for d in sublist]
global_data = list()
for i in range(self._mpi_interface.size):
global_data.extend(global_data_list_of_lists[i])
return global_data
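# Rough usage sketch for the classes above (meant to be launched under mpirun; the task list and
# the squaring "work" are illustrative assumptions):
#
#   task_mgr = ParallelTaskManager(n_total_tasks=8)
#   local_tasks = task_mgr.global_to_local_data(list(range(8)))  # contiguous slice for this rank
#   local_results = [t * t for t in local_tasks]                 # stand-in for the real work
#   all_results = task_mgr.gather_global_data(local_results)     # full list on root, None elsewhere
#   if task_mgr.is_root():
#       print(all_results)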
|
terrascript/data/Trois_Six/sendgrid.py | mjuenema/python-terrascript | 507 | 12701900 | # terrascript/data/Trois-Six/sendgrid.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:45 UTC)
__all__ = []
|
nautobot/extras/tests/dummy_jobs/test_field_order.py | psmware-ltd/nautobot | 384 | 12701911 | from nautobot.extras.jobs import Job, FileVar, StringVar
class TestFieldOrder(Job):
"""My job demo."""
var23 = StringVar(description="I want to be second")
var2 = StringVar(description="Hello")
var1 = FileVar(description="Some file wants to be first")
class Meta:
"""Metaclass attrs."""
field_order = ["var1", "var2", "var23"]
|
tools/enasearch/generate_macros.py | ic4f/tools-iuc | 142 | 12701950 | #!/usr/bin/env python
import enasearch
spaces = ' '
operator_names = {
"=": "equal",
"!=": "different",
"<": "lower",
"<=": "equal or lower",
">": "higher",
">=": "equal or higher",
}
def format_name(name, alternative_name):
"""
Format name to remove None name and & in name
"""
if name is None:
name = alternative_name
name = name.replace("&", "and")
return name
def sort_by_name(dict):
"""
Sort a dictionary on the values
"""
return sorted(dict, key=dict.get)
def write_analysis_fields():
"""
Write the analysis fields
"""
s = '%s<xml name="analysis_fields">\n' % (spaces)
fields = enasearch.get_returnable_fields(result="analysis", verbose=False)
for f in fields:
s += '%s<option value="%s">%s</option>\n' % (2 * spaces, f, f)
s += '%s</xml>\n' % (spaces)
return s
def write_display_options():
"""
Write the display options
"""
s = '%s<xml name="display_options">\n' % (spaces)
when_s = '%s<xml name="when_display_options">\n' % (spaces)
options = enasearch.get_display_options(verbose=False)
for opt in options:
s += '%s<option value="%s">%s</option>\n' % (2 * spaces, opt, options[opt]['description'])
when_s += '%s<when value="%s">\n' % (2 * spaces, opt)
if opt == 'fasta' or opt == 'fastq':
when_s += '%s<param name="range_start" argument="--subseq_range" type="integer" optional="true" label="Start integer for subsequences"/>\n' % (3 * spaces)
when_s += '%s<param name="range_stop" argument="--subseq_range" type="integer" optional="true" label="Stop integer for subsequences"/>\n' % (3 * spaces)
else:
when_s += '%s<param argument="--offset" type="integer" optional="true" label="First record to get"/>\n' % (3 * spaces)
when_s += '%s<param argument="--length" type="integer" optional="true" label="Number of records to retrieve"/>\n' % (3 * spaces)
when_s += '%s</when>\n' % (2 * spaces)
s += '%s</xml>\n' % (spaces)
when_s += '%s</xml>\n' % (spaces)
s += when_s
return s
def write_run_fields():
"""
Write the run fields
"""
s = '%s<xml name="run_fields">\n' % (spaces)
fields = enasearch.get_returnable_fields(result="read_run", verbose=False)
for f in fields:
s += '%s<option value="%s">%s</option>\n' % (2 * spaces, f, f)
s += '%s</xml>\n' % (spaces)
return s
def write_taxonomy_results():
"""
Write the possible taxonomy results
"""
s = '%s<xml name="taxonomy_results">\n' % (spaces)
fields = enasearch.get_taxonomy_results(verbose=False)
for f in fields:
s += '%s<option value="%s">%s</option>\n' % (2 * spaces, f, fields[f]['description'])
s += '%s</xml>\n' % (spaces)
return s
def write_result_parameters(fts=False):
"""
Write the parameters that are dependant of results
"""
res = enasearch.get_results(verbose=False)
options = enasearch.get_display_options(verbose=False)
ft = enasearch.get_filter_types(verbose=False)
# Format the filter type related parameters
ft_parameters = {}
for t in ft:
s = ''
if 'operators' in ft[t]:
s = '%s<param name="operation" type="select" label="Operator">\n' % (7 * spaces)
for o in ft[t]['operators']:
on = o
if o in operator_names:
on = operator_names[o]
s += '%s<option value="%s">%s</option>\n' % (8 * spaces, on, on)
s += '%s</param>\n' % (7 * spaces)
if 'value' in ft[t]:
value_format = 'float' if t == 'Number' else 'text'
s += '%s<param name="value" type="%s" value="" label="%s"/>\n' % (7 * spaces, value_format, ft[t]['value'])
elif 'values' in ft[t]:
s += '%s<param name="value" type="select" label="Value">\n' % (7 * spaces)
for v in ft[t]['values']:
s += '%s<option value="%s">%s</option>\n' % (8 * spaces, v, v)
s += '%s</param>\n' % (7 * spaces)
else:
s += '%s<conditional name="op">\n' % (7 * spaces)
s += '%s<param name="operation" type="select" label="Operation">\n' % (8 * spaces)
for op in ft[t]:
s += '%s<option value="%s">%s</option>\n' % (9 * spaces, op, ft[t][op]['description'])
s += '%s</param>\n' % (8 * spaces)
for op in ft[t]:
s += '%s<when value="%s">\n' % (8 * spaces, op)
s += '%s<param name="values" type="text" value="" label="%s" help="Values separated by simple comma"/>\n' % (9 * spaces, ",".join(ft[t][op]['parameters']))
s += '%s</when>\n' % (8 * spaces)
s += '%s</conditional>\n' % (7 * spaces)
ft_parameters[t] = s
# Start adding the conditional
s = '%s<conditional name="res">\n' % (2 * spaces)
# Add result parameter
s += '%s<param argument="--result" type="select" label="Result to return">\n' % (3 * spaces)
for r in res:
s += '%s<option value="%s">%s</option>\n' % (4 * spaces, r, res[r]['description'])
s += '%s</param>\n' % (3 * spaces)
for r in res:
sf = enasearch.get_sortable_fields(r)
ff = res[r]['filter_fields']
s += '%s<when value="%s">\n' % (3 * spaces, r)
if not fts:
s += '%s<repeat name="queries" title="Add a query">\n' % (4 * spaces)
# Add combination operator
s += '%s<param name="combination_operation" type="select" label="Combination operation">\n' % (5 * spaces)
s += '%s<option value="AND">AND</option>\n' % (6 * spaces)
s += '%s<option value="OR">OR</option>\n' % (6 * spaces)
s += '%s<option value="NOT">NOT</option>\n' % (6 * spaces)
s += '%s</param>\n' % (5 * spaces)
s += '%s<conditional name="filter_field">\n' % (5 * spaces)
s += '%s<param name="field" type="select" label="Field to query">\n' % (6 * spaces)
for f in ff:
s += '%s<option value="%s">%s</option>\n' % (7 * spaces, f, ff[f]['description'])
s += '%s</param>\n' % (6 * spaces)
for f in ff:
# Add the correct parameter given the type of field
typ = ff[f]['type'].capitalize()
if typ not in ft_parameters:
if f == 'location':
typ = 'Geospatial'
else:
continue
s += '%s<when value="%s">\n' % (6 * spaces, f)
s += ft_parameters[typ]
s += '%s</when>\n' % (6 * spaces)
s += '%s</conditional>\n' % (5 * spaces)
s += '%s</repeat>\n' % (4 * spaces)
# Add display opt
s += '%s<conditional name="display_opt">\n' % (4 * spaces)
s += '%s<param argument="--display" type="select" label="Display option to specify the display format">\n' % (5 * spaces)
s += '%s<expand macro="display_options"/>\n' % (6 * spaces)
s += '%s</param>\n' % (5 * spaces)
for opt in options:
s += '%s<when value="%s"' % (5 * spaces, opt)
if opt != 'fasta' and opt != 'fastq':
s += '>\n'
s += '%s<param argument="--offset" type="integer" optional="true" label="First record to get"/>\n' % (6 * spaces)
s += '%s<param argument="--length" type="integer" optional="true" label="Number of records to retrieve"/>\n' % (6 * spaces)
if opt == 'report':
s += '%s<param argument="--fields" type="select" multiple="true" label="Fields to return">\n' % (6 * spaces)
for f in res[r]['returnable_fields']:
s += '%s<option value="%s">%s</option>\n' % (7 * spaces, f, f)
s += '%s</param>\n' % (6 * spaces)
s += '%s<param argument="--sortfields" type="select" optional="true" multiple="true" label="Fields to sort the results">\n' % (6 * spaces)
for f in sf:
s += '%s<option value="%s">%s</option>\n' % (7 * spaces, f, sf[f]['description'])
s += '%s</param>\n' % (6 * spaces)
s += '%s</when>\n' % (5 * spaces)
else:
s += '/>\n'
s += '%s</conditional>\n' % (4 * spaces)
s += '%s</when>\n' % (3 * spaces)
s += '%s</conditional>\n' % (2 * spaces)
return s
def write_search_data_parameters():
"""
Write the parameters for search_data
"""
fts = '%s<xml name="free_text_search">\n' % (spaces)
fts += write_result_parameters(True)
fts += '%s</xml>\n' % (spaces)
cts = '%s<xml name="conditional_text_search">\n' % (spaces)
cts += write_result_parameters(False)
cts += '%s</xml>\n' % (spaces)
return fts + cts
def generate_search_macros(filepath):
"""
Generate the content of the macro file
"""
s = '<?xml version="1.0" ?>\n'
s += '<macros>\n'
s += write_analysis_fields()
s += write_display_options()
s += write_run_fields()
s += write_taxonomy_results()
s += write_search_data_parameters()
s += '</macros>\n'
with open(filepath, "w") as file:
file.write(s)
if __name__ == '__main__':
generate_search_macros("search_macros.xml")
|
py2app_tests/test_pkg_script.py | sorphin/py2app | 193 | 12701956 | """
Test case for a project that includes a script that has the same
base-name as a package used by the script.
"""
import sys
if (sys.version_info[0] == 2 and sys.version_info[:2] >= (2,7)) or \
(sys.version_info[0] == 3 and sys.version_info[:2] >= (3,2)):
import unittest
else:
import unittest2 as unittest
import subprocess
import shutil
import time
import os
import signal
import py2app
import zipfile
from .tools import kill_child_processes
DIR_NAME=os.path.dirname(os.path.abspath(__file__))
class TestBasicApp (unittest.TestCase):
py2app_args = []
python_args = []
app_dir = os.path.join(DIR_NAME, 'pkg_script_app')
# Basic setup code
#
# The code in this block needs to be moved to
# a base-class.
@classmethod
def setUpClass(cls):
kill_child_processes()
env=os.environ.copy()
env['TMPDIR'] = os.getcwd()
pp = os.path.dirname(os.path.dirname(py2app.__file__))
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = pp + ':' + env['PYTHONPATH']
else:
env['PYTHONPATH'] = pp
if 'LANG' not in env:
# Ensure that testing though SSH works
env['LANG'] = 'en_US.UTF-8'
p = subprocess.Popen([
sys.executable ] + cls.python_args + [
'setup.py', 'py2app'] + cls.py2app_args,
cwd = cls.app_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=False,
env=env
)
lines = p.communicate()[0]
if p.wait() != 0:
print (lines)
raise AssertionError("Creating basic_app bundle failed")
@classmethod
def tearDownClass(cls):
if os.path.exists(os.path.join(cls.app_dir, 'build')):
shutil.rmtree(os.path.join(cls.app_dir, 'build'))
if os.path.exists(os.path.join(cls.app_dir, 'dist')):
shutil.rmtree(os.path.join(cls.app_dir, 'dist'))
time.sleep(2)
def tearDown(self):
kill_child_processes()
time.sleep(1)
def start_app(self):
# Start the test app, return a subprocess object where
# stdin and stdout are connected to pipes.
path = os.path.join(
self.app_dir,
'dist/quot.app/Contents/MacOS/quot')
p = subprocess.Popen([path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=False,
)
#stderr=subprocess.STDOUT)
return p
def wait_with_timeout(self, proc, timeout=10):
for i in range(timeout):
x = proc.poll()
if x is None:
time.sleep(1)
else:
return x
os.kill(proc.pid, signal.SIGKILL)
return proc.wait()
#
# End of setup code
#
def test_basic_start(self):
p = self.start_app()
p.stdin.close()
exit = self.wait_with_timeout(p)
self.assertEqual(exit, 0)
p.stdout.close()
def test_simple_imports(self):
p = self.start_app()
p.stdin.write(("print(%r in sys.path)\n"%(
os.path.join(self.app_dir, 'dist/quot.app/Contents/Resources'),)).encode('latin1'))
p.stdin.flush()
ln = p.stdout.readline()
self.assertEqual(ln.strip(), b"False")
# Basic module that is always present:
p.stdin.write('import_module("os")\n'.encode('latin1'))
p.stdin.flush()
ln = p.stdout.readline()
self.assertEqual(ln.strip(), b"os")
# Dependency of the main module:
p.stdin.write('import_module("quot")\n'.encode('latin1'))
p.stdin.flush()
ln = p.stdout.readline()
self.assertEqual(ln.strip(), b"quot")
# - verify that the right one gets loaded
if '--alias' not in self.py2app_args:
p.stdin.write('import quot;print(quot.__file__)\n'.encode('latin1'))
p.stdin.flush()
ln = p.stdout.readline()
self.assertTrue(b"Contents/Resources/lib" in ln.strip())
p.stdin.write('import_module("quot.queue")\n'.encode('latin1'))
p.stdin.flush()
ln = p.stdout.readline()
self.assertEqual(ln.strip(), b"quot.queue")
p.stdin.close()
p.stdout.close()
self.wait_with_timeout(p)
def test_zip_contents(self):
if '--alias' in self.py2app_args:
raise unittest.SkipTest("Not relevant for Alias builds")
dirpath = os.path.join(self.app_dir, 'dist/quot.app/Contents')
zfpath = os.path.join(dirpath, 'Resources/lib/python%d%d.zip'%(
sys.version_info[:2]))
if not os.path.exists(zfpath):
zfpath = os.path.join(dirpath, 'Resources/lib/python%d.%d/site-packages.zip'%(
sys.version_info[:2]))
if not os.path.exists(zfpath):
zfpath = os.path.join(dirpath, 'Resources/lib/site-packages.zip')
if not os.path.exists(zfpath):
self.fail("Cannot locate embedded zipfile")
zf = zipfile.ZipFile(zfpath, 'r')
for nm in ('quot.py', 'quot.pyc', 'quot.pyo'):
try:
zf.read(nm)
self.fail("'quot' module is in the zipfile")
except KeyError:
pass
class TestBasicAliasApp (TestBasicApp):
py2app_args = [ '--alias', ]
class TestBasicSemiStandaloneApp (TestBasicApp):
py2app_args = [ '--semi-standalone', ]
if __name__ == "__main__":
unittest.main()
|
src/orion/algo/evolution_es.py | nurbal/orion | 177 | 12701962 | # -*- coding: utf-8 -*-
"""
The Evolved Transformer and large-scale evolution of image classifiers
======================================================================
Implement evolution to exploit configurations with fixed resource efficiently
"""
import copy
import importlib
import logging
import numpy as np
from orion.algo.hyperband import Hyperband, HyperbandBracket
from orion.core.utils import format_trials
logger = logging.getLogger(__name__)
REGISTRATION_ERROR = """
Bad fidelity level {fidelity}. Should be in {budgets}.
Params: {params}
"""
SPACE_ERROR = """
EvolutionES cannot be used if space does not contain a fidelity dimension.
"""
BUDGET_ERROR = """
Cannot build budgets below max_resources;
(max: {}) - (min: {}) > (num_rungs: {})
"""
def compute_budgets(
min_resources, max_resources, reduction_factor, nums_population, pairs
):
"""Compute the budgets used for each execution of hyperband"""
budgets_eves = []
if reduction_factor == 1:
for i in range(min_resources, max_resources + 1):
if i == min_resources:
budgets_eves.append([(nums_population, i)])
else:
budgets_eves[0].append((pairs * 2, i))
else:
num_brackets = int(np.log(max_resources) / np.log(reduction_factor))
budgets = []
budgets_tab = {} # just for display consideration
for bracket_id in range(0, num_brackets + 1):
bracket_budgets = []
num_trials = int(
np.ceil(
int((num_brackets + 1) / (num_brackets - bracket_id + 1))
* (reduction_factor ** (num_brackets - bracket_id))
)
)
min_resources = max_resources / reduction_factor ** (
num_brackets - bracket_id
)
for i in range(0, num_brackets - bracket_id + 1):
n_i = int(num_trials / reduction_factor ** i)
min_i = int(min_resources * reduction_factor ** i)
bracket_budgets.append((n_i, min_i))
if budgets_tab.get(i):
budgets_tab[i].append((n_i, min_i))
else:
budgets_tab[i] = [(n_i, min_i)]
budgets.append(bracket_budgets)
for i in range(len(budgets[0])):
if i == 0:
budgets_eves.append([(nums_population, budgets[0][i][1])])
else:
budgets_eves[0].append((pairs * 2, budgets[0][i][1]))
return budgets_eves
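# Hedged example of the budget layout produced by compute_budgets (numbers are illustrative):
#
#   compute_budgets(min_resources=1, max_resources=8, reduction_factor=2,
#                   nums_population=20, pairs=10)
#   # -> [[(20, 1), (20, 2), (20, 4), (20, 8)]]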
class EvolutionES(Hyperband):
"""EvolutionES formulates hyperparameter optimization as an evolution.
For more information on the algorithm,
see original paper at
https://arxiv.org/pdf/1703.01041.pdf and
https://arxiv.org/pdf/1901.11117.pdf
Real et al. "Large-Scale Evolution of Image Classifiers"
So et all. "The Evolved Transformer"
Parameters
----------
space: `orion.algo.space.Space`
Optimisation space with priors for each dimension.
seed: None, int or sequence of int
Seed for the random number generator used to sample new trials.
Default: ``None``
repetitions: int
        Number of executions of Hyperband. The default is ``numpy.inf``, which means
        Hyperband runs until no new trials can be suggested.
    nums_population: int
        Population size for EvolutionES. A larger population often yields better
        performance but requires more computation, so there is a trade-off that depends
        on the search space and the budget available for your problem.
Default: 20
mutate: str or None, optional
        Configuration of a custom mutate function and its mutate factors, such as a
        multiply factor (multiplies/divides the value) and an add factor (adds/subtracts
        from the value). The function must be given as an importable string. If None, the
        default mutate function ``orion.algo.mutate_functions.default_mutate`` is used.
"""
requires_type = None
requires_dist = None
requires_shape = "flattened"
def __init__(
self,
space,
seed=None,
repetitions=np.inf,
nums_population=20,
mutate=None,
max_retries=1000,
):
super(EvolutionES, self).__init__(space, seed=seed, repetitions=repetitions)
pair = nums_population // 2
mutate_ratio = 0.3
self.nums_population = nums_population
self.nums_comp_pairs = pair
self.max_retries = max_retries
self.mutate_ratio = mutate_ratio
self.mutate = mutate
self.nums_mutate_gene = (
int((len(self.space.values()) - 1) * mutate_ratio)
if int((len(self.space.values()) - 1) * mutate_ratio) > 0
else 1
)
self._param_names += ["nums_population", "mutate", "max_retries"]
self.hurdles = []
self.population = {}
for i, dim in enumerate(self.space.values()):
if dim.type != "fidelity":
self.population[i] = [-1] * nums_population
self.performance = np.inf * np.ones(nums_population)
self.budgets = compute_budgets(
self.min_resources,
self.max_resources,
self.reduction_factor,
nums_population,
pair,
)
self.brackets = [
BracketEVES(self, bracket_budgets, 1) for bracket_budgets in self.budgets
]
self.seed_rng(seed)
@property
def state_dict(self):
"""Return a state dict that can be used to reset the state of the algorithm."""
state_dict = super(EvolutionES, self).state_dict
state_dict["population"] = copy.deepcopy(self.population)
state_dict["performance"] = copy.deepcopy(self.performance)
state_dict["hurdles"] = copy.deepcopy(self.hurdles)
return state_dict
def set_state(self, state_dict):
"""Reset the state of the algorithm based on the given state_dict"""
super(EvolutionES, self).set_state(state_dict)
self.population = state_dict["population"]
self.performance = state_dict["performance"]
self.hurdles = state_dict["hurdles"]
def _get_bracket(self, trial):
"""Get the bracket of a trial during observe"""
return self.brackets[-1]
class BracketEVES(HyperbandBracket):
"""Bracket of rungs for the algorithm Hyperband.
Parameters
----------
evolutiones: `evolutiones` algorithm
The evolutiones algorithm object which this bracket will be part of.
budgets: list of tuple
Each tuple gives the (n_trials, resource_budget) for the respective rung.
repetition_id: int
The id of hyperband execution this bracket belongs to
"""
def __init__(self, evolution_es, budgets, repetition_id):
super(BracketEVES, self).__init__(evolution_es, budgets, repetition_id)
self.eves = self.hyperband
self.search_space_without_fidelity = []
self._candidates = {}
if evolution_es.mutate:
self.mutate_attr = copy.deepcopy(evolution_es.mutate)
else:
self.mutate_attr = {}
function_string = self.mutate_attr.pop(
"function", "orion.algo.mutate_functions.default_mutate"
)
mod_name, func_name = function_string.rsplit(".", 1)
mod = importlib.import_module(mod_name)
self.mutate_func = getattr(mod, func_name)
for i, dim in enumerate(self.space.values()):
if dim.type != "fidelity":
self.search_space_without_fidelity.append(i)
@property
def space(self):
return self.eves.space
@property
def state_dict(self):
state_dict = super(BracketEVES, self).state_dict
state_dict["candidates"] = copy.deepcopy(self._candidates)
return state_dict
def set_state(self, state_dict):
super(BracketEVES, self).set_state(state_dict)
self._candidates = state_dict["candidates"]
def _get_teams(self, rung_id):
"""Get the red team and blue team"""
if self.has_rung_filled(rung_id + 1):
return []
rung = self.rungs[rung_id]["results"]
population_range = (
self.eves.nums_population
if len(list(rung.values())) > self.eves.nums_population
else len(list(rung.values()))
)
rung_trials = list(rung.values())
for trial_index in range(population_range):
objective, trial = rung_trials[trial_index]
self.eves.performance[trial_index] = objective
for ith_dim in self.search_space_without_fidelity:
self.eves.population[ith_dim][trial_index] = trial.params[
self.space[ith_dim].name
]
population_index = list(range(self.eves.nums_population))
red_team = self.eves.rng.choice(
population_index, self.eves.nums_comp_pairs, replace=False
)
diff_list = list(set(population_index).difference(set(red_team)))
blue_team = self.eves.rng.choice(
diff_list, self.eves.nums_comp_pairs, replace=False
)
return rung, population_range, red_team, blue_team
def _mutate_population(self, red_team, blue_team, rung, population_range, fidelity):
"""Get the mutated population and hurdles"""
winner_list = []
loser_list = []
if set(red_team) != set(blue_team):
hurdles = 0
for i, _ in enumerate(red_team):
winner, loser = (
(red_team, blue_team)
if self.eves.performance[red_team[i]]
< self.eves.performance[blue_team[i]]
else (blue_team, red_team)
)
winner_list.append(winner[i])
loser_list.append(loser[i])
hurdles += self.eves.performance[winner[i]]
self._mutate(winner[i], loser[i])
hurdles /= len(red_team)
self.eves.hurdles.append(hurdles)
logger.debug("Evolution hurdles are: %s", str(self.eves.hurdles))
trials = []
trial_ids = set()
nums_all_equal = [0] * population_range
for i in range(population_range):
point = [0] * len(self.space)
while True:
point = list(point)
point[
list(self.space.keys()).index(self.eves.fidelity_index)
] = fidelity
for j in self.search_space_without_fidelity:
point[j] = self.eves.population[j][i]
trial = format_trials.tuple_to_trial(point, self.space)
trial = self.eves.format_trial(trial)
trial_id = self.eves.get_id(trial)
if trial_id in trial_ids:
nums_all_equal[i] += 1
logger.debug("find equal one, continue to mutate.")
self._mutate(i, i)
elif self.eves.has_suggested(trial):
nums_all_equal[i] += 1
logger.debug("find one already suggested, continue to mutate.")
self._mutate(i, i)
else:
break
if nums_all_equal[i] > self.eves.max_retries:
logger.warning(
"Can not Evolve any more. You can make an early stop."
)
break
if nums_all_equal[i] < self.eves.max_retries:
trials.append(trial)
trial_ids.add(trial_id)
else:
logger.debug("Dropping trial %s", trial)
return trials, np.array(nums_all_equal)
def get_candidates(self, rung_id):
"""Get a candidate for promotion"""
if rung_id not in self._candidates:
rung, population_range, red_team, blue_team = self._get_teams(rung_id)
fidelity = self.rungs[rung_id + 1]["resources"]
self._candidates[rung_id] = self._mutate_population(
red_team, blue_team, rung, population_range, fidelity
)[0]
candidates = []
for candidate in self._candidates[rung_id]:
if not self.eves.has_suggested(candidate):
candidates.append(candidate)
return candidates
def _mutate(self, winner_id, loser_id):
select_genes_key_list = self.eves.rng.choice(
self.search_space_without_fidelity,
self.eves.nums_mutate_gene,
replace=False,
)
self.copy_winner(winner_id, loser_id)
kwargs = copy.deepcopy(self.mutate_attr)
for i, _ in enumerate(select_genes_key_list):
space = self.space.values()[select_genes_key_list[i]]
old = self.eves.population[select_genes_key_list[i]][loser_id]
new = self.mutate_func(space, self.eves.rng, old, **kwargs)
self.eves.population[select_genes_key_list[i]][loser_id] = new
self.eves.performance[loser_id] = -1
def copy_winner(self, winner_id, loser_id):
"""Copy winner to loser"""
for key in self.search_space_without_fidelity:
self.eves.population[key][loser_id] = self.eves.population[key][winner_id]
|
furniture/env/models/grippers/__init__.py | KejiaChen/assembly | 364 | 12701980 |
from .gripper import Gripper
from .gripper_factory import gripper_factory
from .two_finger_gripper import TwoFingerGripper, LeftTwoFingerGripper
from .pr2_gripper import PR2Gripper
from .pushing_gripper import PushingGripper
from .robotiq_gripper import RobotiqGripper
from .robotiq_three_finger_gripper import RobotiqThreeFingerGripper
from .panda_gripper import PandaGripper
from .jaco_gripper import JacoGripper
from .fetch_gripper import FetchGripper
|
section_10_(dictionaries)/dict_get.py | hlcooll/python_lessons | 425 | 12702024 | # If you're new to dictionaries, you might want to start with dict_access.py
# We create a dictionary.
contacts = {
'Shannon': '202-555-1234',
'Amy': '410-515-3000',
'Jen': '301-600-5555',
'Julie': '202-333-9876'
}
name = raw_input("Enter the name of the person whose phone number you want: ")
print "We will get a KeyError if you entered a name that wasn't in the dictionary."
print "{0}'s number is: {1}".format(name, contacts[name])
print "But there's a way we don't need to worry about KeyErrors."
name = raw_input("Enter the name of the person whose phone number you want ... might I suggest Frankenstein? ")
# .get() is a dictionary method that lets us safely access a dictionary even if that key doesn't exist.
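# For example (illustrative, using the contacts dictionary above):
# contacts.get('Frankenstein') returns None instead of raising a KeyError,
# and contacts.get('Frankenstein', 'no such person') returns 'no such person'.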
print "{0}'s number is ... {1}".format(name, contacts.get(name, " ... I couldn't find it!")) |