id (string, 1–8 chars) | text (string, 6–1.05M chars) | dataset_id (string, 1 class)
---|---|---|
6698192
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds data for financing statement trust indenture information."""
from __future__ import annotations
from .db import db
TRUST_INDENTURE_YES = 'Y'
TRUST_INDENTURE_NO = 'N'
class TrustIndenture(db.Model): # pylint: disable=too-many-instance-attributes
"""This class manages all of the financing statement trust indenture information."""
__tablename__ = 'trust_indentures'
id = db.Column('id', db.Integer, db.Sequence('trust_id_seq'), primary_key=True)
trust_indenture = db.Column('trust_indenture', db.String(1), nullable=False)
# parent keys
registration_id = db.Column('registration_id', db.Integer, db.ForeignKey('registrations.id'), nullable=False,
index=True)
financing_id = db.Column('financing_id', db.Integer, db.ForeignKey('financing_statements.id'), nullable=False,
index=True)
registration_id_end = db.Column('registration_id_end', db.Integer, nullable=True,
index=True)
# db.ForeignKey('registration.registration_id'), nullable=True)
# Relationships - Registration
registration = db.relationship('Registration', foreign_keys=[registration_id],
back_populates='trust_indenture', cascade='all, delete', uselist=False)
# registration_end = db.relationship("Registration", foreign_keys=[registration_id_end])
# Relationships - FinancingStatement
financing_statement = db.relationship('FinancingStatement', foreign_keys=[financing_id],
back_populates='trust_indenture', cascade='all, delete', uselist=False)
@classmethod
def find_by_id(cls, trust_id: int = None):
"""Return a trust indenture object by expiry ID."""
trust_indenture = None
if trust_id:
trust_indenture = cls.query.get(trust_id)
return trust_indenture
@classmethod
def find_by_registration_id(cls, registration_id: int):
"""Return a list of trust indenture objects by registration number."""
trust_indenture = None
if registration_id:
trust_indenture = cls.query.filter(TrustIndenture.registration_id == registration_id) \
.order_by(TrustIndenture.id).one_or_none()
return trust_indenture
@classmethod
def find_by_financing_id(cls, financing_id: int):
"""Return a list of trust indenture objects by financing statement ID."""
trust_indenture = None
if financing_id:
trust_indenture = cls.query.filter(TrustIndenture.financing_id == financing_id) \
.order_by(TrustIndenture.id).all()
return trust_indenture
@staticmethod
def create_from_json(json_data, registration_id: int = None):
"""Create a trust indenture object from a json schema object: map json to db."""
trust_indenture = TrustIndenture()
if registration_id:
trust_indenture.registration_id = registration_id
if 'trustIndenture' in json_data and json_data['trustIndenture']:
trust_indenture.trust_indenture = TRUST_INDENTURE_YES
else:
trust_indenture.trust_indenture = TRUST_INDENTURE_NO
return [trust_indenture]
@staticmethod
def create_from_amendment_json(financing_id: int, registration_id: int):
"""Create a trust indenture object as part of an amendment registration: map json to db."""
trust_indenture = TrustIndenture()
trust_indenture.registration_id = registration_id
trust_indenture.financing_id = financing_id
trust_indenture.trust_indenture = TRUST_INDENTURE_YES
return trust_indenture
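# Illustrative sketch (not part of the original module): how the JSON mapping above
# might be exercised. The payload shape and IDs are assumptions based on the
# 'trustIndenture' key used in create_from_json.
#   indentures = TrustIndenture.create_from_json({'trustIndenture': True}, registration_id=200000001)
#   indentures[0].trust_indenture   # 'Y'
#   amendment = TrustIndenture.create_from_amendment_json(financing_id=1, registration_id=200000002)
#   amendment.trust_indenture       # 'Y'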
|
StarcoderdataPython
|
6694054
|
<filename>CartPole/uploadCartPole.py<gh_stars>0
import gym
gym.upload('/tmp/cartpole-v1-experiment-1', api_key='sk_OM2SEFKTTfia7GeMo8dWWA')
|
StarcoderdataPython
|
1610641
|
import tensorflow as tf
from neural_toolbox import rnn, utils
from generic.tf_utils.abstract_network import AbstractNetwork
class GuesserNetwork(AbstractNetwork):
def __init__(self, config, num_words, device='', reuse=False):
AbstractNetwork.__init__(self, "guesser", device=device)
mini_batch_size = None
with tf.variable_scope(self.scope_name, reuse=reuse):
# Dialogues
self.dialogues = tf.placeholder(tf.int32, [mini_batch_size, None], name='dialogues')
self.seq_length = tf.placeholder(tf.int32, [mini_batch_size], name='seq_length')
# Objects
self.obj_mask = tf.placeholder(tf.float32, [mini_batch_size, None], name='obj_mask')
self.obj_cats = tf.placeholder(tf.int32, [mini_batch_size, None], name='obj_cats')
self.obj_spats = tf.placeholder(tf.float32, [mini_batch_size, None, config['spat_dim']], name='obj_spats')
# Targets
self.targets = tf.placeholder(tf.int32, [mini_batch_size], name="targets_index")
self.object_cats_emb = utils.get_embedding(
self.obj_cats,
config['no_categories'] + 1,
config['cat_emb_dim'],
scope='cat_embedding')
self.objects_input = tf.concat([self.object_cats_emb, self.obj_spats], axis=2)
self.flat_objects_inp = tf.reshape(self.objects_input, [-1, config['cat_emb_dim'] + config['spat_dim']])
with tf.variable_scope('obj_mlp'):
h1 = utils.fully_connected(
self.flat_objects_inp,
n_out=config['obj_mlp_units'],
activation='relu',
scope='l1')
h2 = utils.fully_connected(
h1,
n_out=config['dialog_emb_dim'],
activation='relu',
scope='l2')
obj_embs = tf.reshape(h2, [-1, tf.shape(self.obj_cats)[1], config['dialog_emb_dim']])
# Compute the word embedding
input_words = utils.get_embedding(self.dialogues,
n_words=num_words,
n_dim=config['word_emb_dim'],
scope="input_word_embedding")
last_states, _ = rnn.variable_length_LSTM(input_words,
num_hidden=config['num_lstm_units'],
seq_length=self.seq_length)
# we will use this to extract the features for the dialogue
self.last_states = last_states
last_states = tf.reshape(last_states, [-1, config['num_lstm_units'], 1])
scores = tf.matmul(obj_embs, last_states)
scores = tf.reshape(scores, [-1, tf.shape(self.obj_cats)[1]])
def masked_softmax(scores, mask):
# subtract max for stability
scores = scores - tf.tile(tf.reduce_max(scores, axis=(1,), keep_dims=True), [1, tf.shape(scores)[1]])
# compute padded softmax
exp_scores = tf.exp(scores)
exp_scores *= mask
exp_sum_scores = tf.reduce_sum(exp_scores, axis=1, keep_dims=True)
return exp_scores / tf.tile(exp_sum_scores, [1, tf.shape(exp_scores)[1]])
self.softmax = masked_softmax(scores, self.obj_mask)
self.selected_object = tf.argmax(self.softmax, axis=1)
self.loss = tf.reduce_mean(utils.cross_entropy(self.softmax, self.targets))
self.error = tf.reduce_mean(utils.error(self.softmax, self.targets))
def get_loss(self):
return self.loss
def get_accuracy(self):
return 1. - self.error
# def find_object(self, sess, dialogue, seq_length, ground_data):
# """Inputs:
# High-level method that returns whether the guesser managed to find the correct object given a dialogue and game information
#
# Example
# --------
# {'question': [[1, 500, 3, 5, 2], [1, 48, 12, 2, 4]],
# 'seq_length': [5, 4] length of the sequence (=first padding token(4) or array shape)
# 'ground_data', {
# obj_mask : [[1,1,1,1], [1,1,1,0]], # 4 objects / 3 objects
# obj_spats : np.array((2,8)), # spatial info by object
# obj_cats : [[10,22,11,10], [5,10,10]], # obj cat Ex [person,dog,cat,person],[kite,person,person]
# object_indices : [3,0] # indices for correct object, e.g. person:1 / kite:0
# },
# 'spatial': [[-0.5, 0.8, 0.7, 0.5, 0.4, 0.56, -0.3, -0.1]],
# 'seq_length': [5]}
# """
#
# # To avoid code duplication, we can:
# # - create a predict method
# # guesser_input = dict(ground_data) # shallow copy
# # guesser_input["question"] = dialogue
# # guesser_input["seq_length"] = seq_length
# # selected_object = self.predict(sess, guesser_input) # return predicted_object (or softmax)
# # found = (selected_object == ground_data["targets"])
# # OR found = (np.argmax(selected_object, axis=1) == ground_data["targets"]) if softmax
#
# selected_object, softmax = sess.run([self.selected_object, self.softmax], feed_dict={
# self.dialogues: dialogue,
# self.seq_length: seq_length,
# self.mask: ground_data["obj_mask"],
# self.obj_spats: ground_data["obj_spats"],
# self.obj_cats: ground_data["obj_cats"],
# })
#
# found = (selected_object == ground_data["targets"])
#
# return found, softmax
#
#
|
StarcoderdataPython
|
11211465
|
from selenium.webdriver import FirefoxProfile
from tools.file import FileUtil
class FireFoxProfile:
DOWNLOAD_TO_DESK_TOP = 0
DOWNLOAD_TO_DEFAULT_PATH = 1
DOWNLOAD_TO_CUSTOMER = 2
def __init__(self):
self.__profile = FirefoxProfile()
def set_browser_download_path(self, path):
r"""
e.g. D:\c\x\a
If neither the "x" folder nor the "a" folder exists, files are downloaded to the default path.
:param path:
:return:
"""
path = FileUtil.mkdirs(path)
self.__profile.set_preference("browser.download.dir", path)
return self
def use_download_path(self, level):
if not isinstance(level, int):
raise ValueError("level must be an integer")
if level < 0 or level > 2:
raise ValueError("level must be between 0 and 2")
# folderList: 0 = desktop, 1 = default download path, 2 = custom path
self.__profile.set_preference("browser.download.folderList", level)
return self
def close_download_interface(self):
"""
Hide the download manager UI when a download starts
:return:
"""
self.__profile.set_preference("browser.download.manager.showWhenStarting", False)
return self
def close_download_window(self):
"""
Do not use a separate download window
:return:
"""
self.__profile.set_preference("browser.download.manager.useWindow", False)
return self
def close_focus(self):
"""
Do not take focus when a download starts
:return:
"""
self.__profile.set_preference("browser.download.manager.focusWhenStarting", False)
return self
def close_exe_file_alert(self):
"""
Disable the alert shown when opening .exe files
:return:
"""
self.__profile.set_preference("browser.download.manager.alertOnEXEOpen", False)
return self
def never_ask_open_file_mime(self, mime):
"""
Do not show the open-file prompt for the given MIME type
:param mime: "application/pdf"
:return:
"""
self.__profile.set_preference("browser.helperApps.neverAsk.openFile", mime)
return self
def never_ask_save_to_disk(self, mime):
"""
Do not prompt to save to disk when downloading files of the given MIME type
:param mime:
:return:
"""
self.__profile.set_preference("browser.helperApps.neverAsk.saveToDisk", mime)
return self
def close_info_alert_when_download_finish(self):
"""
Disable the notification shown when a download completes
:return:
"""
self.__profile.set_preference("browser.download.manager.showAlertOnComplete", False)
return self
def dont_close_window_when_download_finish(self):
"""
Keep the download manager window open when downloads finish
:return:
"""
self.__profile.set_preference("browser.download.manager.closeWhenDone", False)
return self
def make(self):
return self.__profile
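# Usage sketch (assumed, not part of the original module): every setter returns self,
# so the profile can be built by chaining and handed to a Selenium 3.x Firefox driver.
#   profile = (FireFoxProfile()
#              .set_browser_download_path(r"D:\downloads")
#              .use_download_path(FireFoxProfile.DOWNLOAD_TO_CUSTOMER)
#              .never_ask_save_to_disk("application/pdf")
#              .close_download_interface()
#              .make())
#   driver = webdriver.Firefox(firefox_profile=profile)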
|
StarcoderdataPython
|
5049237
|
# -*- coding: utf-8 -*-
import ccxtpro
from asyncio import run
print('CCXT Pro Version:', ccxtpro.__version__)
# This example will run silent and will return your balance only when the balance is updated.
#
# 1. launch the example with your keys and keep it running
# 2. go to the trading section on the website
# 3. place an order on a spot market
# 4. see your balance updated in the example
#
# Warning! This example might produce a lot of output to your screen
async def watch_balance(exchange):
await exchange.load_markets()
# exchange.verbose = True # uncomment for debugging purposes if necessary
balance = await exchange.fetch_balance()
print('---------------------------------------------------------')
print(exchange.iso8601(exchange.milliseconds()))
print(balance)
print('')
while True:
try:
update = await exchange.watch_balance()
balance = exchange.deep_extend(balance, update)
# it will print the balance update when the balance changes
# if the balance remains unchanged the exchange will not send it
print('---------------------------------------------------------')
print(exchange.iso8601(exchange.milliseconds()))
print(balance)
print('')
except Exception as e:
print('watch_balance() failed')
print(type(e).__name__, str(e))
break
async def main():
exchange = ccxtpro.binance({
'apiKey': 'YOUR_API_KEY',
'secret': 'YOUR_SECRET',
})
await watch_balance(exchange)
await exchange.close()
run(main())
|
StarcoderdataPython
|
3331767
|
<filename>utiles/corrige_presentacion.py<gh_stars>0
#!/bin/python
import connect, MySQLdb
#import sys
#tabla=sys.argv[1]
db=MySQLdb.connect(host='localhost',user=connect.user,passwd=connect.passwd,db=connect.db)
cursor=db.cursor()
sql='select * from articulos where presentacion like "%cc%"'
cursor.execute(sql)
resultado=cursor.fetchall()
for registro in resultado:
sql='update articulos set contenido="'+str(registro[5])+'", presentacion="" where id="'+str(registro[0])+'"'
cursor.execute(sql)
#print sql
#table structure
#--------------------------------------
# 0 id
# 1 codigo_interno
# 2 marca
# 3 descripcion
# 4 contenido
# 5 presentacion
# 6 codigo_barra
# 7 fecha
# 8 hora
# 9 clasificacion
# 10 subclasificacion
#--------------------------------------
|
StarcoderdataPython
|
3380064
|
<reponame>apolat2018/LSAT<filename>tune_mlp.py
# -*- coding: cp1254 -*-
"""
This script tunes the Multi Layer Perceptron algorithm. The RandomizedSearchCV method can be used.
A graph is plotted for each selected tuning parameter. The values of the Success rate
and Prediction rate are also printed on screen.
Created on Mon Nov 5 22:30:05 2018
@author: AP
"""
#////////////////////IMPORTING THE REQUIRED LIBRARIES/////////////////////////
import arcpy
import os
from arcpy.sa import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import RandomizedSearchCV
arcpy.env.overwriteOutput = True
#////////////////////////////Getting Input Parameters//////////////////////////
rec=arcpy.GetParameterAsText(0)#the folder containing the output data of the Data Preparation script
sf=arcpy.GetParameterAsText(1)#output files are saved to this folder
wt=str(arcpy.GetParameterAsText(2))#weighting data type (Frequency ratio or Information Value)
tuning_parameter=arcpy.GetParameterAsText(3)#Select a parameter for tuning
h_layer=arcpy.GetParameterAsText(4)#hidden layer size
act=arcpy.GetParameterAsText(5)#activation
slv=arcpy.GetParameterAsText(6)#solver
alpha=float(arcpy.GetParameterAsText(7))#Alpha
l_rate=arcpy.GetParameterAsText(8)#learning rate
l_rate_init=float(arcpy.GetParameterAsText(9))#learning rate init
max_it=int(arcpy.GetParameterAsText(10))#maximum iteration number
mom=float(arcpy.GetParameterAsText(11))#momentum
RS=arcpy.GetParameterAsText(12)
arcpy.AddMessage(RS)
arcpy.env.workspace=rec
#//////////////////checking Hidden layer size single or multi./////////////////
h_layer=h_layer.split(";")
layer_lst=[]
for h in h_layer:
h=int(h)
layer_lst.append(h)
arcpy.AddMessage(len(layer_lst))
if len(layer_lst)==1:
hls=layer_lst[0]
else:
hls=tuple(layer_lst)#tuple for Hidden layer size parameter
arcpy.AddMessage(layer_lst)
arcpy.AddMessage(hls)
os.chdir(rec)
arcpy.AddMessage("Starting MLP Analysis...")
#////////////////Starting Tuning///////////////////////////////////////////////
arcpy.AddMessage("Starting analysis with MLP algorithm")
os.chdir(rec)
#//////Checking weighting data type: Frequency ratio or Information value////////////
if wt=="frequency ratio":
trn="train_fr.csv"
pre="pre_fr.csv"
tst="valid_fr.csv"
else:
trn="train_iv.csv"
pre="pre_iv.csv"
tst="valid_iv.csv"
#Loading train data
veriler=pd.read_csv(trn)
veriler=veriler.replace(-9999,"NaN")
#Loading analysis data
analiz=pd.read_csv(pre)
analiz=analiz.replace(-9999,"NaN")
#Loading validation data
veriler_v=pd.read_csv(tst)
veriler_v=veriler_v.replace(-9999,"NaN")
#Preparing parameters
va,vb=veriler.shape
aa,ab=analiz.shape
ta,tb=veriler_v.shape
parametreler=veriler.iloc[:,2:vb].values
param_validation=veriler_v.iloc[:,2:tb].values
#Preparing label (class) data
cls=veriler.iloc[:,1:2].values
cls_v=veriler_v.iloc[:,1:2].values
#Preparing analysis data
pre=analiz.iloc[:,2:ab-2].values
##preparing Coordinate data
koor=analiz.iloc[:,ab-2:ab].values
s_train=va
s_analiz=aa
koor=pd.DataFrame(data=koor,index=range(aa),columns=["x","y"])
#Converting NaN values to median
imputer= Imputer(missing_values='NaN', strategy = 'median', axis=0 )
parametreler=imputer.fit_transform(parametreler)
param_validation=imputer.fit_transform(param_validation)
pre=imputer.fit_transform(pre)
cls=imputer.fit_transform(cls)
cls_v=imputer.fit_transform(cls_v)
sc1=StandardScaler()
sc1.fit(parametreler)
parametreler=sc1.transform(parametreler)
pre=sc1.transform(pre)
param_validation=sc1.transform(param_validation)
#train-test splitting
pre=pd.DataFrame(data=pre)
x_train=pd.DataFrame(data=parametreler)
y_train=pd.DataFrame(data=cls)
x_test=pd.DataFrame(data=param_validation)
y_test=pd.DataFrame(data=cls_v)
#//////////////////////Tuning//////////////////////////////////////////////////
h_l_s = [5,10,15, 20, 30, 40, 50, 60, 70, 80,90,100]
activation=["identity", "logistic", "tanh", "relu"]
solvr=["lbfgs", "sgd", "adam"]
alph=[0.00001,0.0001,0.0005,0.001,0.01]
learning_r=["constant", "invscaling", "adaptive"]
learning_r_i=[0.0001,0.001,0.1,0.5]
max_iters=[250,500,1000,2000,5000]
momentums=[0.5,0.6,0.7,0.8,0.9]
train_results=[]
test_results=[]
os.chdir(sf)
#////////////////////////////////RandomizedSearchCV////////////////////////////
if RS=="true":
arcpy.AddMessage("RandomizedsearchCV method was selected")
arcpy.AddMessage("Please wait....This might take a while")
#.................................RandomizedSearchCV.................................
model=MLPClassifier(tol=1e-5)
parameters={"hidden_layer_sizes":[5,10,15, 20, 30, 40, 50, 60, 70, 80,90,100],"activation":["identity", "logistic", "tanh", "relu"],"solver":["lbfgs", "sgd", "adam"],
"alpha":[0.00001,0.0001,0.0005,0.001,0.01],"learning_rate":["constant", "invscaling", "adaptive"],"learning_rate_init":[0.0001,0.001,0.1,0.5],
"max_iter":[250,500,1000,2000,5000],"momentum":[0.5,0.6,0.7,0.8,0.9]}
clf=RandomizedSearchCV(estimator=model,param_distributions=parameters,cv=5,random_state=0)
clf.fit(x_train,y_train)
arcpy.AddMessage("best parameteres={}".format(clf.best_params_))
#////////////////////////////////Other tuning proccesses///////////////////////
#Tuning graphs will be saved as .png file
else:
if tuning_parameter=="hidden_layer_size":
for i in h_l_s:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=i,activation=act,solver=slv,
alpha=alpha,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
arcpy.AddMessage("Success rate={}".format(roc_auc))
y_pred = mlp.predict(x_test)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(h_l_s, train_results, "b", label="Train AUC")
line2, = plt.plot(h_l_s, test_results, "r", label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
# for a,b,c in zip(h_l_s,train_results,test_results):#show value of success and prediction
# plt.text(a,b,str(b))
# plt.text(a,c,str(c))
plt.ylabel("AUC score")
plt.xlabel("hidden_layer_size")
plt.savefig("hidden_layer_size.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="activation_function":
for i in activation:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=i,solver=slv,
alpha=alpha,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
arcpy.AddMessage("Success rate={}".format(roc_auc))
y_pred = mlp.predict(x_test)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1,=plt.plot(range(len(activation)), train_results, "b", label="Train AUC")
line2,=plt.plot(range(len(activation)), test_results, "r", label="Test AUC")
plt.xticks(range(len(activation)),activation)
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Activation function")
plt.grid(True)
plt.savefig("activation_f.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="solver":
for i in solvr:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=i,
alpha=alpha,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
y_pred = mlp.predict(x_test)
arcpy.AddMessage("Success rate={}".format(roc_auc))
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1,=plt.plot(range(len(solvr)), train_results, "b", label="Train AUC")
line2,=plt.plot(range(len(solvr)), test_results, "r", label="Test AUC")
plt.xticks(range(len(solvr)),solvr)
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Solver")
plt.grid(True)
plt.savefig("solver.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="alpha":
for i in alph:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=slv,
alpha=i,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
y_pred = mlp.predict(x_test)
arcpy.AddMessage("Success rate={}".format(roc_auc))
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(alph, train_results, "b", label="Train AUC")
line2, = plt.plot(alph, test_results, "r", label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Alpha")
plt.grid(True)
plt.savefig("alpha.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="learning_rate":
for i in learning_r:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=slv,
alpha=alpha,learning_rate=i,learning_rate_init=l_rate_init,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
arcpy.AddMessage("Success rate={}".format(roc_auc))
y_pred = mlp.predict(x_test)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1,=plt.plot(range(len(learning_r)), train_results, "b", label="Train AUC")
line2,=plt.plot(range(len(learning_r)), test_results, "r", label="Test AUC")
plt.xticks(range(len(learning_r)),learning_r)
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Learning rate")
plt.grid(True)
plt.savefig("learning_rate.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="learning_rate_init":
for i in learning_r_i:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=slv,
alpha=alpha,learning_rate=l_rate,learning_rate_init=i,max_iter=max_it,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
y_pred = mlp.predict(x_test)
arcpy.AddMessage("Success rate={}".format(roc_auc))
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(learning_r_i, train_results, "b", label="Train AUC")
line2, = plt.plot(learning_r_i, test_results, "r", label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("learning_rate_init")
plt.grid(True)
plt.savefig("learning_rate_init.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="max_iter":
for i in max_iters:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=slv,
alpha=alpha,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=i,momentum=mom)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
arcpy.AddMessage("Success rate={}".format(roc_auc))
y_pred = mlp.predict(x_test)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(max_iters, train_results, "b", label="Train AUC")
line2, = plt.plot(max_iters, test_results, "r", label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Max_iter")
plt.grid(True)
plt.savefig("max_iter.png",dpi=150)
plt.show()
plt.close("all")
elif tuning_parameter=="momentum":
for i in momentums:
arcpy.AddMessage("testing {} value".format(i))
mlp=MLPClassifier(hidden_layer_sizes=hls,activation=act,solver=slv,
alpha=alpha,learning_rate=l_rate,learning_rate_init=l_rate_init,max_iter=max_it,momentum=i)
mlp.fit(x_train, y_train)
train_pred = mlp.predict(x_train)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_train, train_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
train_results.append(roc_auc)
arcpy.AddMessage("Success rate={}".format(roc_auc))
y_pred = mlp.predict(x_test)
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(false_positive_rate, true_positive_rate)
test_results.append(roc_auc)
arcpy.AddMessage("predict rate={}".format(roc_auc))
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(momentums, train_results, "b", label="Train AUC")
line2, = plt.plot(momentums, test_results, "r", label="Test AUC")
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel("AUC score")
plt.xlabel("Momentum")
plt.grid(True)
plt.savefig("momentum.png",dpi=150)
plt.show()
plt.close("all")
arcpy.ClearWorkspaceCache_management()
|
StarcoderdataPython
|
5077192
|
<filename>tools/json_update.py
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*
"""
PCI ID Vendor/Device database collector
"""
from __future__ import unicode_literals, print_function
import json
import sys
import os
CUR_PATH = os.path.dirname(__file__)
ABS_PATH = os.path.abspath(CUR_PATH)
ROOT_DIR = os.path.dirname(ABS_PATH)
sys.path.insert(1, ROOT_DIR)
from helpers.pciids_db import VendorPciid # pylint: disable=wrong-import-position
from helpers.common import JSON_PATH # pylint: disable=wrong-import-position
if __name__ == '__main__':
# tries to refresh local json
IDS = VendorPciid('1002')
PCIIDS = IDS.get_vendor_pciids()
print('{}: {}'.format('Can update json', IDS.save_json))
if IDS.save_json:
print('Saving JSON under: {}'.format(JSON_PATH))
with open(JSON_PATH, 'w') as file_dump:
json.dump(PCIIDS, file_dump, indent=4, sort_keys=True)
|
StarcoderdataPython
|
4858737
|
# -*- coding: utf-8 -*-
#
import os
def parse_project_and_task_from_dag_id(dag_id):
"""Parse project and task from dag id.
Args:
dag_id (str): The id of DAG.
Returns:
(tuple of str): The first item is project. The second item is task.
If dag_id is invalid, will return empty string.
"""
if not dag_id:
return '', ''
ids = dag_id.split('__')
if len(ids) >= 3:
return ids[1], ids[2]
elif len(ids) == 2:
return ids[1], ''
else:
return '', ''
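# Illustrative examples (assuming the '<prefix>__<project>__<task>' dag_id convention
# implied by the split on '__' above):
#   parse_project_and_task_from_dag_id('airflow__sales__daily_load')  # -> ('sales', 'daily_load')
#   parse_project_and_task_from_dag_id('airflow__sales')              # -> ('sales', '')
#   parse_project_and_task_from_dag_id('')                            # -> ('', '')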
def get_default_operator_env(dag_id):
"""Get default environment for operator.
Args:
dag_id (str): The id of DAG.
Returns:
(dict): Environment.
"""
project, task = parse_project_and_task_from_dag_id(dag_id)
return {
'ZS_ENV': os.getenv('ZS_ENV', 'dev'),
'ZS_LOG_LEVEL': os.getenv('ZS_LOG_LEVEL', 'INFO'),
'ZS_FLUENTD_HOST': os.getenv('ZS_FLUENTD_HOST', 'localhost'),
'ZS_FLUENTD_PORT': os.getenv('ZS_FLUENTD_PORT', '24224'),
'ZS_PROJECT': project,
'ZS_TASK': task,
'ZS_SUBTASK': '{{ ti.task_id }}',
'ZS_JOB': '{{ run_id }}',
}
|
StarcoderdataPython
|
6608083
|
<filename>TestingStageEnvWin/coinbacktesting_bt.py<gh_stars>1-10
import coinrepo
import bt
def main():
'''entry point'''
# Get Test Data with all fields
symbol_list = ['BTC', 'ETH']
history = coinrepo.get_coinhistory(symbol_list)
history = history.set_index('Date')
# Pivot to have only price as timeseries
pricehistory = history.pivot(columns='Symbol')['Price']
# Create the strategy
s = bt.Strategy('s1', [bt.algos.RunMonthly(),
bt.algos.SelectAll(),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
# create a backtest and run it
test = bt.Backtest(s, pricehistory)
res = bt.run(test)
res.display()
# Save figures
plot = pricehistory.plot(figsize=(15,5))
fig = plot.get_figure()
fig.savefig("price.png")
plot1 = res.plot_weights(figsize=(15,5))
fig1 = plot1.get_figure()
fig1.savefig("bt_rest.png")
# # Test bt framework installed properly
# data = bt.get('aapl,msft,c,gs,ge', start='2010-01-01')
# print data.head()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3406966
|
<gh_stars>0
'''
first code
'''
import sys
def main():
print("Hello, world!!!")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11382049
|
print("please give me A+")
|
StarcoderdataPython
|
8142102
|
# -*- coding: utf-8 -*-
# python -m cProfile filename.py
# import base packages
import warnings
# warnings.filterwarnings("ignore")
def ignore_warn(*args, **kwargs):
pass
warnings.warn = ignore_warn
import os
import re
import time
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.preprocessing import *
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
# Directory tree
def lst_tree(p='..', n=0):
p = Path(p)
if p.is_file(): # check whether it is a file
print('|' + '\t|' * n + '-' * 4 + p.name)
elif p.is_dir(): # check whether it is a directory
print('|' + '\t|' * n + '-' * 4 + str(p.relative_to(p.parent)) + '\\')
for pt in p.iterdir():
lst_tree(pt, n + 1) # recurse
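# Example invocation (assumed, not in the original script):
#   lst_tree('..')   # prints the tree rooted at the parent directory
#   lst_tree('.')    # prints the tree rooted at the current directory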
|
StarcoderdataPython
|
8023920
|
<gh_stars>10-100
from collections import defaultdict
import gzip
import re
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def readGTF(filename):
"""Open an optionally gzipped GTF file and return a pandas.DataFrame.
"""
# Each column is a list stored as a value in this dict.
result = defaultdict(list)
for i, line in enumerate(lines(filename)):
for key in list(line.keys()):
# This key has not been seen yet, so set it to None for all
# previous lines.
if key not in result:
result[key] = [None] * i
# Ensure this row has some value for each column.
for key in list(result.keys()):
result[key].append(line.get(key, None))
return result
def lines(filename):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if filename.endswith('.gz') else open
with fn_open(filename) as fh:
for line in fh:
if line.startswith('#'):
continue
else:
yield parse(line)
def parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
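# Minimal usage sketch (the file name is an assumption, not part of the original module):
# the dict of column lists returned by readGTF converts directly to a DataFrame.
#   import pandas as pd
#   df = pd.DataFrame(readGTF('annotation.gtf.gz'))
#   df[['seqname', 'feature', 'start', 'end']].head()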
|
StarcoderdataPython
|
1828027
|
<gh_stars>10-100
import cv2
import numpy as np
from .camera.parameters import CameraParams, IntrinsicParams, ExtrinsicParams
from .camera.coordinate_transformation import CoordinateTransformation, rotationMatrix3D#, reverseX, reverseY
from .camera import basic_tools
class InversePerspectiveMapping(object):
def __init__(self, param, sensor):
self.sensor = sensor
intrinsic_params = IntrinsicParams(sensor)
extrinsic_params = ExtrinsicParams(sensor)
self.camera_params = CameraParams(intrinsic_params, extrinsic_params)
self.img_width = 400#eval(sensor.attributes['image_size_x'])
self.img_height = 200#eval(sensor.attributes['image_size_y'])
#self.max_pixel = np.array([self.img_height, self.img_width]).reshape(2,1)
#self.min_pixel = np.array([0, 0]).reshape(2,1)
self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.dtype("uint8"))
self.longitudinal_length = param.longitudinal_length
self.ksize = param.ksize
f = float(self.img_height) / self.longitudinal_length
self.pesudo_K = np.array([ [f, 0, self.img_width/2],
[0, f, self.img_height],
[0, 0, 1] ])
self.reverseXY = basic_tools.np_dot(rotationMatrix3D(0,0,-np.pi/2))
def getIPM(self, image):
self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.dtype("uint8"))
index_array = np.argwhere(image > 200)
index_array = index_array[:,:2]
index_array = np.unique(index_array, axis=0)
index_array = np.array([index_array[:,1], index_array[:,0]])
vehicle_vec = CoordinateTransformation.image2DToWorld3D2(index_array, self.camera_params.K, self.camera_params.R, self.camera_params.t)
vehicle_vec[:,2,0] = 1.0
temp = np.dot(self.pesudo_K, self.reverseXY)
vehicle_vec = np.squeeze(vehicle_vec, axis = 2)
new_image_vec = np.dot(temp, vehicle_vec.T)
new_image_vec = new_image_vec[:2,:]
new_image_vec = new_image_vec[::-1,:]
new_image_y_pixel = new_image_vec[0,:].astype(int)
new_image_x_pixel = new_image_vec[1,:].astype(int)
#self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255
mask = np.where((new_image_x_pixel >= 0)&(new_image_x_pixel < self.img_width))[0]
new_image_x_pixel = new_image_x_pixel[mask]
new_image_y_pixel = new_image_y_pixel[mask]
mask = np.where((new_image_y_pixel >= 0)&(new_image_y_pixel < self.img_height))[0]
new_image_x_pixel = new_image_x_pixel[mask]
new_image_y_pixel = new_image_y_pixel[mask]
self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255
self.empty_image[np.clip(new_image_y_pixel+1,0, self.img_height-1),new_image_x_pixel] = 255
self.empty_image[np.clip(new_image_y_pixel-1,0, self.img_height-1),new_image_x_pixel] = 255
#self.empty_image = cv2.GaussianBlur(self.empty_image, (self.ksize, self.ksize), 25)
return self.empty_image
|
StarcoderdataPython
|
6619677
|
import datetime as _dt
from sqlite3 import Timestamp
import sqlalchemy as _sql
import sqlalchemy.orm as _orm
import passlib.hash as _hash
from sqlalchemy.schema import Column
from sqlalchemy.types import String, Integer, Enum, DateTime, Boolean, ARRAY, Text
from sqlalchemy import ForeignKey
from uuid import UUID, uuid4
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql import func
from fastapi_utils.guid_type import GUID, GUID_DEFAULT_SQLITE
from ..utils.utils import generate_short_id
import bigfastapi.db.database as _database
class VerificationCode(_database.Base):
__tablename__ = "verification_codes"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)
user_id = Column(String(255), ForeignKey("users.id"))
code = Column(String(255), index=True, unique=True)
date_created = Column(DateTime, default=_dt.datetime.utcnow)
class PasswordResetCode(_database.Base):
__tablename__ = "password_reset_codes"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)
user_id = Column(String(255), ForeignKey("users.id"))
code = Column(String(255), index=True, unique=True)
date_created = Column(DateTime, default=_dt.datetime.utcnow)
class Token(_database.Base):
__tablename__ = "tokens"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)
user_id = Column(String(255), ForeignKey("users.id"))
token = Column(String(255), index=True)
date_created = Column(DateTime, default=_dt.datetime.utcnow)
class VerificationToken(_database.Base):
__tablename__ = "verification_tokens"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)
user_id = Column(String(255), ForeignKey("users.id"))
token = Column(String(255), index=True)
date_created = Column(DateTime, default=_dt.datetime.utcnow)
class PasswordResetToken(_database.Base):
__tablename__ = "password_reset_tokens"
id = Column(String(255), primary_key=True, index=True, default=lambda: uuid4().hex)
user_id = Column(String(255), ForeignKey("users.id"))
token = Column(String(255), index=True)
date_created = Column(DateTime, default=_dt.datetime.utcnow)
|
StarcoderdataPython
|
1962355
|
# coding: utf-8
print("なにか入力してください")
#x = raw_input() # only valid in Python 2; equivalent to input() in Python 3
x = input() # in Python 2 this evaluates the typed expression (typing 1+2 returns 3); in Python 3 it is returned as a string. Python 2 raises an error unless the input is an evaluable expression such as a number or "abc"
print("あなたが入力したのは {} です".format(x))
print(type(x))
|
StarcoderdataPython
|
35227
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-29 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('places', '0027_auto_20171229_1606'),
]
operations = [
migrations.AddField(
model_name='fieldtype',
name='is_shown_in_about_place',
field=models.BooleanField(default=False, verbose_name='Show in About Place section'),
),
]
|
StarcoderdataPython
|
1733422
|
<gh_stars>10-100
from sqlalchemy import Column, Integer, String
from ...models.car_viewmodels import Car
from .base_model import BaseModel
class CarDataModel(BaseModel):
__tablename__ = "cars"
id = Column("id", Integer, primary_key=True)
year = Column("year", Integer)
make = Column("make", String(255))
model = Column("model", String(255))
def to_view_model(self) -> Car:
return Car(
year=self.year,
make=self.make,
model=self.model,
)
|
StarcoderdataPython
|
6683956
|
import cv2 as cv
import numpy as np
#pixel arithmetic operations 02
def add_demo(m1,m2):
dest = cv.add(m1,m2)
cv.imshow("add_demo",dest)
def subtract_demo(m1,m2):
dest = cv.subtract(m1,m2)
cv.imshow("subtract_demo", dest)
def divide_demo(m1,m2):
dest = cv.divide(m1,m2)
cv.imshow("divide_demo", dest)
def multiply_demo(m1,m2):
dest = cv.multiply(m1,m2)
cv.imshow("multiply_demo", dest)
def others(m1,m2):
# M1 = cv.mean(m1) mean
# M2 = cv.mean(m2)
M1,dev1 = cv.meanStdDev(m1) # mean and standard deviation (contrast)
M2,dev2 = cv.meanStdDev(m2)
h,w = m1.shape[:2]
print(M1)
print(dev1)
print(M2)
print(dev2)
img = np.zeros([h,w],np.uint8)
m,dev = cv.meanStdDev(img)
print(m)
print(dev)
def contrast_bringhtness_demo(image,c,b):
h,w,ch = image.shape
blank = np.zeros([h,w,ch],image.dtype)
dest = cv.addWeighted(image,c,blank,1-c,b)
cv.imshow("con_br_demo",dest)
src1 = cv.imread("C:/1/LinuxLogo.jpg")
src2 = cv.imread("C:/1/WindowsLogo.jpg")
cv.namedWindow('image1', cv.WINDOW_AUTOSIZE)
cv.imshow('image1', src1)
cv.imshow('image2', src2)
print(src1.shape)
print(src2.shape)
# add_demo(src1,src2)
# subtract_demo(src1,src2)
# divide_demo(src1,src2)
# multiply_demo(src1,src2)
# others(src1,src2)
src = cv.imread("C:/1/1.jpg")
contrast_bringhtness_demo(src,2,0)
# gray = cv.cvtColor(src,cv.COLOR_BGR2GRAY)
# cv.imwrite("C:/1/111.jpg",gray)
cv.waitKey(0)
cv.destroyAllWindows()
|
StarcoderdataPython
|
3340782
|
import threading
from queue import Queue
def worker(num):
"""thread worker function"""
for i in range(100):
print('Worker', num)
return
if __name__ == '__main__':
threads = []
# for i in range(5):
# t = threading.Thread(target=worker, args=(i,))
# threads.append(t)
# t.start()
q = Queue()
q.put(0)
print(q.get())
print(q)
|
StarcoderdataPython
|
4917072
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def findTarget(self, root: TreeNode, k: int) -> bool:
def dfs(node, nodes):
if not node:
return False
c = k - node.val
if c in nodes:
return True
nodes.add(node.val)
return dfs(node.left, nodes) or dfs(node.right, nodes)
if not root:
return False
return dfs(root, set())
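# Illustrative check (assumed driver code, not part of the submission):
#   class TreeNode:
#       def __init__(self, x):
#           self.val = x
#           self.left = None
#           self.right = None
#   root = TreeNode(5); root.left = TreeNode(3); root.right = TreeNode(6)
#   Solution().findTarget(root, 9)   # True  (3 + 6)
#   Solution().findTarget(root, 28)  # False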
|
StarcoderdataPython
|
1749127
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import time
from pathlib import Path
from os import PathLike
from nlp_architect.models.absa import TRAIN_OUT
from nlp_architect.models.absa.train.acquire_terms import AcquireTerms
from nlp_architect.models.absa.train.rerank_terms import RerankTerms
from nlp_architect.models.absa.utils import parse_docs, _download_pretrained_rerank_model
from nlp_architect.utils.io import download_unzip
EMBEDDING_URL = 'http://nlp.stanford.edu/data', 'glove.840B.300d.zip'
EMBEDDING_PATH = TRAIN_OUT / 'word_emb_unzipped' / 'glove.840B.300d.txt'
RERANK_MODEL_DEFAULT_PATH = rerank_model_dir = TRAIN_OUT / 'reranking_model' / 'rerank_model.h5'
class TrainSentiment(object):
def __init__(self, parse: bool = True, rerank_model: PathLike = None):
self.start_time = time.time()
self.acquire_lexicon = AcquireTerms()
if parse:
from nlp_architect.pipelines.spacy_bist import SpacyBISTParser
self.parser = SpacyBISTParser()
else:
self.parser = None
if not rerank_model:
print('using pre-trained reranking model')
rerank_model = _download_pretrained_rerank_model(RERANK_MODEL_DEFAULT_PATH)
download_unzip(*EMBEDDING_URL, EMBEDDING_PATH, license_msg="Glove word embeddings.")
self.rerank = RerankTerms(vector_cache=True, rerank_model=rerank_model,
emb_model_path=EMBEDDING_PATH)
def run(self, data: PathLike = None, parsed_data: PathLike = None):
if not parsed_data:
if not self.parser:
raise RuntimeError("Parser not initialized (try parse=True at init )")
parsed_dir = TRAIN_OUT / 'parsed' / Path(data).stem
self.parse_data(data, parsed_dir)
parsed_data = parsed_dir
generated_aspect_lex = self.acquire_lexicon.acquire_lexicons(parsed_data)
generated_opinion_lex_reranked = \
self.rerank.predict(AcquireTerms.acquired_opinion_terms_path,
AcquireTerms.generic_opinion_lex_path)
return generated_opinion_lex_reranked, generated_aspect_lex
def parse_data(self, data: PathLike, parsed_dir: PathLike):
_, data_size = parse_docs(self.parser, data, out_dir=parsed_dir)
if data_size < 1000:
raise ValueError('The data contains only {0} sentences. A minimum of 1000 '
'sentences is required for training.'.format(data_size))
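# Usage sketch (the data path and file name are assumptions, not from the original module):
#   train = TrainSentiment(parse=True)
#   opinion_lex, aspect_lex = train.run(data='tripadvisor_train.csv')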
|
StarcoderdataPython
|
11200379
|
from .graphics import plot_importance
from .help import *
|
StarcoderdataPython
|
3310308
|
from __future__ import annotations
from rich import box
from rich.align import Align
from rich.console import Console, ConsoleOptions, RenderResult, RenderableType
from rich.style import StyleType
from textual import events
from textual.message import Message
from textual.reactive import Reactive
from textual.widget import Widget
from textual.widgets import ButtonPressed, Button
from rich.panel import Panel
class MyButtonPressed(Message):
def __init__(self, sender: Widget, value: str):
super().__init__(sender)
self.value = value
class MyButton(Button):
mouse_over: Reactive[bool] = Reactive(False)
has_focus: Reactive[bool] = Reactive(False)
def __init__(
self,
label: RenderableType,
name: str | None = None,
style: StyleType = "white on dark_blue",
value: str = "",
):
super().__init__(label, name, style)
self.value = value
def render(self) -> RenderableType:
return self
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
yield Panel(
Align.center(
self.label,
),
border_style="green" if self.mouse_over else "blue",
box=box.HEAVY if self.has_focus else box.ROUNDED,
style=self.button_style,
# height=height,
)
async def on_click(self, event: events.Click) -> None:
await self.emit(MyButtonPressed(self, self.value))
async def on_enter(self, event: events.Enter) -> None:
self.mouse_over = True
async def on_leave(self, event: events.Leave) -> None:
self.mouse_over = False
self.has_focus = False
async def on_mouse_down(self, _) -> None:
self.has_focus = True
async def on_mouse_up(self, _) -> None:
self.has_focus = False
|
StarcoderdataPython
|
8037785
|
import psycopg2
conexion = psycopg2.connect(
database="test_db",
user="postgres",
password="<PASSWORD>",
host="localhost",
port="5432"
)
try:
with conexion:
with conexion.cursor() as cursor:
query = """
INSERT INTO persona(nombre, apellido, email) VALUES(%s, %s, %s)
"""
valores = (
("Odie","Hilll","<EMAIL>"),
("Genevieve","Upton","<EMAIL>"),
("Lorenz","Kuhlman","<EMAIL>")
)
cursor.executemany(query, valores)
registros = cursor.rowcount
print("Se han insertado los datos... {}".format(registros))
except Exception as e:
print("El error fue: ",e)
finally:
conexion.close()
|
StarcoderdataPython
|
28443
|
"""Execute validated & constructed query on device.
Accepts input from front end application, validates the input and
returns errors if input is invalid. Passes validated parameters to
construct.py, which is used to build & run the Netmiko connections or
hyperglass-frr API calls, returns the output back to the front end.
"""
# Standard Library
from ssl import CertificateError
from typing import Iterable
# Third Party
import httpx
# Project
from hyperglass.log import log
from hyperglass.util import parse_exception
from hyperglass.encode import jwt_decode, jwt_encode
from hyperglass.exceptions import RestError, ResponseEmpty
from hyperglass.configuration import params
# Local
from ._common import Connection
class AgentConnection(Connection):
"""Connect to target device via hyperglass-agent."""
async def collect(self) -> Iterable: # noqa: C901
"""Connect to a device running hyperglass-agent via HTTP."""
log.debug("Query parameters: {}", self.query)
client_params = {
"headers": {"Content-Type": "application/json"},
"timeout": params.request_timeout,
}
if self.device.ssl is not None and self.device.ssl.enable:
with self.device.ssl.cert.open("r") as file:
cert = file.read()
if not cert:
raise RestError(
"SSL Certificate for device {d} has not been imported",
level="danger",
d=self.device.name,
)
http_protocol = "https"
client_params.update({"verify": str(self.device.ssl.cert)})
log.debug(
(
f"Using {str(self.device.ssl.cert)} to validate connection "
f"to {self.device.name}"
)
)
else:
http_protocol = "http"
endpoint = "{protocol}://{address}:{port}/query/".format(
protocol=http_protocol, address=self.device._target, port=self.device.port
)
log.debug("URL endpoint: {}", endpoint)
try:
async with httpx.AsyncClient(**client_params) as http_client:
responses = ()
for query in self.query:
encoded_query = await jwt_encode(
payload=query,
secret=self.device.credential.password.get_secret_value(),
duration=params.request_timeout,
)
log.debug("Encoded JWT: {}", encoded_query)
raw_response = await http_client.post(
endpoint, json={"encoded": encoded_query}
)
log.debug("HTTP status code: {}", raw_response.status_code)
raw = raw_response.text
log.debug("Raw Response:\n{}", raw)
if raw_response.status_code == 200:
decoded = await jwt_decode(
payload=raw_response.json()["encoded"],
secret=self.device.credential.password.get_secret_value(),
)
log.debug("Decoded Response:\n{}", decoded)
responses += (decoded,)
elif raw_response.status_code == 204:
raise ResponseEmpty(
params.messages.no_output, device_name=self.device.name,
)
else:
log.error(raw_response.text)
except httpx.exceptions.HTTPError as rest_error:
msg = parse_exception(rest_error)
log.error("Error connecting to device {}: {}", self.device.name, msg)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=msg,
)
except OSError as ose:
log.critical(str(ose))
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error="System error",
)
except CertificateError as cert_error:
log.critical(str(cert_error))
msg = parse_exception(cert_error)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=f"{msg}: {cert_error}",
)
if raw_response.status_code != 200:
log.error("Response code is {}", raw_response.status_code)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.general,
)
if not responses:
log.error("No response from device {}", self.device.name)
raise RestError(
params.messages.connection_error,
device_name=self.device.name,
error=params.messages.no_response,
)
return responses
|
StarcoderdataPython
|
3274698
|
nome = str(input('Digite o seu nome completo: ')).strip()
print('Seu nome em maiúsculo:', nome.upper())
print('Seu nome em minúsculo:', nome.lower())
print('Seu nome tem ao todo: {} letras'.format(len(nome) - nome.count(' ')))
primeiroSeparado = nome.split()
print('Seu primeiro nome tem:', len(primeiroSeparado[0]),
'letras')
#another way is using nome.find(' ')
|
StarcoderdataPython
|
8138202
|
ficha = list()
while True:
nome = input('Nome: ').strip()
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1 + nota2) / 2
ficha.append([nome, [nota1, nota2], media])
# alunos.append(input('Nome: ').strip())
# notas[0].append(float(input('Nota 1: ')))
# notas[1].append(float(input('Nota 2: ')))
continuar = input('Deseja continuar? [S/N] ').upper().strip()
if continuar == 'N':
break
#
# ficha.append(alunos[:])
# ficha.append(notas[:])
print('-=' * 25)
print('{:<6}{:<20}{:<9}'.format('No.', 'NOME', 'MÉDIA'))
print('-' * 35)
for i, x in enumerate(ficha):
print(f'{i:<6}{x[0]:<20}{x[2]:<9.1f}')
while True:
print('-' * 35)
qual_aluno = int(input('Mostrar as notas de qual aluno? (999 interompe): '))
if qual_aluno == 999:
break
else:
print(f'As notas de {ficha[qual_aluno][0]} são {ficha[qual_aluno][1][0]} e {ficha[qual_aluno][1][1]}')
|
StarcoderdataPython
|
5009836
|
<filename>bus2pwl/bus2pwl.py
#!/usr/bin/python
# (C) <NAME> <<EMAIL>>
from __future__ import print_function, with_statement
import os
import re
import sys
from decimal import Decimal
# TODO:
# - allow comments in input file
# - passthru (header) comments
# - system info + datetime in output header
def usage():
print('''
Usage: python bus2pwl.py digitalinputs.bus
bus file format
===============
[one name=value parameter per line]
[space-separated column labels for voltage source names AND node names]
[one line per bit interval of 0 or 1 for each column, no spaces between]
Example .bus contents for testing an adder
==========================================
clockdelay=500p
clockrisefall = 100p
risefall=200p
bittime=1n
bitlow=0
bithigh=5
a3 a2 a1 a0 b3 b2 b1 b0
00000000
00010001
00010010
11111111
01011010
01011011
Include the generated file, which also includes the Voltage-source definitions
for the input nodes as:
.include "foo.pwl"
The "clockdelay=" parameter, if present, also generates a voltage source for a
clock as "Vclock clock 0 PWL ..." with a rising edge at every bittime with an
offset of clockdelay. Hence, set "clockdelay=" to the maximum setup time of
your registers and the data on each line will be clocked in at the right time.
Parameter "clockrisefall=" is optional to separately specify the clock rise/
fall time if it is different from the data lines rise/fall.
''')
def info(s):
print('INFO:', s)
def error(s):
print('ERROR:', s)
sys.exit(1)
def warn(s):
print('WARNING:', s)
def expand_bus_notation(names):
nodes = []
for n in names:
# parse into: name[left:right]suffix
name, lbrack, tail = n.partition('[')
left, colon, end = tail.partition(':')
right, rbrack, suffix = end.partition(']')
# only expand a complete bus notation
if lbrack and colon and rbrack:
try:
start = int(left)
stop = int(right)
except ValueError:
warn('Incomplete or non-integer range, passing thru: %s' % n)
nodes.append(n)
else:
inc = 1 if (stop > start) else -1
for i in range(start, (stop + inc), inc):
s = '%s[%i]%s' % (name, i, suffix)
nodes.append(s)
else: # pass-thru all others as-is
nodes.append(n)
return nodes
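# Illustrative call (not part of the original script); both endpoints of the range are included:
#   expand_bus_notation(['data[3:0]', 'clk']) -> ['data[3]', 'data[2]', 'data[1]', 'data[0]', 'clk']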
def generate_waveform(d):
t = Decimal('0.0')
#first bit interval starts at t=0, start from this value
lastbit = d[0]
bitv = Decimal(lastbit) * (bithigh - bitlow) + bitlow
s = '+ 0 %s' % str(bitv)
output(s)
trf = risefall
tb = bittime - risefall
t += trf + tb
for bit in d[1:]:
# only output a point when there is a change
if bit != lastbit:
ti = t + trf
tf = ti + tb
lastbitv = Decimal(lastbit) * (bithigh - bitlow) + bitlow
bitv = Decimal(bit) * (bithigh - bitlow) + bitlow
output('+ %s %s' % (str(t), str(lastbitv)))
output('+ %s %s' % (str(ti), str(bitv)))
#output('+ %s %s' % (str(tf), str(bitv)))
t += trf + tb
lastbit = bit
RE_UNIT = re.compile(r'^([0-9e\+\-\.]+)(t|g|meg|x|k|mil|m|u|n|p|f)?')
def unit(s):
"""Takes a string and returns the equivalent float.
'3.0u' -> 3.0e-6"""
mult = {'t' :Decimal('1.0e12'),
'g' :Decimal('1.0e9'),
'meg':Decimal('1.0e6'),
'x' :Decimal('1.0e6'),
'k' :Decimal('1.0e3'),
'mil':Decimal('25.4e-6'),
'm' :Decimal('1.0e-3'),
'u' :Decimal('1.0e-6'),
'n' :Decimal('1.0e-9'),
'p' :Decimal('1.0e-12'),
'f' :Decimal('1.0e-15')}
m = RE_UNIT.search(s.lower())
try:
if m.group(2):
return Decimal(Decimal(m.group(1)))*mult[m.group(2)]
else:
return Decimal(m.group(1))
except:
error("Bad unit: %s" % s)
def read_params(f):
"""Read name=value lines from the input file.
Validate against required parameters.
Return dict of the pairs.
"""
requiredParams = ('risefall', 'bittime', 'bitlow', 'bithigh')
params = {'clockdelay':None, 'clockrisefall':None}
#get parameters
fposition = f.tell()
line = f.readline()
while '=' in line:
name, value = line.split('=')
name = name.strip()
value = value.strip()
params[name] = value
fposition = f.tell()
line = f.readline()
#fixup file position back to start of next line
f.seek(fposition)
#check
for p in requiredParams:
if p not in params:
error("%s is not specified, aborting." % p)
info('Parameters:')
for p,v in params.items():
info(' %s = %s' % (p, v))
return params
def parse_words(words):
"""Accepts a list of strings.
Returns a list of '1' or '0' strings.
"""
bits = []
for w in words:
if w.startswith('0x'):
n = 4 * (len(w) - 2)
w = bin(int(w[2:], 16))[2:].zfill(n)
elif w.startswith('0b'):
w = w[2:]
bits.extend([b for b in w])
return bits
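# Illustrative call (not part of the original script): hex words expand to 4 bits per
# hex digit, '0b' words and bare bit strings pass through bit by bit.
#   parse_words(['0x3', '0b10', '1']) -> ['0', '0', '1', '1', '1', '0', '1']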
def read_vectors(f, nodes):
"""Read the data vectors from the rest of the file.
"""
signals = {n:[] for n in nodes}
n_signals = len(nodes)
for line in f:
line = line.strip()
words = line.split()
bits = parse_words(words)
if len(bits) != n_signals:
error("Must have same # characters as column labels: %s" % line)
for i in range(n_signals):
signals[nodes[i]].append(bits[i])
return signals
def read_busfile(bus):
#read in the bus definition file
with open(bus) as f:
params = read_params(f)
#next line is column labels
line = f.readline()
names = [c.strip() for c in line.strip().split()]
nodes = expand_bus_notation(names)
params['nodes'] = nodes
info("Columns: %s" % nodes)
#read in signal vectors
signals = read_vectors(f, nodes)
params['signals'] = signals
return params
# python 2 vs 3 compatibility
try:
dict.iteritems
except AttributeError:
#this is python3
def iteritems(d):
return iter(d.items())
else:
def iteritems(d):
return d.iteritems()
if len(sys.argv) < 2:
usage()
sys.exit(1)
bus_name = sys.argv[1]
if not bus_name.endswith('.bus'):
usage()
print("Error: File must have a .bus extension")
sys.exit(1)
# read and parse input file
params = read_busfile(bus_name)
#get the numbers
risefall = unit(params['risefall'])
bittime = unit(params['bittime'])
bitlow = unit(params['bitlow'])
bithigh = unit(params['bithigh'])
#generate output file
pwl_name = bus_name.replace('.bus', '.pwl')
with open(pwl_name, 'w') as fpwl:
output = lambda s: print(s, file=fpwl)
#output clock definition if specified
if params['clockdelay']:
#calculate clock high time
if params['clockrisefall']:
clockrisefall = unit(params['clockrisefall'])
else:
clockrisefall = risefall
clockhigh = Decimal('0.5') * (bittime - clockrisefall)
clockperiod = bittime
params['clockrisefall'] = str(clockrisefall)
params['clockhigh'] = str(clockhigh)
params['clockperiod'] = str(clockperiod)
clk = 'Vclock clock 0 pulse(%(bitlow)s %(bithigh)s %(clockdelay)s %(clockrisefall)s %(clockrisefall)s %(clockhigh)s %(clockperiod)s)' % params
info(clk)
output(clk)
output('')
#output each input source
for name, signal in iteritems(params['signals']):
#first line
s = 'V%s %s 0 PWL' % (name, name)
info(s)
output(s)
generate_waveform(signal)
output('')
info('Output file: ' + pwl_name)
|
StarcoderdataPython
|
1755669
|
<filename>Chapter13_code/ch13_r02_restrict_access_to_web_accessible_paths/controllers/main.py
# -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import http
from openerp.http import request
class Main(http.Controller):
@http.route('/my_module/all-books', type='http', auth='none')
def all_books(self):
records = request.env['library.book'].sudo().search([])
result = '<html><body><table><tr><td>'
result += '</td></tr><tr><td>'.join(records.mapped('name'))
result += '</td></tr></table></body></html>'
return result
@http.route('/my_module/all-books/mark_mine', type='http', auth='public')
def all_books_mark_mine(self):
records = request.env['library.book'].sudo().search([])
result = '<html><body><table>'
for record in records:
result += '<tr>'
if record.author_ids & request.env.user.partner_id:
result += '<th>'
else:
result += '<td>'
result += record.name
if record.author_ids & request.env.user.partner_id:
result += '</th>'
else:
result += '</td>'
result += '</tr>'
result += '</table></body></html>'
return result
@http.route('/my_module/all-books/mine', type='http', auth='user')
def all_books_mine(self):
records = request.env['library.book'].search([
('author_ids', 'in', request.env.user.partner_id.ids),
])
result = '<html><body><table><tr><td>'
result += '</td></tr><tr><td>'.join(records.mapped('name'))
result += '</td></tr></table></body></html>'
return result
@http.route('/my_module/all-books/mine_base_group_user', type='http',
auth='base_group_user')
def all_books_mine_base_group_user(self):
return self.all_books_mine()
# this is for the exercise
@http.route('/my_module/all-books/mine_groups', type='http',
auth='groups(base.group_no_one)')
def all_books_mine_groups(self):
return self.all_books_mine()
|
StarcoderdataPython
|
3571054
|
# encoding: utf-8
from pkg_resources import resource_filename
from typing_extensions import Final
ISLE_DOWNLOAD_URL = "https://github.com/uiuc-sst/g2ps/tree/master/English/ISLEdict.txt"
DEFAULT_ISLE_DICT_PATH = resource_filename("pysle", "data/ISLEdict.txt")
class LengthOptions:
SHORTEST: Final = "shortest"
LONGEST: Final = "longest"
validOptions = [SHORTEST, LONGEST]
class ErrorReportingMode:
SILENCE: Final = "silence"
WARNING: Final = "warning"
ERROR: Final = "error"
validOptions = [SILENCE, WARNING, ERROR]
class AcceptabilityMode:
OK: Final = "ok"
ONLY: Final = "only"
NO: Final = "no"
validOptions = [OK, ONLY, NO]
|
StarcoderdataPython
|
1941343
|
<gh_stars>0
import hashlib
class Transaction:
def __init__(self, from_address, to_address, amount, timestamp):
self.from_address = from_address
self.to_address = to_address
self.amount = amount
self.timestamp = timestamp
def __repr__(self):
return (
f'Transaction('
f'from_address={self.from_address}, '
f'to_address={self.to_address}, '
f'amount={self.amount}, '
f'timestamp={self.timestamp})'
)
|
StarcoderdataPython
|
221079
|
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
return n > 0 and (n & (n - 1)) == 0
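# Illustrative check of the bit trick: a power of two has exactly one bit set,
# so n & (n - 1) clears it (8 & 7 == 0), while 6 & 5 == 4 is non-zero.
# e.g. Solution().isPowerOfTwo(8) -> True, Solution().isPowerOfTwo(6) -> False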
|
StarcoderdataPython
|
9788057
|
<gh_stars>1-10
from test import positions__offset_centre
from test_autolens.integration.tests.imaging.runner import run_a_mock
class TestCase:
def _test_positions__offset_centre(self):
run_a_mock(positions__offset_centre)
|
StarcoderdataPython
|
3415976
|
# snippet list generation
import os
# list of snippet files
snip_list = [x[:-3] for x in os.listdir (os.path.dirname (__file__))
if not x.startswith('_') and x.endswith('.py')]
snip_list.sort()
# function used by some or all snippets
def snippet_normalize (ctx, width, height):
ctx.scale (width, height)
ctx.set_line_width (0.04)
|
StarcoderdataPython
|
8006264
|
<filename>setup.py
from setuptools import setup
setup(
name='deep-sentiment',
version='0.1.0',
packages=['sentiment'],
url='https://github.com/lanPN85/deep-sentiment',
license='MIT',
author='<NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
description='LSTM-CNN sentiment analysis library',
install_requires=[
'keras==2.0.6', 'tensorflow>=1.2.1',
'gensim==2.2.0', 'nltk>=3.2.4', 'h5py>=2.7.0'
]
)
|
StarcoderdataPython
|
8130638
|
<filename>hydra/_internal/core_plugins/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .bash_completion import BashCompletion
from .basic_launcher import BasicLauncher
from .basic_sweeper import BasicSweeper
__all__ = ["BasicLauncher", "BashCompletion", "BasicSweeper"]
|
StarcoderdataPython
|
4918479
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# libthumbor - python extension to thumbor
# http://github.com/heynemann/libthumbor
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 <NAME> <EMAIL>
'''Encrypted URLs for thumbor encryption.'''
from __future__ import absolute_import
import base64
import hmac
import hashlib
from six import text_type, b, PY3
from libthumbor.url import unsafe_url, plain_image_url
class CryptoURL(object):
'''Class responsible for generating encrypted URLs for thumbor'''
def __init__(self, key):
'''
Initializes the encryptor with the proper key
:param key: secret key to use for hashing.
'''
if isinstance(key, text_type):
key = str(key)
self.key = key
self.computed_key = (key * 16)[:16]
self.hmac = hmac.new(b(key), digestmod=hashlib.sha1)
def generate_new(self, options):
url = plain_image_url(**options)
_hmac = self.hmac.copy()
_hmac.update(text_type(url).encode('utf-8'))
signature = base64.urlsafe_b64encode(_hmac.digest())
if PY3:
signature = signature.decode('ascii')
return '/%s/%s' % (signature, url)
def generate(self, **options):
'''Generates an encrypted URL with the specified options'''
if options.get('unsafe', False):
return unsafe_url(**options)
else:
return self.generate_new(options)
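# Illustrative usage sketch (the option names image_url/width/height are assumptions
# about the thumbor URL options accepted by plain_image_url, not verified here):
#
#   crypto = CryptoURL(key='my-security-key')
#   url = crypto.generate(image_url='path/to/image.jpg', width=300, height=200)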
|
StarcoderdataPython
|
11358081
|
<gh_stars>1-10
import CSDGAN.utils.db as db
import CSDGAN.utils.constants as cs
import utils.image_utils as iu
import utils.utils as uu
from CSDGAN.classes.image.ImageDataset import OnlineGeneratedImageDataset
from CSDGAN.classes.image.ImageNetD import ImageNetD
from CSDGAN.classes.image.ImageNetG import ImageNetG
from CSDGAN.classes.image.ImageNetE import ImageNetE
from CSDGAN.classes.NetUtils import GaussianNoise
from CSDGAN.classes.CGANUtils import CGANUtils
import time
from torch.utils import data
import imageio
import copy
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.utils as vutils
class ImageCGAN(CGANUtils):
"""CGAN for image-based data sets"""
def __init__(self, train_gen, val_gen, test_gen, device, nc, nz, num_channels, sched_netG, path, le, ohe,
label_noise, label_noise_linear_anneal, discrim_noise, discrim_noise_linear_anneal,
netG_nf, netG_lr, netG_beta1, netG_beta2, netG_wd,
netD_nf, netD_lr, netD_beta1, netD_beta2, netD_wd,
netE_lr, netE_beta1, netE_beta2, netE_wd,
fake_data_set_size, fake_bs,
eval_num_epochs, early_stopping_patience, grid_num_examples=10):
super().__init__()
self.path = path # default file path for saved objects
self.init_paths()
# Data generator
self.train_gen = train_gen
self.data_gen = self.train_gen # For drawing architectures only
self.val_gen = val_gen
self.test_gen = test_gen
# Initialize properties
self.device = device
self.x_dim = self.extract_x_dim()
self.nc = nc
self.nz = nz
self.num_channels = num_channels
self.le = le
self.ohe = ohe
self.grid_num_examples = grid_num_examples
# Anti-discriminator properties
assert 0.0 <= label_noise <= 1.0, "Label noise must be between 0 and 1"
self.label_noise = label_noise
self.label_noise_linear_anneal = label_noise_linear_anneal
self.ln_rate = 0.0
self.discrim_noise = discrim_noise
self.discrim_noise_linear_anneal = discrim_noise_linear_anneal
self.dn_rate = 0.0
# Evaluator properties
self.fake_shuffle = True
self.fake_num_workers = 6
self.fake_data_set_size = fake_data_set_size
self.fake_bs = fake_bs
self.netE_params = {'lr': netE_lr, 'beta1': netE_beta1, 'beta2': netE_beta2, 'wd': netE_wd}
self.eval_num_epochs = eval_num_epochs
self.early_stopping_patience = early_stopping_patience
# Initialized through init_fake_gen method
self.fake_train_set = None
self.fake_train_gen = None
self.fake_val_set = None
self.fake_val_gen = None
# Instantiate sub-nets
self.netG = ImageNetG(nz=self.nz, num_channels=self.num_channels, nf=netG_nf, x_dim=self.x_dim, nc=self.nc, device=self.device, path=self.path,
grid_num_examples=self.grid_num_examples, lr=netG_lr, beta1=netG_beta1, beta2=netG_beta2, wd=netG_wd).to(self.device)
self.netD = ImageNetD(nf=netD_nf, num_channels=self.num_channels, nc=self.nc, noise=self.discrim_noise, device=self.device, x_dim=self.x_dim,
path=self.path, lr=netD_lr, beta1=netD_beta1, beta2=netD_beta2, wd=netD_wd).to(self.device)
self.netE = None # Initialized through init_evaluator method
self.nets = {self.netG, self.netD, self.netE}
# Training properties
self.epoch = 0
self.sched_netG = sched_netG
self.real_label = 1
self.fake_label = 0
self.stored_loss = []
self.stored_acc = []
self.fixed_imgs = [self.gen_fixed_img_grid()]
def train_gan(self, num_epochs, print_freq, eval_freq=None, run_id=None, logger=None, retrain=False):
"""
Primary method for training
:param num_epochs: Desired number of epochs to train for
:param print_freq: How frequently to print out training statistics (i.e., freq of 5 will result in information being printed every 5 epochs)
:param eval_freq: How frequently to evaluate with netE. If None, no evaluation will occur. Evaluation takes a significant amount of time.
:param run_id: If not None, will update database as it progresses through training in quarter increments.
:param logger: Logger to be used for logging training progress. Must exist if run_id is not None.
:param retrain: Whether model is being retrained
"""
assert logger or not run_id, "Must pass a logger if run_id is passed"
total_epochs = self.epoch + num_epochs
if run_id:
checkpoints = [int(num_epochs * i / 4) for i in range(1, 4)]
if self.label_noise_linear_anneal:
self.ln_rate = self.label_noise / num_epochs
if self.discrim_noise_linear_anneal:
self.dn_rate = self.discrim_noise / num_epochs
uu.train_log_print(run_id=run_id, logger=logger, statement="Beginning training")
og_start_time = time.time()
start_time = time.time()
for epoch in range(num_epochs):
for x, y in self.train_gen:
y = torch.eye(self.nc)[y] if len(y.shape) == 1 else y
x, y = x.to(self.device), y.to(self.device)
self.train_one_step(x, y)
self.next_epoch()
if self.epoch % print_freq == 0 or (self.epoch == num_epochs):
uu.train_log_print(run_id=run_id, logger=logger, statement="Time: %ds" % (time.time() - start_time))
start_time = time.time()
self.print_progress(total_epochs=total_epochs, run_id=run_id, logger=logger)
if eval_freq is not None:
if self.epoch % eval_freq == 0 or (self.epoch == num_epochs):
self.init_fake_gen()
self.test_model(train_gen=self.fake_train_gen, val_gen=self.fake_val_gen)
uu.train_log_print(run_id=run_id, logger=logger, statement="Epoch: %d\tEvaluator Score: %.4f" % (self.epoch, np.max(self.stored_acc[-1])))
if run_id:
if self.epoch in checkpoints:
db.query_verify_live_run(run_id=run_id)
logger.info('Checkpoint reached.')
status_id = 'Train ' + str(checkpoints.index(self.epoch) + 1) + '/4'
status_id = status_id.replace('Train', 'Retrain') if retrain else status_id
db.query_set_status(run_id=run_id, status_id=cs.STATUS_DICT[status_id])
uu.train_log_print(run_id=run_id, logger=logger, statement="Total training time: %ds" % (time.time() - og_start_time))
uu.train_log_print(run_id=run_id, logger=logger, statement="Training complete")
def test_model(self, train_gen, val_gen):
"""
Train a CNN evaluator from scratch
:param train_gen: Specified train_gen, can either be real training generator or a created one from netG
:param val_gen: Same as above ^
"""
self.init_evaluator(train_gen, val_gen)
self.netE.train_evaluator(num_epochs=self.eval_num_epochs, eval_freq=1, real=False, es=self.early_stopping_patience)
torch.save(self.netG.state_dict(), self.path + "/stored_generators/Epoch_" + str(self.epoch) + "_Generator.pt")
loss, acc = self.netE.eval_once_real(self.test_gen)
self.stored_loss.append(loss.item())
self.stored_acc.append(acc.item())
def next_epoch(self):
"""Run netG and netD methods to prepare for next epoch. Mostly saves histories and resets history collection objects."""
self.epoch += 1
self.fixed_imgs.append(self.gen_fixed_img_grid())
self.netG.next_epoch()
self.netG.next_epoch_gen()
self.netD.next_epoch()
self.netD.next_epoch_discrim()
# Anneal noise rates
self.label_noise -= self.ln_rate
self.discrim_noise -= self.dn_rate
self.netD.noise = GaussianNoise(device=self.device, sigma=self.discrim_noise)
def init_evaluator(self, train_gen, val_gen):
"""
Initialize the netE sub-net. This is done as a separate method because we want to reinitialize netE each time we want to evaluate it.
We can also evaluate on the original, real data by specifying these training generators.
"""
self.netE = ImageNetE(train_gen=train_gen, val_gen=val_gen, test_gen=self.test_gen, device=self.device, x_dim=self.x_dim, le=self.le,
num_channels=self.num_channels, nc=self.nc, path=self.path, **self.netE_params).to(self.device)
self.nets = {self.netG, self.netD, self.netE}
def init_fake_gen(self):
# Initialize fake training set and validation set to be same size
self.fake_train_set = OnlineGeneratedImageDataset(netG=self.netG, size=self.fake_data_set_size, nz=self.nz, nc=self.nc, bs=self.fake_bs,
ohe=self.ohe, device=self.device, x_dim=self.x_dim)
self.fake_train_gen = data.DataLoader(self.fake_train_set, batch_size=self.fake_bs,
shuffle=self.fake_shuffle, num_workers=self.fake_num_workers)
self.fake_val_set = OnlineGeneratedImageDataset(netG=self.netG, size=self.fake_data_set_size, nz=self.nz, nc=self.nc, bs=self.fake_bs,
ohe=self.ohe, device=self.device, x_dim=self.x_dim)
self.fake_val_gen = data.DataLoader(self.fake_val_set, batch_size=self.fake_bs,
shuffle=self.fake_shuffle, num_workers=self.fake_num_workers)
def eval_on_real_data(self, num_epochs, train_gen=None, val_gen=None, test_gen=None, es=None):
"""
Evaluate the CGAN Evaluator Network on real examples
:param num_epochs: Number of epochs to train for
:param train_gen: PyTorch generator
:param val_gen: PyTorch generator
:param test_gen: PyTorch generator
:param es: Early-stopping patience. If None, early-stopping is not utilized.
:return: Accuracy of evaluation on CGAN's testing data
"""
if train_gen is None:
train_gen = self.train_gen
if val_gen is None:
val_gen = self.val_gen
if test_gen is None:
test_gen = self.test_gen
self.init_evaluator(train_gen, val_gen)
self.netE.train_evaluator(num_epochs=num_epochs, eval_freq=1, real=True, es=es)
_, og_result = self.netE.eval_once_real(test_gen)
og_result = og_result.numpy().take(0)
return og_result, copy.copy(self.netE)
def show_img(self, label):
"""Generate an image based on the desired class label index (integer 0-9)"""
assert label in self.le.classes_, "Make sure label is a valid class"
label = self.le.transform([label])[0]
label = torch.full((1, 1), label, dtype=torch.int64)
noise = torch.randn(1, self.nz, device=self.device)
processed_label = torch.zeros([1, self.nc], dtype=torch.uint8, device='cpu')
processed_label = processed_label.scatter(1, label, 1).float().to(self.device)
self.netG.eval()
with torch.no_grad():
output = self.netG(noise, processed_label).view(self.num_channels, self.x_dim[0], self.x_dim[1]).detach().cpu()
plt.imshow(output.permute(1, 2, 0))
plt.show()
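# Illustrative call (assumes an integer-labeled data set where 3 is one of
# self.le.classes_): cgan.show_img(label=3) draws a single generated example.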
def gen_fixed_img_grid(self):
"""
Produce a grid of generated images from netG's fixed noise vector. This can be used to visually track progress of the CGAN training.
:return: Tensor of images
"""
self.netG.eval()
with torch.no_grad():
fixed_imgs = self.netG(self.netG.fixed_noise, self.netG.fixed_labels)
return vutils.make_grid(tensor=fixed_imgs, nrow=self.grid_num_examples, normalize=True).detach().cpu()
def get_grid(self, index=-1, labels=None, num_examples=None):
"""Same as show_grid, but produces the specific grid (helper function)"""
# Check inputs
assert len(self.fixed_imgs) > 0, 'Model not yet trained'
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
# Instantiate output object
og_img = self.fixed_imgs[index]
new_img = torch.zeros([og_img.shape[0], len(labels) * self.x_dim[0] + 2 * (1 + len(labels)), num_examples * self.x_dim[1] + 2 * (1 + num_examples)],
dtype=torch.float32)
# Fill in new_img with relevant parts of og_img
for i, label in enumerate(labels):
for j in range(num_examples):
start_loc = np.where(label == self.le.classes_)[0][0]
new_img[:, i * self.x_dim[0] + 2 * (1 + i):(1 + i) * self.x_dim[0] + 2 * (2 + i), j * self.x_dim[1] + 2 * (1 + j):(1 + j) * self.x_dim[1] + 2 * (2 + j)] = \
og_img[:, start_loc * self.x_dim[0] + 2 * (1 + start_loc):(1 + start_loc) * self.x_dim[0] + 2 * (2 + start_loc),
j * self.x_dim[1] + 2 * (1 + j):(1 + j) * self.x_dim[1] + 2 * (2 + j)]
return new_img
def show_grid(self, index=-1, labels=None, num_examples=None):
"""
Print a specified fixed image grid from the self.fixed_imgs list
:param index: Evaluation index to display
:param labels: Which categories to show grid for
:param num_examples: Number of examples of each category to include in grid
:return: Nothing. Displays the desired image instead.
"""
# Check inputs
assert len(self.fixed_imgs) > 0, 'Model not yet trained'
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
# Get img
new_img = self.get_grid(index=index, labels=labels, num_examples=num_examples)
# Show img
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.imshow(np.transpose(new_img, (1, 2, 0)))
plt.show()
def build_gif(self, labels=None, num_examples=None, path=None, start=0, stop=None, freq=1, fps=5, final_img_frames=20):
"""
Loop through self.fixed_imgs and saves the images to a folder.
:param labels: List of which labels to produce. Defaults to all.
:param num_examples: Number of each label to produce. Defaults to self.grid_num_examples (10 generally).
:param path: Path to folder to save images. Folder will be created if it does not already exist.
:param start: Epoch to start gif on. Default 0.
:param stop: Epoch to end gif on. Default self.epoch (number of epochs trained so far).
:param freq: Interval of skipping epochs. Defaults to 1 (no skipping).
:param fps: Number of frames to display per second in gif. Defaults to 5.
:param final_img_frames: Number of times to repeat final image of gif before it will restart. Defaults to 20 (4 seconds with 5 fps).
:return: GIF (large file size, be careful!).
"""
# Check inputs
assert len(self.fixed_imgs) > 0, 'Model not yet trained'
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
if path is None:
path = self.path
if stop is None:
stop = self.epoch
os.makedirs(os.path.join(path, "imgs"), exist_ok=True)
ims = []
for epoch in range(start, stop + freq, freq):
fig = plt.figure(figsize=(8, 8))
plt.axis('off')
plt.suptitle('Epoch ' + str(epoch))
grid = self.get_grid(index=epoch, labels=labels, num_examples=num_examples)
plt.imshow(np.transpose(grid, (1, 2, 0)))
img_name = os.path.join(path, 'imgs', 'Epoch ' + str(epoch) + '.png')
plt.savefig(img_name)
ims.append(imageio.imread(img_name))
plt.close()
if epoch >= stop: # Last iteration: repeat the final frame so the gif lingers on it
for i in range(final_img_frames):
ims.append(imageio.imread(img_name))
plt.close()
imageio.mimsave(os.path.join(path, 'generation_animation.gif'), ims, fps=fps)
def run_all_diagnostics(self, real_netE, benchmark_acc, show=False, save=None):
"""
Run all diagnostic methods
:param real_netE: netE trained on real data
:param benchmark_acc: Best score obtained from training Evaluator on real data
:param show: Whether to display the plots as well
:param save: Where to save the plot. If set to None, default path is used.
"""
if save is None:
save = self.path
self.plot_progress(benchmark_acc=benchmark_acc, show=show, save=save)
self.build_gif(path=save)
self.netG.build_hist_gif(path=save)
self.netD.build_hist_gif(path=save)
self.plot_training_plots(show=show, save=save)
self.netG.plot_layer_scatters(show=show, save=save)
self.netD.plot_layer_scatters(show=show, save=save)
self.netG.plot_layer_hists(show=show, save=save)
self.netD.plot_layer_hists(show=show, save=save)
self.troubleshoot_discriminator(show=show, save=save)
self.troubleshoot_evaluator(real_netE=real_netE, show=show, save=save)
cm_gen, cr_gen = self.netE.classification_stats(title='CGAN', show=show, save=save)
print("\nCGAN Evaluator Network Classification Stats:\n")
print(cm_gen)
print("\n")
print(cr_gen)
cm_real, cr_real = real_netE.classification_stats(title='Real', show=show, save=save)
print("\nReal Data Evaluator Network Classification Stats:\n")
print(cm_real)
print("\n")
print(cr_real)
def plot_progress(self, benchmark_acc, show, save=None):
"""
Plot scores of each evaluation model across training of CGAN
:param benchmark_acc: Best score obtained from training Evaluator on real data
:param show: Whether to show the plot
:param save: Where to save the plot. If set to None default path is used. If false, not saved.
"""
if save is None:
save = self.path
length = len(self.stored_acc)
plt.bar(x=range(length), height=self.stored_acc, tick_label=np.linspace(self.epoch // length, self.epoch, length, dtype=np.int64))
plt.plot(np.linspace(0, length, length), np.full(length, benchmark_acc), linestyle='dashed', color='r')
plt.xlabel('Evaluation', fontweight='bold')
plt.ylabel('Accuracy (%)', fontweight='bold')
plt.title('Evaluation Over Training Evaluations', fontweight='bold')
if show:
plt.show()
if save:
assert os.path.exists(save), "Check that the desired save path exists."
plt.savefig(os.path.join(save, cs.FILENAME_PLOT_PROGRESS))
def troubleshoot_discriminator(self, labels=None, num_examples=None, exit_early_iters=1000, gen=None, show=True, save=None):
"""
Produce several nrow x nc grids of examples of interest for troubleshooting the model
1. Grid of generated examples discriminator labeled as fake.
2. Grid of generated examples discriminator labeled as real.
3. Grid of real examples discriminator labeled as fake.
4. Grid of real examples discriminator labeled as real.
:param labels: Which classes to generate examples for. Default is all.
:param num_examples: Number of examples of each class to generate examples for. Default is 10.
:param exit_early_iters: Number of iterations to exit after if not enough images are found for grids 1 and 2
:param gen: Generator to use for grids 3 and 4
:param show: Whether to show the plots
:param save: Where to save the plots. If set to None default path is used. If false, not saved.
"""
if save is None:
save = self.path
if gen is None:
gen = self.test_gen # More data exists
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
grid1, grid2 = self.build_grid1_and_grid2(labels=labels, num_examples=num_examples, exit_early_iters=exit_early_iters)
grid3, grid4 = self.build_grid3_and_grid4(labels=labels, num_examples=num_examples, gen=gen)
grid1 = vutils.make_grid(tensor=grid1, nrow=num_examples, normalize=True).detach().cpu()
grid2 = vutils.make_grid(tensor=grid2, nrow=num_examples, normalize=True).detach().cpu()
grid3 = vutils.make_grid(tensor=grid3, nrow=num_examples, normalize=True).detach().cpu()
grid4 = vutils.make_grid(tensor=grid4, nrow=num_examples, normalize=True).detach().cpu()
f, axes = plt.subplots(2, 2, figsize=(12, 12))
axes[0, 0].axis('off')
axes[0, 1].axis('off')
axes[1, 0].axis('off')
axes[1, 1].axis('off')
axes[0, 0].title.set_text("Fake examples labeled as fake")
axes[0, 1].title.set_text("Fake examples labeled as real")
axes[1, 0].title.set_text("Real examples labeled as fake")
axes[1, 1].title.set_text("Real examples labeled as real")
axes[0, 0].imshow(np.transpose(grid1, (1, 2, 0)))
axes[0, 1].imshow(np.transpose(grid2, (1, 2, 0)))
axes[1, 0].imshow(np.transpose(grid3, (1, 2, 0)))
axes[1, 1].imshow(np.transpose(grid4, (1, 2, 0)))
st = f.suptitle("Troubleshooting examples of discriminator outputs", fontweight='bold', fontsize=20)
f.tight_layout()
st.set_y(0.96)
f.subplots_adjust(top=0.9)
if show:
f.show()
if save:
assert os.path.exists(save), "Check that the desired save path exists."
os.makedirs(os.path.join(save, 'troubleshoot_plots'), exist_ok=True)
f.savefig(os.path.join(save, 'troubleshoot_plots', 'discriminator.png'))
def troubleshoot_evaluator(self, real_netE, labels=None, num_examples=None, show=True, save=None):
"""
Produce several nrow x nc grids of examples of interest for troubleshooting the model
5. Grid of real examples that the evaluator failed to identify correctly (separate plot).
6. Grid of what the evaluator THOUGHT each example in grid 5 should be.
7. Grid of misclassified examples by model trained on real data.
8. Grid of what the evaluator THOUGHT each example in grid 7 should be.
:param real_netE: A version of netE trained on real data, rather than synthetic data
:param labels: Which classes to generate examples for. Default is all.
:param num_examples: Number of examples of each class to generate examples for. Default is 10.
:param show: Whether to show the plots
:param save: Where to save the plots. If set to None default path is used. If false, not saved.
"""
if save is None:
save = self.path
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
grid5, grid6 = self.build_eval_grids(netE=self.netE, labels=labels, num_examples=num_examples)
grid7, grid8 = self.build_eval_grids(netE=real_netE, labels=labels, num_examples=num_examples)
grid5 = vutils.make_grid(tensor=grid5, nrow=num_examples, normalize=True).detach().cpu()
grid6 = vutils.make_grid(tensor=grid6, nrow=num_examples, normalize=True).detach().cpu()
grid7 = vutils.make_grid(tensor=grid7, nrow=num_examples, normalize=True).detach().cpu()
grid8 = vutils.make_grid(tensor=grid8, nrow=num_examples, normalize=True).detach().cpu()
f, axes = plt.subplots(2, 2, figsize=(12, 12))
axes[0, 0].axis('off')
axes[0, 1].axis('off')
axes[1, 0].axis('off')
axes[1, 1].axis('off')
axes[0, 0].title.set_text("CGAN Eval Mistakes")
axes[0, 1].title.set_text("CGAN Eval Intended")
axes[1, 0].title.set_text("Real Data Eval Mistakes")
axes[1, 1].title.set_text("Real Data Eval Intended")
axes[0, 0].imshow(np.transpose(grid5, (1, 2, 0)))
axes[0, 1].imshow(np.transpose(grid6, (1, 2, 0)))
axes[1, 0].imshow(np.transpose(grid7, (1, 2, 0)))
axes[1, 1].imshow(np.transpose(grid8, (1, 2, 0)))
st = f.suptitle("Troubleshooting examples of evaluator outputs", fontweight='bold', fontsize=20)
f.tight_layout()
st.set_y(0.96)
f.subplots_adjust(top=0.9)
if show:
f.show()
if save:
assert os.path.exists(save), "Check that the desired save path exists."
os.makedirs(os.path.join(save, 'troubleshoot_plots'), exist_ok=True)
f.savefig(os.path.join(save, 'troubleshoot_plots', 'evaluator.png'))
def build_grid1_and_grid2(self, labels=None, num_examples=None, exit_early_iters=1000):
"""Generate images and feeds them to discriminator in order to find num_examples of each class specified"""
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
self.netG.eval()
self.netD.eval()
bs = self.fake_bs # Seems to be a good number with training above.
grid1 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid2 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid1_counts = {} # Represents the number of each class acquired so far for this grid
grid2_counts = {}
for i in range(len(labels)):
grid1_counts[i] = 0
grid2_counts[i] = 0
count = 0
while not (all(x == num_examples for x in grid1_counts.values()) and all(x == num_examples for x in grid2_counts.values())) and count < exit_early_iters:
noise = torch.randn(bs, self.nz, device=self.device)
random_labels = iu.convert_y_to_one_hot(y=torch.from_numpy(np.random.choice(self.le.transform(labels), bs)), nc=self.nc).to(self.device).type(torch.float32)
with torch.no_grad():
fakes = self.netG(noise, random_labels)
fwd = self.netD(fakes, random_labels)
for i in range(len(labels)):
grid1_contenders = fakes[(random_labels[:, self.le.transform(labels)[i]] == 1) * (fwd[:, 0] < 0.5)]
grid2_contenders = fakes[(random_labels[:, self.le.transform(labels)[i]] == 1) * (fwd[:, 0] > 0.5)]
grid1_retain = min(num_examples - grid1_counts[i], len(grid1_contenders))
grid2_retain = min(num_examples - grid2_counts[i], len(grid2_contenders))
grid1[(i * num_examples) + grid1_counts[i]:(i * num_examples) + grid1_counts[i] + grid1_retain] = grid1_contenders[:grid1_retain]
grid2[(i * num_examples) + grid2_counts[i]:(i * num_examples) + grid2_counts[i] + grid2_retain] = grid2_contenders[:grid2_retain]
grid1_counts[i] += grid1_retain
grid2_counts[i] += grid2_retain
count += 1
return grid1, grid2
def build_grid3_and_grid4(self, gen, labels=None, num_examples=None):
"""
Feed real images to discriminator in order to find num_examples of each specified class labeled as fake
Runs one full epoch over training data
"""
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
self.netD.eval()
grid3 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid4 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid3_counts = {} # Represents the number of each class acquired so far for this grid
grid4_counts = {}
for i in range(len(labels)):
grid3_counts[i] = 0
grid4_counts[i] = 0
for x, y in gen:
x, y = x.to(self.device), y.type(torch.float32).to(self.device)
y = iu.convert_y_to_one_hot(y=y.type(torch.LongTensor).cpu().detach(), nc=self.nc).to(self.device).type(torch.float32)
with torch.no_grad():
fwd = self.netD(x, y)
for i in range(len(labels)):
grid3_contenders = x[(y[:, self.le.transform(labels)[i]] == 1) * (fwd[:, 0] < 0.5)]
grid4_contenders = x[(y[:, self.le.transform(labels)[i]] == 1) * (fwd[:, 0] > 0.5)]
grid3_retain = min(num_examples - grid3_counts[i], len(grid3_contenders))
grid4_retain = min(num_examples - grid4_counts[i], len(grid4_contenders))
grid3[(i * num_examples) + grid3_counts[i]:(i * num_examples) + grid3_counts[i] + grid3_retain] = grid3_contenders[:grid3_retain]
grid4[(i * num_examples) + grid4_counts[i]:(i * num_examples) + grid4_counts[i] + grid4_retain] = grid4_contenders[:grid4_retain]
grid3_counts[i] += grid3_retain
grid4_counts[i] += grid4_retain
# Exit early if grid filled up
if all(x == num_examples for x in grid3_counts.values()) and all(x == num_examples for x in grid4_counts.values()):
return grid3, grid4
return grid3, grid4
def build_eval_grids(self, netE, labels=None, num_examples=None):
"""Construct grids 5-8 for troubleshoot_evaluator method"""
if num_examples is None:
num_examples = self.grid_num_examples
assert num_examples <= self.grid_num_examples, 'Num examples must be less than or equal to ' + str(self.grid_num_examples)
if labels is None:
labels = self.le.classes_
netE.eval()
grid1 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid2 = torch.zeros(num_examples * len(labels), self.num_channels, self.x_dim[0], self.x_dim[1])
grid1_counts = {} # Represents the number of each class acquired so far for this grid
for i in range(len(labels)):
grid1_counts[i] = 0
for x, y in self.test_gen:
x, y = x.to(self.device), y.type(torch.float32).to(self.device)
y = iu.convert_y_to_one_hot(y=y.type(torch.LongTensor).cpu().detach(), nc=self.nc).to(self.device).type(torch.float32)
with torch.no_grad():
fwd = netE(x)
for i in range(len(labels)):
grid1_contenders = x[(torch.argmax(y, -1) != torch.argmax(fwd, -1)) * (torch.argmax(y, -1) == self.le.transform(labels)[i])]
if len(grid1_contenders) > 0:
grid1_intended = torch.argmax(fwd[(torch.argmax(y, -1) != torch.argmax(fwd, -1)) * (torch.argmax(y, -1) == self.le.transform(labels)[i])], -1)
grid2_contenders = torch.zeros(0, self.num_channels, self.x_dim[0], self.x_dim[1]).to(self.device)
for mistake in grid1_intended:
img = self.find_particular_img(gen=self.train_gen, net=None, mistake=None, label=self.le.inverse_transform([mistake.item()])[0])
if img is None:
img = torch.zeros((1, self.num_channels, self.x_dim[0], self.x_dim[1]), dtype=torch.float32).to(self.device)
else:
img = img.view(-1, self.num_channels, self.x_dim[0], self.x_dim[1])
grid2_contenders = torch.cat((grid2_contenders, img), dim=0)
grid1_retain = min(num_examples - grid1_counts[i], len(grid1_contenders))
grid1[(i * num_examples) + grid1_counts[i]:(i * num_examples) + grid1_counts[i] + grid1_retain] = grid1_contenders[:grid1_retain]
grid2[(i * num_examples) + grid1_counts[i]:(i * num_examples) + grid1_counts[i] + grid1_retain] = grid2_contenders[:grid1_retain]
grid1_counts[i] += grid1_retain
# Exit early if grid filled up
if all(x == num_examples for x in grid1_counts.values()):
return grid1, grid2
return grid1, grid2
def find_particular_img(self, gen, net, label, mistake, escape=999):
"""
Searches through the generator to find a single image of interest based on search parameters
:param gen: Generator to use. netG is a valid generator to use for fake data.
:param net: Network to use. Either netD or netE.
:param label: Label to return (0-9)
:param mistake: Whether the example should be a mistake (True or False)
:param escape: Error out if loop hits this number
:return: torch tensor of image (x_dim[0] x x_dim[1])
"""
assert gen in {self.train_gen, self.val_gen, self.test_gen, self.netG}, "Please use a valid generator (train/val/test/generator)"
assert (mistake is None) == (net is None), "Either both mistake and net must be None, or neither must be None"
assert net in {self.netD, self.netE, None}, "Please use a valid net (netD, netE, or None)"
assert mistake in {True, False, None}, "Mistake should be True, False, or None"
assert label in self.le.classes_, "Make sure label is a valid class"
label = self.le.transform([label]).take(0)
bs = self.fake_bs
if net is not None:
net.eval()
escape_counter = 0
while True: # Search until a match is found
escape_counter += 1
# Generate examples
if gen == self.netG:
noise = torch.randn(bs, self.nz, device=self.device)
y = iu.convert_y_to_one_hot(y=torch.full((bs, 1), label, dtype=torch.int64), nc=self.nc).to(self.device).type(torch.float32)
with torch.no_grad():
x = self.netG(noise, y)
else:
iterator = gen.__iter__()
x, y = next(iterator)
x, y = x.to(self.device), y.type(torch.float32).to(self.device)
boolz = y == label
x, y = x[boolz], y[boolz]
if len(x) > 0:
if mistake is None:
return x[0]
y = iu.convert_y_to_one_hot(y=y.type(torch.LongTensor).cpu().detach(), nc=self.nc).to(self.device).type(torch.float32)
with torch.no_grad():
if net == self.netD:
fwd = net(x, y)
else:
fwd = net(x)
# Check if conditions are met and exit, otherwise continue.
# netD and incorrect
fwd = fwd.reshape(-1)
if net == self.netD:
if mistake:
if gen == self.netG: # Incorrect means classifying as real
contenders = x[fwd > 0.5]
else:
contenders = x[fwd < 0.5]
# netD and correct
else:
if gen == self.netG: # Correct means classifying as fake
contenders = x[fwd < 0.5]
else:
contenders = x[fwd > 0.5]
# netE and incorrect
elif mistake:
contenders = x[torch.argmax(fwd, -1) != torch.argmax(y, -1)]
# netE and correct
else:
contenders = x[torch.argmax(fwd, -1) == torch.argmax(y, -1)]
# If 1 or more values returned, return that value and exit. Otherwise, continue.
if len(contenders) > 0:
return contenders[0]
if escape_counter == escape:
return None
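# Illustrative call (the label value is an assumption -- any entry of cgan.le.classes_ works):
#   img = cgan.find_particular_img(gen=cgan.netG, net=cgan.netD,
#                                  label=cgan.le.classes_[0], mistake=True)
# returns a generated image that netD mislabeled as real, or None if the search times out.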
def draw_cam(self, gen, net, label, mistake, show, path, scale=None, escape=999):
"""
Wrapper function for find_particular_img and draw_cam
:param gen: Generator to use. netG is a valid generator to use for fake data (otherwise data_gen).
:param net: Network to use. Either netD or netE.
:param label: Label to return
:param mistake: Whether the example should be a mistake (True or False)
:param show: Whether to show the image
:param path: Path to create image file. Needs full file name. Should end in .jpg
:param scale: Multiplier to scale image back to original values
:param escape: Error out if loop hits this number
"""
assert path.split(".")[-1] == "jpg", "Please make sure path ends in '.jpg'"
assert label in self.le.classes_, "Make sure label is a valid class"
if scale is None:
scale = 1 if self.num_channels > 1 else 255
img = self.find_particular_img(gen=gen, net=net, label=label, mistake=mistake, escape=escape)
assert img is not None, "Unable to find an image within the specified timeframe"
real = gen != self.netG
os.makedirs(os.path.dirname(path), exist_ok=True)
if net == self.netD:
label = self.le.transform([label])[0]
self.netD.draw_cam(img=img, label=label, path=path, scale=scale, show=show, real=real)
else:
self.netE.draw_cam(img=img, path=path, scale=scale, show=show, real=real)
def extract_x_dim(self):
iterator = iter(self.train_gen)
x, _ = next(iterator)
return x.shape[-2], x.shape[-1]
def gen_data(self, size, path, stratify=None, label=None):
"""Generates a data set formatted like the original data and saves to specified path"""
assert os.path.exists(path), "Output directory exists"
bs = min(self.fake_bs, size)
dataset = OnlineGeneratedImageDataset(netG=self.netG, size=size, nz=self.nz, nc=self.nc, bs=bs,
ohe=self.ohe, device=self.device, x_dim=self.x_dim, stratify=stratify)
gen = data.DataLoader(dataset, batch_size=bs,
shuffle=False, num_workers=self.fake_num_workers)
label = 'genned_img' if label is None else label
gen.dataset.next_epoch()
with torch.no_grad():
for i in range(gen.dataset.batches_per_epoch):
batch, labels = gen.dataset.next_batch()
for j, img in enumerate(batch):
vutils.save_image(img, os.path.join(path, label + '_' + str(i * bs + j) + '.png'))
|
StarcoderdataPython
|
8071140
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ScopedTokenArgs', 'ScopedToken']
@pulumi.input_type
class ScopedTokenArgs:
def __init__(__self__, *,
audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
expires_in: Optional[pulumi.Input[int]] = None,
refreshable: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ScopedToken resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] audiences: (Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
:param pulumi.Input[str] description: (Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
:param pulumi.Input[int] expires_in: (Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
:param pulumi.Input[bool] refreshable: (Optional) Is this token refreshable? Defaults to `false`
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: (Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
:param pulumi.Input[str] username: (Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
if audiences is not None:
pulumi.set(__self__, "audiences", audiences)
if description is not None:
pulumi.set(__self__, "description", description)
if expires_in is not None:
pulumi.set(__self__, "expires_in", expires_in)
if refreshable is not None:
pulumi.set(__self__, "refreshable", refreshable)
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def audiences(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
(Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
"""
return pulumi.get(self, "audiences")
@audiences.setter
def audiences(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "audiences", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="expiresIn")
def expires_in(self) -> Optional[pulumi.Input[int]]:
"""
(Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
"""
return pulumi.get(self, "expires_in")
@expires_in.setter
def expires_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expires_in", value)
@property
@pulumi.getter
def refreshable(self) -> Optional[pulumi.Input[bool]]:
"""
(Optional) Is this token refreshable? Defaults to `false`
"""
return pulumi.get(self, "refreshable")
@refreshable.setter
def refreshable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "refreshable", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
(Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class _ScopedTokenState:
def __init__(__self__, *,
access_token: Optional[pulumi.Input[str]] = None,
audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
expires_in: Optional[pulumi.Input[int]] = None,
expiry: Optional[pulumi.Input[int]] = None,
issued_at: Optional[pulumi.Input[int]] = None,
issuer: Optional[pulumi.Input[str]] = None,
refreshable: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subject: Optional[pulumi.Input[str]] = None,
token_type: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ScopedToken resources.
:param pulumi.Input[str] access_token: Returns the access token to authenticate to Artifactory
:param pulumi.Input[Sequence[pulumi.Input[str]]] audiences: (Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
:param pulumi.Input[str] description: (Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
:param pulumi.Input[int] expires_in: (Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
:param pulumi.Input[int] expiry: Returns the token expiry
:param pulumi.Input[int] issued_at: Returns the token issued at date/time
:param pulumi.Input[str] issuer: Returns the token issuer
:param pulumi.Input[bool] refreshable: (Optional) Is this token refreshable? Defaults to `false`
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: (Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
:param pulumi.Input[str] subject: Returns the token subject
:param pulumi.Input[str] token_type: Returns the token type
:param pulumi.Input[str] username: (Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if audiences is not None:
pulumi.set(__self__, "audiences", audiences)
if description is not None:
pulumi.set(__self__, "description", description)
if expires_in is not None:
pulumi.set(__self__, "expires_in", expires_in)
if expiry is not None:
pulumi.set(__self__, "expiry", expiry)
if issued_at is not None:
pulumi.set(__self__, "issued_at", issued_at)
if issuer is not None:
pulumi.set(__self__, "issuer", issuer)
if refreshable is not None:
pulumi.set(__self__, "refreshable", refreshable)
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if subject is not None:
pulumi.set(__self__, "subject", subject)
if token_type is not None:
pulumi.set(__self__, "token_type", token_type)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[pulumi.Input[str]]:
"""
Returns the access token to authenticate to Artifactory
"""
return pulumi.get(self, "access_token")
@access_token.setter
def access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_token", value)
@property
@pulumi.getter
def audiences(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
(Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
"""
return pulumi.get(self, "audiences")
@audiences.setter
def audiences(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "audiences", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="expiresIn")
def expires_in(self) -> Optional[pulumi.Input[int]]:
"""
(Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
"""
return pulumi.get(self, "expires_in")
@expires_in.setter
def expires_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expires_in", value)
@property
@pulumi.getter
def expiry(self) -> Optional[pulumi.Input[int]]:
"""
Returns the token expiry
"""
return pulumi.get(self, "expiry")
@expiry.setter
def expiry(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "expiry", value)
@property
@pulumi.getter(name="issuedAt")
def issued_at(self) -> Optional[pulumi.Input[int]]:
"""
Returns the token issued at date/time
"""
return pulumi.get(self, "issued_at")
@issued_at.setter
def issued_at(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "issued_at", value)
@property
@pulumi.getter
def issuer(self) -> Optional[pulumi.Input[str]]:
"""
Returns the token issuer
"""
return pulumi.get(self, "issuer")
@issuer.setter
def issuer(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "issuer", value)
@property
@pulumi.getter
def refreshable(self) -> Optional[pulumi.Input[bool]]:
"""
(Optional) Is this token refreshable? Defaults to `false`
"""
return pulumi.get(self, "refreshable")
@refreshable.setter
def refreshable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "refreshable", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
(Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter
def subject(self) -> Optional[pulumi.Input[str]]:
"""
Returns the token subject
"""
return pulumi.get(self, "subject")
@subject.setter
def subject(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subject", value)
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> Optional[pulumi.Input[str]]:
"""
Returns the token type
"""
return pulumi.get(self, "token_type")
@token_type.setter
def token_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token_type", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
(Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class ScopedToken(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
expires_in: Optional[pulumi.Input[int]] = None,
refreshable: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
Artifactory **does not** retain scoped tokens and cannot be imported into state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] audiences: (Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
:param pulumi.Input[str] description: (Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
:param pulumi.Input[int] expires_in: (Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
:param pulumi.Input[bool] refreshable: (Optional) Is this token refreshable? Defaults to `false`
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: (Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
:param pulumi.Input[str] username: (Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ScopedTokenArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
Artifactory **does not** retain scoped tokens, so they cannot be imported into state.
:param str resource_name: The name of the resource.
:param ScopedTokenArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScopedTokenArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
expires_in: Optional[pulumi.Input[int]] = None,
refreshable: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScopedTokenArgs.__new__(ScopedTokenArgs)
__props__.__dict__["audiences"] = audiences
__props__.__dict__["description"] = description
__props__.__dict__["expires_in"] = expires_in
__props__.__dict__["refreshable"] = refreshable
__props__.__dict__["scopes"] = scopes
__props__.__dict__["username"] = username
__props__.__dict__["access_token"] = None
__props__.__dict__["expiry"] = None
__props__.__dict__["issued_at"] = None
__props__.__dict__["issuer"] = None
__props__.__dict__["subject"] = None
__props__.__dict__["token_type"] = None
super(ScopedToken, __self__).__init__(
'artifactory:index/scopedToken:ScopedToken',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
access_token: Optional[pulumi.Input[str]] = None,
audiences: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
expires_in: Optional[pulumi.Input[int]] = None,
expiry: Optional[pulumi.Input[int]] = None,
issued_at: Optional[pulumi.Input[int]] = None,
issuer: Optional[pulumi.Input[str]] = None,
refreshable: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subject: Optional[pulumi.Input[str]] = None,
token_type: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'ScopedToken':
"""
Get an existing ScopedToken resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_token: Returns the access token to authenticate to Artifactory
:param pulumi.Input[Sequence[pulumi.Input[str]]] audiences: (Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
:param pulumi.Input[str] description: (Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
:param pulumi.Input[int] expires_in: (Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
:param pulumi.Input[int] expiry: Returns the token expiry
:param pulumi.Input[int] issued_at: Returns the token issued at date/time
:param pulumi.Input[str] issuer: Returns the token issuer
:param pulumi.Input[bool] refreshable: (Optional) Is this token refreshable? Defaults to `false`
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: (Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
:param pulumi.Input[str] subject: Returns the token subject
:param pulumi.Input[str] token_type: Returns the token type
:param pulumi.Input[str] username: (Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScopedTokenState.__new__(_ScopedTokenState)
__props__.__dict__["access_token"] = access_token
__props__.__dict__["audiences"] = audiences
__props__.__dict__["description"] = description
__props__.__dict__["expires_in"] = expires_in
__props__.__dict__["expiry"] = expiry
__props__.__dict__["issued_at"] = issued_at
__props__.__dict__["issuer"] = issuer
__props__.__dict__["refreshable"] = refreshable
__props__.__dict__["scopes"] = scopes
__props__.__dict__["subject"] = subject
__props__.__dict__["token_type"] = token_type
__props__.__dict__["username"] = username
return ScopedToken(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> pulumi.Output[str]:
"""
Returns the access token to authenticate to Artifactory
"""
return pulumi.get(self, "access_token")
@property
@pulumi.getter
def audiences(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
(Optional) A list of the other instances or services that should accept this token identified by their Service-IDs. Limited to total 255 characters. Default to '*@*' if not set. Service ID must begin with 'jfrt@'. For instructions to retrieve the Artifactory Service ID see this [documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GetServiceID).
"""
return pulumi.get(self, "audiences")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
(Optional) Free text token description. Useful for filtering and managing tokens. Limited to 1024 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expiresIn")
def expires_in(self) -> pulumi.Output[int]:
"""
(Optional) The amount of time, in seconds, it would take for the token to expire. An admin shall be able to set whether expiry is mandatory, what is the default expiry, and what is the maximum expiry allowed. Must be non-negative. Default value is based on configuration in `access.config.yaml`. See [API documentation](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-RevokeTokenbyIDrevoketokenbyid) for details.
"""
return pulumi.get(self, "expires_in")
@property
@pulumi.getter
def expiry(self) -> pulumi.Output[int]:
"""
Returns the token expiry
"""
return pulumi.get(self, "expiry")
@property
@pulumi.getter(name="issuedAt")
def issued_at(self) -> pulumi.Output[int]:
"""
Returns the token issued at date/time
"""
return pulumi.get(self, "issued_at")
@property
@pulumi.getter
def issuer(self) -> pulumi.Output[str]:
"""
Returns the token issuer
"""
return pulumi.get(self, "issuer")
@property
@pulumi.getter
def refreshable(self) -> pulumi.Output[Optional[bool]]:
"""
(Optional) Is this token refreshable? Defaults to `false`
"""
return pulumi.get(self, "refreshable")
@property
@pulumi.getter
def scopes(self) -> pulumi.Output[Sequence[str]]:
"""
(Optional) The scope of access that the token provides. Access to the REST API is always provided by default. Administrators can set any scope, while non-admin users can only set the scope to a subset of the groups to which they belong.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter
def subject(self) -> pulumi.Output[str]:
"""
Returns the token subject
"""
return pulumi.get(self, "subject")
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> pulumi.Output[str]:
"""
Returns the token type
"""
return pulumi.get(self, "token_type")
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
"""
(Optional) The user name for which this token is created. The username is based on the authenticated user - either from the user of the authenticated token or based on the username (if basic auth was used). The username is then used to set the subject of the token: `<service-id>/users/<username>`. Limited to 255 characters.
"""
return pulumi.get(self, "username")
|
StarcoderdataPython
|
3317248
|
# -*- coding: utf-8 -*-
# Copyright CNRS 2012,
# <NAME> (LULI)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os, os.path
import re
import gzip
import socket
import getpass
import six
import time
from datetime import datetime
from io import StringIO
from multiprocessing import Pool
import numpy as np
import tables
from jinja2 import Template
def parse_one_file(args):
"""
Read a gnuplot frame and return its content as an ndarray
"""
path, xshape = args
# this fixes the bug with the exponent overflow (such as 1.23233-126)
with gzip.open(path, 'rt') as fo:
txt = fo.read()
txt = re.sub(r'(?<=\d)-(?=\d)', 'E-', txt)
f = StringIO(six.u(txt))
d = np.loadtxt(f)
#f.close()
del f
d = d.reshape((xshape[1], xshape[2],-1))
return d
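# Illustration of the exponent repair above (a standalone sketch, not used by
# the parser itself): Fortran-style output such as "1.23233-126" drops the "E"
# and would otherwise break np.loadtxt. The substitution restores it:
#   re.sub(r'(?<=\d)-(?=\d)', 'E-', '0.5 1.23233-126 3.0')
#   # -> '0.5 1.23233E-126 3.0'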
p = Pool()
class ParseDued(object):
"""
Used for parsing dued gnuplot output files.
Contains following objects:
- d: ndarray containing data.
shape: (timesteps, i_shape, j_shape, number_of_variables
as specified in frm\d+.gpl.gz)
- t: [t_0, dt, number of timesteps]
"""
def __init__(self, folder, parallel=True, units='hedp', flash_comp=False):
"""
Parse the folder containing dued output.
Parameters:
-----------
- folder [str] : path to DUED simulation folder.
- parallel [bool] : use multithreading.
- units [str] : units to use in ['hedp', 'cgs']. flash_comp
forces cgs.
- flash_comp [str]: transform units and grid so it can be
more easily plotted besides a flash simulation.
"""
folder = os.path.abspath(folder)
self.sim_path = folder
self.data_path = os.path.join(os.path.abspath(folder),'out/gpl')
if units not in ['hedp', 'cgs']:
raise ValueError("'units' should be in ['hedp', 'cgs']")
self.units = units
self.flash_comp = flash_comp
if flash_comp:
self.units = "cgs"
if not os.path.exists(self.data_path):
print('Error: Path does not exist: {0}'.format(self.data_path))
sys.exit(1)
self.output_name = os.path.split(folder)[1]
frames = sorted([os.path.join(self.data_path, el)\
for el in os.listdir(self.data_path) if re.match(r'frm\d+.gpl.gz', el)])
self._get_shape(frames)
if parallel:
_map = p.map
else:
_map = map
res = list(_map(parse_one_file,
zip(frames, [self.xshape for el in range(len(frames))])))
self.d = np.array(res)
# cropping fantom cells, not sure this is correct
self.d = self.d[:,1:-1,1:-1,:]
self._reshape_data()
print("{0} - Parsed {1:.3f} ns ({2} plot files)".format(
time.strftime("%H:%M:%S", time.gmtime()),
self.d[:,0,0,-1].max(),
self.d.shape[0]
))
def to_xdmf(self, filename=None):
"""
Save to XDMF
"""
if not filename:
filename = self.output_name
self._save_h5(filename, self.d)
self._generate_xml(filename, self.d)
print("{0} - XMDF file '{1}.xdmf', sucessfully created".format(
time.strftime("%H:%M:%S", time.gmtime()),
self.output_name,
))
def _get_shape(self, frames):
# read array shape from header if necessary
self.xshape = np.zeros(3, dtype=int)
self.xshape[0] = len(frames)
with gzip.open(frames[0],"rb") as f:
header = np.fromstring(f.readline()[7:], sep=' ')
self.xshape[1:] = header[1:3]
print("{0} - Expected parsing time: {1:.1f} s".format(
time.strftime("%H:%M:%S", time.gmtime()),
self.xshape.prod()*3e-5))
def _reshape_data(self):
"""
Transform DUED grid so it can be plotted side by side with FLASH output
"""
if self.flash_comp:
# change x and y axis
self.d[...,[2,3]] = -self.d[...,[3,2]]
self.d[...,[4,5]] = -self.d[...,[5,4]]
if self.units == 'cgs':
self.d[...,2:4] = self.d[...,2:4]*1e-4 # μm to cm
self.d[:,0,0,-1] = self.d[:,0,0,-1]*1e-9 # ns to s
#self.d[...,6:9] = self.d[...,6:9]*11640.
#self.d[...,10:13] = self.d[...,10:13]*1e12
def _save_h5(self, filename, d):
"""
Save parsed dued output to hdf5
"""
#zlib_filter = tables.Filters(complevel=5,complib='zlib')
h5file = tables.openFile(filename+'.h5', mode = "w")#, filter=zlib_filter)
for key, val in dict(X=d[...,2],Y=d[...,3],Z=np.zeros(d[...,3].shape), vel=d[...,4:6]).items():
cgroup = h5file.createGroup(h5file.root, key)
for idx in range(d.shape[0]):
h5file.createArray(cgroup, 'frame_{0:04d}'.format(idx),
val[idx], "")
for key, val in dict(dens=16,tele=6,tion=7,trad=8, zbar=9,pres=10,pion=11,pele=12,eint=13,eion=14,
eele=15,Ne=17,Ni=18,densN=19, Mass=20).items():
cgroup = h5file.createGroup(h5file.root, key)
for idx in range(d.shape[0]):
h5file.createArray(cgroup, 'frame_{0:04d}'.format(idx),
d[idx,:-1,:-1,val], "")
dens0 = d[0,:-1,:-1,16]
targ = np.nan*np.ones(dens0.shape)
for idx, val in enumerate(np.unique(dens0)):
targ = np.where(dens0==val, idx, targ)
h5file.createArray('/', 'targ', targ)
with open(os.path.join(self.sim_path, 'dued.nml'), 'r') as f:
h5file.setNodeAttr('/', 'dued_namelist', f.read())
h5file.setNodeAttr('/', 'sim_path', self.sim_path)
h5file.setNodeAttr('/', 'hostname', socket.gethostname())
h5file.setNodeAttr('/', 'user', getpass.getuser())
h5file.setNodeAttr('/', 'date', str(datetime.now()))
h5file.setNodeAttr('/', 'shape', self.d[...,0].shape)
h5file.createArray('/', 'time', d[:,0,0,-1]*1e-9)
h5file.close()
def _generate_xml(self, filename, d):
"""
Generate XML description for the hdf5 file
"""
tmpl = Template("""<?xml version="1.0" ?>
<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
<Xdmf xmlns:xi="http://www.w3.org/2003/XInclude" Version="2.1">
<Domain>
<Grid CollectionType="Temporal" GridType="Collection">
{% for idx in range(d.shape[0]) %}
<Grid Name="Mesh" GridType="Uniform">
<Time Value="{{t[idx]}}" />
<Topology TopologyType="2DSMesh" Dimensions="{{d.shape[1]}} {{d.shape[2]}}"/>
<Geometry GeometryType="X_Y_Z">
<DataItem NumberType="Float" Precision="8" Dimensions="{{d.shape[1]}} {{d.shape[2]}}" Format="HDF">{{filename}}.h5:/X/frame_{{'%04d' % idx}}</DataItem>
<DataItem NumberType="Float" Precision="8" Dimensions="{{d.shape[1]}} {{d.shape[2]}}" Format="HDF">{{filename}}.h5:/Y/frame_{{'%04d' % idx}}</DataItem>
<DataItem NumberType="Float" Precision="8" Dimensions="{{d.shape[1]}} {{d.shape[2]}}" Format="HDF">{{filename}}.h5:/Z/frame_{{'%04d' % idx}}</DataItem>
</Geometry>
{% for el in var -%}
<Attribute Name="{{el.name}}" AttributeType="{{el.attr_type}}" Center="{{el.center}}">
<DataItem NumberType="Float" Precision="8" Dimensions="{% if not el.dim %}{{(d.shape[1]-1)}} {{(d.shape[2]-1)}}{% else %}{{(d.shape[1])}} {{(d.shape[2])}} 2{% endif %}" Format="HDF">{{filename}}.h5:/{{el.key}}/frame_{{'%04d' % idx}}</DataItem>
</Attribute>
{% endfor -%}
</Grid>
{% endfor %}
</Grid>
</Domain>
</Xdmf>
""")
var_dict = [dict(key=el[0],name=el[1], attr_type=el[2] and 'Vector' or 'Scalar',
dim=el[2], center= (el[0]=='vel') and 'Node' or 'Cell') for el in [
( 'dens' , 'dens' , 0 ) ,
( 'vel' , 'Velocity' , 1 ) ,
( 'tele' , 'tele' , 0 ) ,
( 'tion' , 'tion' , 0 ) ,
( 'trad' , 'trad' , 0 ) ,
( 'zbar' , 'zbar' , 0 ) ,
( 'pres' , 'pres' , 0 ) ,
( 'pion' , 'pion' , 0 ) ,
( 'pele' , 'pele' , 0 ) ,
( 'eint' , 'eint' , 0 ) ,
( 'eion' , 'eion' , 0 ) ,
( 'eele' , 'eele' , 0 ) ,
( 'Ne' , 'ne' , 0 ) ,
( 'Ni' , 'ni' , 0 ) ,
( 'densN' , 'dens normalised' , 0 ) ,
( 'Mass' , 'cell mass' , 0 ) ,
]]
# var name, var name long, (0:scalar, 1:vector)
with open(filename+'.xdmf','w') as f:
f.write(tmpl.render(d=d[...,0],
filename=filename,
var=var_dict,
t= d[:,0,0,-1]))
def call_from_cli():
import argparse
import sys
parser = argparse.ArgumentParser(description="""
This script converts dued gnuplot output to the XDMF format, readable by VisIt.
Requires python2.7, numpy, jinja2 and PyTables
""")
parser.add_argument('folder', help='simulation folder')
parser.add_argument('-nt', '--nothreading', help='disable threading',
default=False, action='store_true')
parser.add_argument('-u', '--units', help="Choose units hedp (default) or cgs",
default='hedp', action='store')
parser.add_argument('-c', '--flashcomp', help="Make it easier to compare with FLASH",
default=False, action='store_true')
parser.add_argument('--version', action='version', version='%(prog)s 0.1.1')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
ParseDued(args.folder, parallel=(not args.nothreading), units=args.units, flash_comp=args.flashcomp).to_xdmf()
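# Programmatic usage sketch (hypothetical path): the parser can also be used
# directly, without the CLI wrapper above, e.g.
#   ParseDued('/path/to/dued_run', parallel=False, units='cgs').to_xdmf('my_run')
# which would write my_run.h5 and my_run.xdmf in the current working directory.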
|
StarcoderdataPython
|
6696229
|
<filename>flask/app/site/routes/dataset_search.py<gh_stars>0
import flask
from flask import session
from smtplib import SMTP
from flask import Flask, Blueprint, render_template, request, Response, redirect, url_for, jsonify
from app.api.models import datasets, UserModel, _runSql
import numpy
module = Blueprint('dataset_search', __name__)
@module.route("/samples_grid", methods=['GET', 'POST'])
def samples_grid():
data = request.get_json()
dataset_id = data['dataset_id']
ds = datasets.Dataset(dataset_id)
sampleTable = ds.sampleTable()
jsonSampleTable = sampleTable.to_json(orient="split")
if session["loggedIn"] == True:
if jsonSampleTable != None:
return jsonSampleTable
else:
return {"Message": "No samples found for dataset_id: " + dataset_id}
else:
return "Access Denied"
@module.route("/atlas_samples_grid", methods=['GET', 'POST'])
def atlas_samples_grid():
data = request.get_json()
atlas_type = data['atlas_type']
atlas_model = atlas_type['atlas_model']
atlas_project = atlas_type['tierModel']
update_column = atlas_model + '_' + atlas_project
dataset_id = data['dataset_id']
ds = datasets.Dataset(dataset_id)
atlasSampleTable = ds.atlasSampleTable()
newdf = atlasSampleTable[['sample_id'] + [update_column] + ['annotator', 'evidence', 'phenotype', 'activation_status', 'display_metadata', 'include_blood', 'include_imac'] ] # construct a dataframe with the selected tier included in it.
include_blood = newdf['include_blood'].unique()
include_imac = newdf['include_imac'].unique()
if include_blood[0] == False and include_imac[0] == False:
print("drop all tier data")
elif include_blood[0] == False:
print("drop all blood tier data")
# finaldf = atlasSampleTable[['sample_id'] + [update_column] + ['annotator', 'evidence', 'phenotype', 'activation_status', 'display_metadata', 'include_blood', 'include_imac'] ]
elif include_imac[0] == False:
print("drop all imac tier data")
else:
print("include all tier data")
jsonSampleTable = newdf.to_json(orient="split")
if session["loggedIn"] == True:
return jsonSampleTable
else:
return "Access Denied"
|
StarcoderdataPython
|
9726851
|
<gh_stars>1-10
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
class Bullet3Conan(ConanFile):
name = "bullet3"
description = "Bullet Physics SDK: real-time collision detection and multi-physics simulation for VR, games, visual effects, robotics, machine learning etc."
homepage = "https://github.com/bulletphysics/bullet3"
topics = "conan", "bullet", "physics", "simulation", "robotics", "kinematics", "engine",
license = "ZLIB"
url = "https://github.com/conan-io/conan-center-index"
exports_sources = "CMakeLists.txt"
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"bullet3": [True, False],
"graphical_benchmark": [True, False],
"double_precision": [True, False],
"bt2_thread_locks": [True, False],
"soft_body_multi_body_dynamics_world": [True, False],
"network_support": [True, False],
"extras": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"bullet3": False,
"graphical_benchmark": False,
"double_precision": False,
"bt2_thread_locks": False,
"soft_body_multi_body_dynamics_world": False,
"network_support": False,
"extras": False
}
_source_subfolder = "source_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
if self.settings.compiler == "Visual Studio" and self.options.shared:
raise ConanInvalidConfiguration("Shared libraries on Visual Studio not supported")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("bullet3-{}".format(self.version), self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["BUILD_BULLET3"] = self.options.bullet3
self._cmake.definitions["INSTALL_LIBS"] = True
self._cmake.definitions["USE_GRAPHICAL_BENCHMARK"] = self.options.graphical_benchmark
self._cmake.definitions["USE_DOUBLE_PRECISION"] = self.options.double_precision
self._cmake.definitions["BULLET2_USE_THREAD_LOCKS"] = self.options.bt2_thread_locks
self._cmake.definitions["USE_SOFT_BODY_MULTI_BODY_DYNAMICS_WORLD"] = self.options.soft_body_multi_body_dynamics_world
self._cmake.definitions["BUILD_ENET"] = self.options.network_support
self._cmake.definitions["BUILD_CLSOCKET"] = self.options.network_support
self._cmake.definitions["BUILD_CPU_DEMOS"] = False
self._cmake.definitions["BUILD_OPENGL3_DEMOS"] = False
self._cmake.definitions["BUILD_BULLET2_DEMOS"] = False
self._cmake.definitions["BUILD_EXTRAS"] = self.options.extras
self._cmake.definitions["BUILD_UNIT_TESTS"] = False
if self.settings.compiler == "Visual Studio":
self._cmake.definitions["USE_MSVC_RUNTIME_LIBRARY_DLL"] = "MD" in self.settings.compiler.runtime
self._cmake.configure()
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("LICENSE.txt", src=os.path.join(self.source_folder, self._source_subfolder), dst="licenses")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
libs = []
if self.options.bullet3:
libs += [
"Bullet2FileLoader",
"Bullet3Collision",
"Bullet3Dynamics",
"Bullet3Geometry",
"Bullet3OpenCL_clew",
]
libs += [
"BulletDynamics",
"BulletCollision",
"LinearMath",
"BulletSoftBody",
"Bullet3Common",
"BulletInverseDynamics",
]
if self.options.extras:
libs += [ "BulletInverseDynamicsUtils",
"BulletRobotics",
"BulletFileLoader",
"BulletXmlWorldImporter",
"BulletWorldImporter",
"ConvexDecomposition",
"HACD",
"GIMPACTUtils"
]
if self.settings.os == "Windows" and self.settings.build_type in ("Debug", "MinSizeRel", "RelWithDebInfo"):
libs = [lib + "_{}".format(self.settings.build_type) for lib in libs]
self.cpp_info.names["cmake_find_package"] = "Bullet"
self.cpp_info.names["cmake_find_package_multi"] = "Bullet"
self.cpp_info.names["pkg_config"] = "bullet"
self.cpp_info.libs = libs
self.cpp_info.includedirs = ["include", os.path.join("include", "bullet")]
if self.options.extras:
self.cpp_info.includedirs.append(os.path.join("include", "bullet_robotics"))
if self.options.double_precision:
self.cpp_info.defines.append("BT_USE_DOUBLE_PRECISION")
|
StarcoderdataPython
|
3587425
|
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from jobs.models import Job
from django.urls import reverse_lazy
from .forms import JobForm
from django.shortcuts import redirect
from django.db.models import Q
class JobView(ListView):
model = Job
template_name = 'jobs.html'
ordering = ['-id']
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
return super(JobView, self).dispatch(*args, **kwargs)
class JobsDetailedView(DetailView):
model = Job
template_name = 'jobs_detailed.html'
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
return super(JobsDetailedView, self).dispatch(*args, **kwargs)
class AddJobsView(CreateView):
model = Job
form_class = JobForm
template_name = 'jobs_add.html'
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
if not self.request.user.role == 'HR':
return redirect('jobs')
return super(AddJobsView, self).dispatch(*args, **kwargs)
class UpdateJobsView(UpdateView):
model = Job
form_class = JobForm
template_name = 'jobs_edit.html'
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
if not self.request.user.role == 'HR':
return redirect('jobs')
return super(UpdateJobsView, self).dispatch(*args, **kwargs)
class DeleteJobsView(DeleteView):
model = Job
template_name = 'jobs_delete.html'
success_url = reverse_lazy('jobs')
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
if not self.request.user.role == 'HR':
return redirect('jobs')
return super(DeleteJobsView, self).dispatch(*args, **kwargs)
class SearchResultsView(ListView):
model = Job
template_name = 'jobs_search.html'
context_object_name = 'filtered_jobs'
def get_queryset(self):
search_query = self.request.GET.get('search_query')
return Job.objects.filter(Q(title__icontains=search_query))
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
context = self.get_context_data()
return self.render_to_response(context)
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated:
return redirect('home')
return super(SearchResultsView, self).dispatch(*args, **kwargs)
|
StarcoderdataPython
|
6572659
|
from typing import Dict, List, NamedTuple, Optional
from firebolt.common.urls import INSTANCE_TYPES_URL
from firebolt.common.util import cached_property
from firebolt.model.instance_type import InstanceType, InstanceTypeKey
from firebolt.model.region import Region
from firebolt.service.base import BaseService
class InstanceTypeLookup(NamedTuple):
"""Helper tuple for looking up instance types by names"""
region_name: str
instance_type_name: str
class InstanceTypeService(BaseService):
@cached_property
def instance_types(self) -> List[InstanceType]:
"""List of instance types available on Firebolt."""
response = self.client.get(url=INSTANCE_TYPES_URL, params={"page.first": 5000})
return [InstanceType.parse_obj(i["node"]) for i in response.json()["edges"]]
@cached_property
def instance_types_by_key(self) -> Dict[InstanceTypeKey, InstanceType]:
"""Dict of {InstanceTypeKey to InstanceType}"""
return {i.key: i for i in self.instance_types}
@cached_property
def instance_types_by_name(self) -> Dict[InstanceTypeLookup, InstanceType]:
"""Dict of {InstanceTypeLookup to InstanceType}"""
return {
InstanceTypeLookup(
region_name=self.resource_manager.regions.get_by_id(
id_=i.key.region_id
).name,
instance_type_name=i.name,
): i
for i in self.instance_types
}
def cheapest_instance_in_region(self, region: Region) -> Optional[InstanceType]:
# Get only available instances in region
response = self.client.get(
url=INSTANCE_TYPES_URL,
params={"page.first": 5000, "filter.id_region_id_eq": region.key.region_id},
)
instance_types = [
InstanceType.parse_obj(i["node"]) for i in response.json()["edges"]
]
# Filter out instances without storage
instance_list = [
i
for i in instance_types
if i.storage_size_bytes and i.storage_size_bytes != "0"
]
if not instance_list:
return None
cheapest = min(
instance_list,
key=lambda x: x.price_per_hour_cents
if x.price_per_hour_cents
else float("Inf"),
)
return cheapest
def get_by_key(self, instance_type_key: InstanceTypeKey) -> InstanceType:
"""Get an instance type by key."""
return self.instance_types_by_key[instance_type_key]
def get_by_name(
self,
instance_type_name: str,
region_name: Optional[str] = None,
) -> InstanceType:
"""
Get an instance type by name.
Args:
instance_type_name: Name of the instance (eg. "i3.4xlarge").
region_name:
Name of the AWS region from which to get the instance.
If not provided, use the default region name from the client.
Returns:
The requested instance type.
"""
# Will raise an error if neither set
region_name = region_name or self.resource_manager.regions.default_region.name
return self.instance_types_by_name[
InstanceTypeLookup(
region_name=region_name,
instance_type_name=instance_type_name,
)
]
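# Usage sketch (assumptions: the SDK exposes this service on a ResourceManager
# instance as `instance_types`, and "i3.4xlarge" / "us-east-1" are example names):
#   rm = ResourceManager(...)                     # authenticated resource manager
#   itype = rm.instance_types.get_by_name("i3.4xlarge", region_name="us-east-1")
#   cheapest = rm.instance_types.cheapest_instance_in_region(rm.regions.default_region)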
|
StarcoderdataPython
|
3333665
|
import os
from setuptools import setup
long_description = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
setup(
name='kociemba',
version='1.2',
description='Python/C implementation of Herbert Kociemba\'s Two-Phase algorithm for solving Rubik\'s Cube',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='kociemba rubik cube solver twophase',
url='https://github.com/muodov/kociemba',
author='muodov',
author_email='<EMAIL>',
license='GPLv2',
packages=['kociemba'],
package_data={
'': [
'cprunetables/*',
'pykociemba/*.py',
'pykociemba/prunetables/*',
'ckociemba/include/*.h',
'ckociemba/*.c'],
},
classifiers=(
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
),
entry_points = {
'console_scripts': ['kociemba=kociemba.command_line:main'],
},
setup_requires=['pytest-runner', "cffi>=1.0.0"],
tests_require=['pytest', ],
zip_safe=False,
cffi_modules=["kociemba/build_ckociemba.py:ffi"],
install_requires=["cffi>=1.0.0", 'future'],
)
|
StarcoderdataPython
|
5054927
|
# Python test set -- part 3, built-in operations.
print '3. Operations'
print 'XXX Not yet implemented'
|
StarcoderdataPython
|
9778052
|
#!/usr/bin/env python
import os, sys, re, time, json
from flash_program_ll import burn_bin_files
try:
import serial
from serial.tools import miniterm
except:
print("\nNot found pyserial, please install it by: \nsudo python%d -m pip install pyserial" % (sys.version_info.major))
sys.exit(-1)
def get_bin_file():
""" get binary file from sys.argv --bin=/xxx/yyy/zzz.bin """
bin_files = []
pattern = re.compile(r'--(.*)=(.*)')
for arg in sys.argv[1:]:
if arg.startswith("--"):
match = pattern.match(arg)
if match:
key = match.group(1)
value = match.group(2)
if key == 'bin':
bin_files.append(value)
return bin_files
def read_json(json_file):
data = None
if os.path.isfile(json_file):
with open(json_file, 'r') as f:
data = json.load(f)
return data
def write_json(json_file, data):
with open(json_file, 'w') as f:
f.write(json.dumps(data, indent=4, separators=(',', ': ')))
def get_config():
""" get configuration from .config_burn file, if it is not existed,
generate default configuration of chip_haas1000 """
configs = {}
config_file = os.path.join(os.getcwd(), '.config_burn')
if os.path.isfile(config_file):
configs = read_json(config_file)
if not configs:
configs = {}
if 'chip_haas1000' not in configs:
configs['chip_haas1000'] = {}
if 'serialport' not in configs['chip_haas1000']:
configs['chip_haas1000']['serialport'] = ""
if 'baudrate' not in configs['chip_haas1000']:
configs['chip_haas1000']['baudrate'] = "1500000"
if 'binfile' not in configs['chip_haas1000']:
configs['chip_haas1000']['binfile'] = []
return configs['chip_haas1000']
def save_config(config):
""" save configuration to .config_burn file, only update chip_haas1000 portion """
if config:
configs = {}
config_file = os.path.join(os.getcwd(), '.config_burn')
if os.path.isfile(config_file):
configs = read_json(config_file)
if not configs:
configs = {}
configs['chip_haas1000'] = config
write_json(config_file, configs)
def check_uart(portnum, baudrate):
serialport = serial.Serial()
serialport.port = portnum
serialport.baudrate = baudrate
serialport.parity = "N"
serialport.bytesize = 8
serialport.stopbits = 1
serialport.timeout = 1
try:
serialport.open()
except Exception as e:
print("check_uart open serialport: %s error " % portnum)
return False
serialport.close()
return True
def main():
# step 1: get binary file
needsave = False
myconfig = get_config()
bin_files = get_bin_file()
if bin_files:
myconfig["binfile"] = bin_files
needsave = True
if not myconfig["binfile"]:
print("no specified binary file")
return
print("binary file is %s" % myconfig["binfile"])
# step 2: get serial port
if not myconfig["serialport"]:
myconfig["serialport"] = miniterm.ask_for_port()
if not myconfig["serialport"]:
print("no specified serial port")
return
else:
needsave = True
while check_uart(myconfig["serialport"], myconfig['baudrate']) == False:
myconfig["serialport"] = miniterm.ask_for_port()
print("serial port is %s" % myconfig["serialport"])
print("the settings were restored in the file %s" % os.path.join(os.getcwd(), '.config_burn'))
# step 3: burn binary file into flash
bin_files = []
for bin_file in myconfig["binfile"]:
filename = bin_file
address = "0"
if "#" in bin_file:
filename = bin_file.split("#", 1)[0]
address = bin_file.split("#", 1)[1]
bin_files.append((filename, address))
print("bin_files is ", bin_files)
burn_bin_files(myconfig["serialport"], myconfig['baudrate'], bin_files)
if needsave:
save_config(myconfig)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3560987
|
# Generated by Django 3.0.4 on 2020-07-06 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rapport', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='rapportsignalprobleme',
name='confirmed',
field=models.BooleanField(default=None),
),
]
|
StarcoderdataPython
|
3550389
|
from rest_framework import serializers
from equipment_assigments.models import Equipment_Assigment
class Equipment_AssigmentSerializer(serializers.ModelSerializer):
class Meta:
model = Equipment_Assigment
fields = (
'id',
'id_user',
'id_equipment',
)
|
StarcoderdataPython
|
9658819
|
import cv2
import numpy as np
import face_recognition
import os
import datetime
import pyrebase
config = {
"apiKey": "<KEY>",
"authDomain": "mark-it-ec28b.firebaseapp.com",
"projectId": "mark-it-ec28b",
"storageBucket": "mark-it-ec28b.appspot.com",
"messagingSenderId": "187768173767",
"appId": "1:187768173767:web:e9ba36b17e9112fc6cfae2",
"measurementId": "G-42HYDHB4Q1",
"databaseURL": "gs://mark-it-ec28b.appspot.com"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
path_on_cloud = "Attendance/Attendance.csv"
path_local = "Attendance.csv"
path = 'Image Database'
images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cls in myList:
curImg = cv2.imread(f'{path}/{cls}')
images.append(curImg)
classNames.append(os.path.splitext(cls)[0])
print(classNames)
def findEncodings(images):
encodeList = []
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
encode = face_recognition.face_encodings(img)[0]
encodeList.append(encode)
return encodeList
def markAttendance(name):
with open('Attendance.csv', 'r+') as f:
myDataList = f.readlines()
nameList = []
for line in myDataList:
entry = line.split(',')
nameList.append(entry[0])
if name not in nameList:
now = datetime.datetime.now()
dtString = now.strftime('%H:%M:%S')
f.writelines(f'\n{name}, {dtString}')
# upload csv to firebase
storage.child(path_on_cloud).put(path_local)
encodeListKnown = findEncodings(images)
print('Encoding Complete')
cap = cv2.VideoCapture(0)
while True:
success, img = cap.read()
imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
facesCurFrame = face_recognition.face_locations(imgS)
encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
# print(faceDis)
matchIndex = np.argmin(faceDis)
if matches[matchIndex]:
name = classNames[matchIndex].upper()
# print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
markAttendance(name)
storage.child(path_on_cloud).put(path_local)
cv2.imshow('Webcam', img)
cv2.waitKey(1)
|
StarcoderdataPython
|
1667838
|
import numpy as np
import matplotlib.pyplot as plt
def calcY_HM(g, t):
v = g*t
h = (g*(t**2))/2  # vertical drop under gravity: y = g*t^2/2
return h
def calcX_HM(v,t):
S = v*t
return S
def plotGraph(time, XY):
t = np.arange(0.0, time, 0.125)
fig, ax = plt.subplots()
ax.plot(XY[0], XY[1])
ax.set(xlabel='X', ylabel='Y', title='horizontal movement')
ax.grid()
fig.savefig("test.png")
plt.show()
def main():
time = 1 #input("time: ")
velocity = 10 #input("velocity: ")
running = True
Y = []
X = []
allPoints = []
g = 9.8
for i in np.arange(0, time, 0.125):
X.append(calcX_HM(velocity,i))
allPoints.append(X)
for i in np.arange (0, time, 0.125):
Y.append(-(calcY_HM(g, i)))
allPoints.append(Y)
print(allPoints)
while running:
plotGraph(time, allPoints)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3235312
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat_sim._ext.habitat_sim_bindings import (
ArticulatedObjectManager,
CollisionGroupHelper,
CollisionGroups,
JointMotorSettings,
JointMotorType,
JointType,
ManagedArticulatedObject,
ManagedBulletArticulatedObject,
ManagedBulletRigidObject,
ManagedRigidObject,
MotionType,
PhysicsSimulationLibrary,
RaycastResults,
RayHitInfo,
RigidConstraintSettings,
RigidConstraintType,
RigidObjectManager,
VelocityControl,
)
__all__ = [
"ManagedRigidObject",
"ManagedBulletRigidObject",
"ManagedArticulatedObject",
"ManagedBulletArticulatedObject",
"RigidObjectManager",
"ArticulatedObjectManager",
"PhysicsSimulationLibrary",
"MotionType",
"VelocityControl",
"RayHitInfo",
"RaycastResults",
"CollisionGroups",
"CollisionGroupHelper",
"JointType",
"JointMotorSettings",
"JointMotorType",
"RigidConstraintType",
"RigidConstraintSettings",
]
|
StarcoderdataPython
|
5147193
|
"""
Functions for handling dates.
Contains:
gd2jd -- converts gregorian date to julian date
jd2gd -- converts julian date to gregorian date
Wish list:
Function to convert heliocentric julian date!
These functions were taken from Enno Middleberg's site of useful
astronomical python references:
http://www.astro.rub.de/middelberg/python/python.html
"Feel free to download, use, modify and pass on these scripts, but
please do not remove my name from it." --<NAME>
"""
# 2009-02-15 13:12 IJC: Converted to importable function
import string
def gd2jd(*date):
"""gd2jd.py converts a UT Gregorian date to Julian date.
Usage: gd2jd.py (2009, 02, 25, 01, 59, 59)
To get the current Julian date:
import time
gd2jd(time.gmtime())
Hours, minutes and/or seconds can be omitted -- if so, they are
assumed to be zero.
Year and month are converted to type INT, but all others can be
type FLOAT (standard practice would suggest only the final element
of the date should be float)
"""
print date
print date[0]
date = date[0]
date = list(date)
if len(date)<3:
print "You must enter a date of the form (2009, 02, 25)!"
return -1
elif len(date)==3:
for ii in range(3): date.append(0)
elif len(date)==4:
for ii in range(2): date.append(0)
elif len(date)==5:
date.append(0)
yyyy = int(date[0])
mm = int(date[1])
dd = float(date[2])
hh = float(date[3])
min = float(date[4])
sec = float(date[5])
print yyyy,mm,dd,hh,min,sec
UT=hh+min/60+sec/3600
print "UT="+`UT`
total_seconds=hh*3600+min*60+sec
fracday=total_seconds/86400
print "Fractional day: %f" % fracday
# print dd,mm,yyyy, hh,min,sec, UT
if (100*yyyy+mm-190002.5)>0:
sig=1
else:
sig=-1
JD = 367*yyyy - int(7*(yyyy+int((mm+9)/12))/4) + int(275*mm/9) + dd + 1721013.5 + UT/24 - 0.5*sig +0.5
months=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
print "\n"+months[mm-1]+" %i, %i, %i:%i:%i UT = JD %f" % (dd, yyyy, hh, min, sec, JD),
# Now calculate the fractional year. Do we have a leap year?
daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]
if (yyyy%4 != 0):
days=daylist
elif (yyyy%400 == 0):
days=daylist2
elif (yyyy%100 == 0):
days=daylist
else:
days=daylist2
daysum=0
for y in range(mm-1):
daysum=daysum+days[y]
daysum=daysum+dd-1+UT/24
if days[1]==29:
fracyear=yyyy+daysum/366
else:
fracyear=yyyy+daysum/365
print " = " + `fracyear`+"\n"
return
def jd2gd(jd):
"""Task to convert a list of julian dates to gregorian dates
description at http://mathforum.org/library/drmath/view/51907.html
Original algorithm in Jean Meeus, "Astronomical Formulae for
Calculators"
2009-02-15 13:36 IJC: Converted to importable, callable function
"""
jd=jd+0.5
Z=int(jd)
F=jd-Z
alpha=int((Z-1867216.25)/36524.25)
A=Z + 1 + alpha - int(alpha/4)
B = A + 1524
C = int( (B-122.1)/365.25)
D = int( 365.25*C )
E = int( (B-D)/30.6001 )
dd = B - D - int(30.6001*E) + F
if E<13.5:
mm=E-1
if E>13.5:
mm=E-13
if mm>2.5:
yyyy=C-4716
if mm<2.5:
yyyy=C-4715
months=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]
h=int((dd-int(dd))*24)
min=int((((dd-int(dd))*24)-h)*60)
sec=86400*(dd-int(dd))-h*3600-min*60
# Now calculate the fractional year. Do we have a leap year?
if (yyyy%4 != 0):
days=daylist
elif (yyyy%400 == 0):
days=daylist2
elif (yyyy%100 == 0):
days=daylist
else:
days=daylist2
hh = 24.0*(dd % 1.0)
min = 60.0*(hh % 1.0)
sec = 60.0*(min % 1.0)
dd = dd-(dd%1.0)
hh = hh-(hh%1.0)
min = min-(min%1.0)
print str(jd)+" = "+str(months[mm-1])+ ',' + str(dd) +',' +str(yyyy)
print string.zfill(h,2)+":"+string.zfill(min,2)+":"+string.zfill(sec,2)+" UTC"
print (yyyy, mm, dd, hh, min, sec)
return
|
StarcoderdataPython
|
3402653
|
<reponame>Honno/coinflip<gh_stars>1-10
import webbrowser
from tempfile import NamedTemporaryFile
from click.testing import CliRunner
from hypothesis import HealthCheck
from hypothesis import settings
from hypothesis.stateful import Bundle
from hypothesis.stateful import RuleBasedStateMachine
from hypothesis.stateful import rule
from coinflip.cli import commands
from coinflip.cli import console
from .strategies import mixedbits
class CliStateMachine(RuleBasedStateMachine):
def __init__(self):
super(CliStateMachine, self).__init__()
self.runner = CliRunner()
console.print = noop
webbrowser.open = noop
randtest_results = Bundle("randtest_results")
reports = Bundle("reports")
@rule()
def main(self):
result = self.runner.invoke(commands.main, [])
assert_success(result)
@rule()
def example_run(self):
result = self.runner.invoke(commands.example_run, [])
assert_success(result)
@rule(target=randtest_results, sequence=mixedbits())
def run(self, sequence):
data = NamedTemporaryFile(delete=False)
out = NamedTemporaryFile(delete=False)
with data as f:
for x in sequence:
x_bin = str(x).encode("utf-8")
line = x_bin + b"\n"
f.write(line)
f.seek(0)
result = self.runner.invoke(commands.run, [f.name, out.name])
assert_success(result)
return out.name
@rule(path=randtest_results)
def read(self, path):
result = self.runner.invoke(commands.read, [path])
assert_success(result)
@rule(target=reports, path=randtest_results)
def report(self, path):
out = NamedTemporaryFile(delete=False)
result = self.runner.invoke(commands.report, [path, out.name])
assert_success(result)
return out.name
TestCliStateMachine = CliStateMachine.TestCase # top-level TestCase picked up by pytest
TestCliStateMachine.settings = settings(
suppress_health_check=[HealthCheck.data_too_large, HealthCheck.too_slow],
deadline=None,
)
def noop(*args, **kwargs):
return None
def assert_success(result):
assert result.exit_code == 0, result.stderr if result.stderr_bytes else str(result)
|
StarcoderdataPython
|
12851456
|
<filename>scripts/betterX_labs_attributes.py<gh_stars>0
## Web File
def insertWeb(filetype, json, cursor, conn, uid):
if (filetype == 'web'):
web_page_node(json,uid,cursor,conn) # [pages] / [pageNode]
web_entry_node(json, uid, cursor, conn) # [pages] / [entriesNode]
def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_response'
featureAttrs = {'status', 'statusText', 'httpVersion', 'cookieNumber', 'redirectURL', 'headersSize', 'bodySize'}
featureAttrs2 = {'Date', 'Server', 'X-Powered-By', 'Content-Encoding', 'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type'}
featureAttrs3 = {'size', 'compression', 'mimeType', 'encoding'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['response'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['response']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Date', 'header_Date')
renameArrayItem(vals2, 'Server', 'header_Server')
renameArrayItem(vals2, 'X-Powered-By', 'header_XPoweredBy')
renameArrayItem(vals2, 'Content-Encoding', 'header_ContentEncoding')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Type', 'header_ContentType')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
vals3 = {}
values3 = []
cntattr3 = 0
for tis3 in featureAttrs3:
vals3,values3 = appendJsonKey(json_entries_node['response']['content'], tis3, vals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
renameArrayItem(vals3, 'size', 'content_size')
renameArrayItem(vals3, 'compression', 'content_compression')
renameArrayItem(vals3, 'mimeType', 'content_mimeType')
renameArrayItem(vals3, 'encoding', 'content_encoding')
attrsInJson3,typesInJson3 = toCommaStringDict(vals3)
#print type(attrsInJson3)
#print attrsInJson3
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
values.extend(values2)
if ( attrsInJson3 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
values.extend(values3)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_request'
featureAttrs = {'method', 'url', 'httpVersion', 'cookieNumber', 'headerSize', 'bodySize'}
featureAttrs2 = {'Host', 'User-Agent', 'Accept', 'Accept-Encoding', 'Connection', 'Content-Length', 'Keep-Alive'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['request'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['request']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Host', 'header_Host')
renameArrayItem(vals2, 'User-Agent', 'header_UserAgent')
renameArrayItem(vals2, 'Accept', 'header_Accept')
renameArrayItem(vals2, 'Accept-Encoding', 'header_AcceptEncoding')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
typesInJsonCombined = typesInJson + ',' + typesInJson2
values.extend(values2)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_node(json, uid, cursor, conn):
tblName = 'lab_web_entries'
featureAttrs = {'pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection'}
featureAttrs2 = {'blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl'}
featureAttrs3 = {'beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount'}
for jiv in json['pages']:
for innerjiv in jiv['entriesNode']:
cntattr = 0
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(innerjiv, tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
cntattr2 = 0
attrsInJson2 = ''
typesInJson2 = ''
keytypevals2 = {}
values2 = []
for tis2 in featureAttrs2:
keytypevals2,values2 = appendJsonKey(innerjiv['timings'], tis2, keytypevals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
cntattr3 = 0
attrsInJson3 = ''
typesInJson3 = ''
keytypevals3 = {}
values3 = []
for tis3 in featureAttrs3:
keytypevals3,values3 = appendJsonKey(innerjiv['cache'], tis3, keytypevals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
attrsInJson3,typesInJson3 = toCommaStringDict(keytypevals3)
##combine
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
values.extend(values2)
values.extend(values3)
#insert
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
##entry request
web_entry_id = getMaxId(tblName,cursor,conn)
web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)
def web_page_node(json, uid, cursor, conn):
tblName = 'lab_web_pages'
featureAttrs = {'tabid', 'pageStartTime', 'pageid', 'pagetitle', 'pageOnContentLoad', 'pageOnLoad', 'origin'}
cntattr = 0
for jiv in json['pages']:
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(jiv['pageNode'], tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
keytypevals[cntattr] = 'uid'
cntattr = cntattr + 1
values.append(uid)
renameArrayItem(keytypevals, 'pageid', 'id')
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Helper Functions
def dbinsert(tblName,fields,fieldTypes,cursor,values,conn):
sql_command = "insert into " + tblName + " (" + fields + ") values (" + fieldTypes + ")"
#print sql_command
#print values
cursor.execute(sql_command, values)
conn.commit()
def getMaxId(tblName,cursor, conn):
sql = "select max(id) from " + tblName
cursor.execute(sql)
results = cursor.fetchall()
return str(results[0][0])
def isJsonKey(json, tisKey):
for key,val in json.items():
if (key == tisKey):
return True
break
return False
def appendJsonKey(json, key, vals, values, cntattr):
if (isJsonKey(json,key)):
vals[cntattr] = str(key)
values.append(json[key])
return vals,values
def toCommaStringDict(keytypevals):
ret = ''
ret2 = ''
for key in keytypevals:
ret = ret + '`' + keytypevals[key] + '`' + ','
ret2 = ret2 + '%s' + ','
if (len(ret) > 0):
ret = ret[:-1]
ret2 = ret2[:-1]
return ret,ret2
def renameArrayItem(arr, frm, to):
for key in arr:
try:
if( arr[key] == frm):
arr[key] = to
except:
dummy = 0
return arr
def appendJsonKeyConcat(json, key, vals, values, cntattr):
ret = ''
if (isJsonKey(json,key)):
for i in json[key]:
ret = (ret + ' ' + i).strip()
vals[cntattr] = str(key)
values.append(ret)
return vals,values
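# Helper usage sketch (hypothetical input): appendJsonKey collects column names
# and values from a JSON fragment, and toCommaStringDict turns them into the
# column list and placeholder list consumed by dbinsert, e.g.
#   vals, values = appendJsonKey({'status': 200, 'bodySize': 42}, 'status', {}, [], 0)
#   cols, placeholders = toCommaStringDict(vals)   # -> ('`status`', '%s')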
|
StarcoderdataPython
|
3422828
|
#Write an algorithm in which the user states how many ages will be entered, then display the largest one.
a=int(input("Enter how many ages you want to provide"))
i=0
m=0
for i in range(a):
n=int(input("Enter an age"))
if (n>m):
m=n
print(m)
|
StarcoderdataPython
|
96294
|
from copy import copy
import numpy as np
from gym_chess import ChessEnvV1
from gym_chess.envs.chess_v1 import (
KING_ID,
QUEEN_ID,
ROOK_ID,
BISHOP_ID,
KNIGHT_ID,
PAWN_ID,
)
from gym_chess.test.utils import run_test_funcs
# Blank board
BASIC_BOARD = np.array([[0] * 8] * 8, dtype=np.int8)
# Pawn basic movements
def test_pawn_basic_moves():
BOARD = copy(BASIC_BOARD)
BOARD[6, 0] = PAWN_ID
BOARD[1, 0] = -PAWN_ID
env = ChessEnvV1(opponent="none", initial_state=BOARD)
# player_1
actions = env.get_possible_actions()
env.step(actions[0])
# player_2
actions = env.get_possible_actions()
env.step(actions[0])
# player_3
actions = env.get_possible_actions()
env.step(actions[0])
# player_4
actions = env.get_possible_actions()
env.step(actions[0])
EXPECTED_BOARD = copy(BASIC_BOARD)
EXPECTED_BOARD[4, 0] = PAWN_ID
EXPECTED_BOARD[3, 0] = -PAWN_ID
assert (env.state == EXPECTED_BOARD).all()
if __name__ == "__main__":
run_test_funcs(__name__)
|
StarcoderdataPython
|
8195819
|
try:
from wrapper import *
except ImportError:
from .wrapper import *
|
StarcoderdataPython
|
8043460
|
<reponame>hershg/ray
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
@ray.remote
def f():
return 0
@ray.remote
def g():
import time
start = time.time()
while time.time() < start + 1:
ray.get([f.remote() for _ in range(10)])
# 10MB -> hangs after ~5 iterations
# 20MB -> hangs after ~20 iterations
# 50MB -> hangs after ~50 iterations
ray.init(redis_max_memory=1024 * 1024 * 50)
i = 0
for i in range(100):
i += 1
a = g.remote()
[ok], _ = ray.wait([a])
print("iter", i)
|
StarcoderdataPython
|
11314777
|
<filename>prioritization/prioritization_runner.py
import prioritization as pr
alphaRangeNum = 5
projects = ['Chart', 'Closure', 'Lang', 'Math', 'Time']
fromVersion = [1, 1, 1, 1, 1]
toVersion = [13, 50, 33, 50, 14]
#projects = ['Chart']
#fromVersion = [1]
#toVersion = [13]
sum_additional_elapsed_time = 0
sum_total_elapsed_time = 0
file = open("../../WTP-data/prioritization_exectime.txt","a")
for index, project in enumerate(projects):
print('*** Project: %s ***' % project)
versions = 0
for versionNumber in range(fromVersion[index], toVersion[index]+1):
print("* Version %d" % versionNumber)
additional_elapsed_time, total_elapsed_time = pr.runPrioritization(project, versionNumber, alphaRangeNum)
if (additional_elapsed_time == -1):
continue
else:
versions = versions+1
print("additional_elapsed_time: ", additional_elapsed_time)
print("total_elapsed_time: ", total_elapsed_time)
sum_additional_elapsed_time = sum_additional_elapsed_time + additional_elapsed_time
sum_total_elapsed_time = sum_total_elapsed_time + total_elapsed_time
mean_additional_elapsed_time = sum_additional_elapsed_time/versions
mean_total_elapsed_time = sum_total_elapsed_time/versions
file.write("%s,%f,%f\n" % (project, mean_additional_elapsed_time,mean_total_elapsed_time))
file.close()
|
StarcoderdataPython
|
298504
|
#!/usr/bin/python
import numpy as np
import roslib; roslib.load_manifest('hrl_fabric_based_tactile_sensor')
roslib.load_manifest('hrl_meka_skin_sensor_darpa_m3')
import rospy
import hrl_meka_skin_sensor_darpa_m3.skin_patch_calibration as spc
from std_msgs.msg import Empty
class Fabric_Skin_Calibration(spc.SkinCalibration):
def __init__(self):
spc.SkinCalibration.__init__(self)
def raw_data_to_force(self, raw_data):
# this might change depending on the pull-up value (e.g.
# different pullup values on the PR2 and Cody)
try:
d_biased = self.subtract_bias(raw_data, 0)
#calib_data = -d_biased / 50. # calibration!
#calib_data = -d_biased / 30. # calibration!
#calib_data = -d_biased / 15. # calibration!
calib_data = -d_biased / self.calibration_slope # calibration!
idxs = (np.where(calib_data < self.max_ignore_value))[0]
calib_data[idxs] = 0.
return calib_data
except ValueError:
rospy.logerr('raw_data.shape: '+str(raw_data.shape))
rospy.signal_shutdown('Error in the fabric skin driver or calibration node')
def zero_sensor_cb(msg):
fsc.compute_bias(rdc, 10)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser()
p.add_option('--slope', action='store',
dest='slope', type='float',
help='slope of calibration line')
p.add_option('--max_ignore_value', '--miv', action='store',
dest='miv', type='float',
help='max force to ignore (return as zero)')
opt, args = p.parse_args()
rospy.init_node('fabric_skin_calibration_node')
fsc = Fabric_Skin_Calibration()
fsc.precompute_taxel_location_and_normal()
fsc.calibration_slope = opt.slope
fsc.max_ignore_value = opt.miv
rdc = spc.RawDataClient('taxels/raw_data')
fsc.compute_bias(rdc, 100)
rospy.Subscriber('zero_sensor', Empty, zero_sensor_cb)
while not rospy.is_shutdown():
d = rdc.get_raw_data(True)
fsc.publish_taxel_array(d)
|
StarcoderdataPython
|
6607416
|
n = int(input())
sum = 0
k = 0
while sum < n:
k += 1
sum += k
sum -= k
count = n - sum
if k % 2 == 0:
print("{1}/{0}".format(k - count + 1, count))
else:
print("{0}/{1}".format(k - count + 1, count))
|
StarcoderdataPython
|
3349220
|
<reponame>waduhek/shopping<filename>cart/models.py
from django.db import models
from django.contrib.auth.models import User
from shop.models import Product
class Cart(models.Model):
cart_id = models.CharField(max_length=250, blank=True)
date_added = models.DateField(auto_now_add=True)
class Meta:
db_table = 'Cart'
def __str__(self):
return self.cart_id
class CartItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
quantity = models.PositiveIntegerField()
active = models.BooleanField(default=True)
class Meta:
db_table = 'CartItem'
def subTotal(self):
return self.quantity * self.product.price
    def __str__(self):
        return str(self.product)
class Address(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=255, default=None)
addressLine1 = models.CharField(max_length=255)
addressLine2 = models.CharField(max_length=255)
state = models.CharField(max_length=100)
city = models.CharField(max_length=100)
pincode = models.CharField(max_length=50, default=None)
class Meta:
db_table = 'Address'
    def __str__(self):
        return "Address: {}".format(self.user)
|
StarcoderdataPython
|
8171682
|
import pandas as pd
from config.base_config import column_config_path, data_path
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from preprocess.Preprocess import PreprocessMissvalue, PreprocessOutlier
from feature.FeatureProcess import FeaturesStandard, FeaturesEncoder, FeaturesDecomposition, FeaturesSelection
data = pd.read_csv(data_path)
# simple process
label = 'Survived'
cat_cols = ['Pclass', 'Siblings/Spouses Aboard', 'Parents/Children Aboard']
data[cat_cols] = data[cat_cols].astype('category')
# read column config
column_config = pd.read_csv(column_config_path)
modules = column_config.columns.tolist()[2:]
columns = column_config.features.tolist()
# default setting
# default for missvalue
column_config.loc[(column_config.types=='category') & (column_config.missvalue.isna()),'missvalue'] ="SimpleImputer(strategy='most_frequent')"
column_config.loc[(column_config.types=='numeric') & (column_config.missvalue.isna()),'missvalue'] = "SimpleImputer(strategy='median')"
# default for encoder
column_config.loc[(column_config.types=='category') & (column_config.encoder.isna()),'encoder'] = "OneHotEncoder"
column_config.fillna('NA', inplace=True)
# construct transformers
column_g = column_config.groupby(modules)
transformer_list = []
for index, each in enumerate(column_g):
sub_df = each[1]
sub_tran = each[0]
sub_feat = sub_df['features'].tolist()
trans = []
for i,v in enumerate(sub_tran):
module = modules[i]
if module == 'outlier':
func=PreprocessOutlier
elif module == 'missvalue':
func = PreprocessMissvalue
elif module == 'standard':
func = FeaturesStandard
elif module == 'encoder':
func = FeaturesEncoder
if v !='NA':
trans.append((module, func(v)))
sub_pipe = Pipeline(trans)
transformer_list.append(('group_'+str(index), sub_pipe, sub_feat))
processes = ColumnTransformer(transformer_list)
dataset = processes.fit_transform(data)
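# --- Illustrative column_config layout (inferred from how the columns are used above; the real CSV may differ) ---
# features,types,outlier,missvalue,standard,encoder
# Age,numeric,NA,"SimpleImputer(strategy='median')",StandardScaler,NA
# Pclass,category,NA,"SimpleImputer(strategy='most_frequent')",NA,OneHotEncoder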
|
StarcoderdataPython
|
3336100
|
<filename>nets/numeric.py
"""
The ``functional`` modules defines basic functions to generate tensors and transform data.
"""
# Basic imports
import numpy as np
try:
import cupy as cp
except ModuleNotFoundError:
pass
# NETS Package
import nets
from nets.cuda import numpy_or_cupy
from nets.data import dataset
def set(t, key, value):
r"""Set new value(s) to a tensor.
.. warning::
Setting manually values of a tensor will invalidate its gradients.
Args:
t (Tensor): tensor to compare.
key (scalar or tensor): indices to set.
value (scalar or tensor): new values.
Returns:
Tensor
"""
t = nets.to_tensor(t)
value = nets.to_tensor(value)
# To device
if t.device == 'cpu' and value.device != 'cpu':
t.cuda()
elif value.device == 'cpu' and t.device != 'cpu':
value.cuda()
cpu = True
if isinstance(key, tuple):
for k in key:
if isinstance(k, nets.Tensor):
if k.device != 'cpu':
cpu = False
if not cpu:
t.cuda()
value.cuda()
for k in key:
if isinstance(k, nets.Tensor):
k.cuda()
if isinstance(key, nets.Tensor):
key = key.data
elif isinstance(key, tuple):
keys = []
for k in key:
if isinstance(k, nets.Tensor):
keys.append(k.data)
else:
keys.append(k)
key = tuple(keys)
t.data[key] = value.data
# Setting a tensor invalidate its gradient
t.detach()
return t
def gt(t, other):
r"""Return a boolean tensor for *greater than* condition.
.. math::
\text{gt}_{\text{other}}(t) = t > other
Args:
t (Tensor): tensor to compare.
other (Tensor like): object to compare the tensor.
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
data = t.data > other.data
return nets.Tensor(data, device=t.device)
def ge(t, other):
r"""Return a boolean tensor for *greater or equal* condition.
.. math::
        \text{ge}_{\text{other}}(t) = t \ge other
Args:
t (Tensor): tensor to compare
other (Tensor like): object to compare the tensor
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
data = t.data >= other.data
return nets.Tensor(data, device=t.device)
def lt(t, other):
r"""Return a boolean tensor for *lower than* condition.
.. math::
        \text{lt}_{\text{other}}(t) = t < other
Args:
t (Tensor): tensor to compare
other (Tensor like): object to compare the tensor
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
data = t.data < other.data
return nets.Tensor(data, device=t.device)
def le(t, other):
r"""Return a boolean tensor for *lower or equal* condition.
.. math::
        \text{le}_{\text{other}}(t) = t \le other
Args:
t (Tensor): tensor to compare
other (Tensor like): object to compare the tensor
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
data = t.data <= other.data
return nets.Tensor(data, device=t.device)
def eq(t, other):
r"""Return a boolean tensor for *equal* condition.
.. math::
        \text{eq}_{\text{other}}(t) = t == other
Args:
t (Tensor): tensor to compare
other (Tensor like): object to compare the tensor
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
cond = t.data == other.data
return nets.Tensor(cond, device=t.device)
def ne(t, other):
r"""Return a boolean tensor for *not equal* condition.
.. math::
        \text{ne}_{\text{other}}(t) = t \ne other
Args:
t (Tensor): tensor to compare
other (Tensor like): object to compare the tensor
Returns:
Tensor
"""
t = nets.to_tensor(t)
other = nets.to_tensor(other)
    data = t.data != other.data
return nets.Tensor(data, device=t.device)
def unravel_index(indices, shape, order='C', requires_grad=False, device='cpu'):
"""Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Args:
indices ([type]): An integer array whose elements are indices into the flattened version of an array of dimensions shape.
shape ([type]): The shape of the tensor to use for unraveling indices.
order (str, optional): Determines whether the indices should be viewed as indexing
in row-major (C-style, ``'C'``) or column-major (Fortran-style, ``'F``) order. Defaults to ``'C'``.
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.unravel_index(indices, shape, order=order)
else:
data = cp.unravel_index(indices, shape, order=order)
return nets.Tensor(data, requires_grad=requires_grad, device=device)
def rollaxis(t, axis, start=0):
"""Roll the specified axis backwards, until it lies in a given position.
Args:
t (Tensor): Input tensor.
axis (int): The axis to be rolled.
The positions of the other axes do not change relative to one another.
start (int, optional): When ``start <= axis``, the axis is rolled back until it lies in this position.
When ``start > axis``, the axis is rolled until it lies before this position.
The default, 0, results in a "complete" roll.
Returns:
Tensor
"""
nc = numpy_or_cupy(t)
data = nc.rollaxis(t.data, axis, start=start)
return nets.Tensor(data, requires_grad=t.requires_grad, device=t.device)
def zeros(shape, requires_grad=False, device='cpu', **kwargs):
"""Create a zeros tensor of a given shape.
Args:
shape (tuple): shape of the 0-tensor.
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.zeros(shape, **kwargs)
else:
data = cp.zeros(shape, **kwargs)
return nets.Tensor(data, requires_grad=requires_grad, device=device)
def zeros_like(t, **kwargs):
"""Create a zeros tensor as the same shape of a given tensor.
Args:
        t (Tensor): reference tensor whose shape, requires_grad flag and device are reused.
Returns:
Tensor
"""
return zeros(t.shape, requires_grad=t.requires_grad, device=t.device, **kwargs)
def ones(shape, requires_grad=False, device='cpu', **kwargs):
"""Create a ones tensor of a given shape.
Args:
shape (tuple): shape of the 0-tensor.
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.ones(shape, **kwargs)
else:
data = cp.ones(shape, **kwargs)
return nets.Tensor(data, requires_grad=requires_grad, device=device)
def ones_like(t, **kwargs):
"""Create a ones tensor as the same shape of a given tensor.
Args:
        t (Tensor): reference tensor whose shape, requires_grad flag and device are reused.
Returns:
Tensor
"""
return ones(t.shape, requires_grad=t.requires_grad, device=t.device, **kwargs)
def eye(size, requires_grad=False, device='cpu', **kwargs):
"""Create an eye matrix.
Args:
size (int): size of the matrix.
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.eye(size, **kwargs)
else:
data = cp.eye(size, **kwargs)
return nets.Tensor(data, requires_grad=requires_grad, device=device)
def identity(size, requires_grad=False, device='cpu', **kwargs):
"""Create an identity matrix.
Args:
size (int): size of the matrix.
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.identity(size, **kwargs)
else:
data = cp.identity(size, **kwargs)
    return nets.Tensor(data, requires_grad=requires_grad, device=device)
def arange(*args, requires_grad=False, device='cpu', **kwargs):
"""Create a range of values.
Args:
requires_grad (bool): if ``True`` will track gradients.
device (str): name of the device where the tensor is located. Default to ``'cpu'``.
Returns:
Tensor
"""
if device == 'cpu':
data = np.arange(*args, **kwargs)
else:
data = cp.arange(*args, **kwargs)
return nets.Tensor(data, requires_grad=requires_grad, device=device)
def astype(t, new_type):
"""Create a range of values.
Args:
new_type (str): new type of the data.
Returns:
Tensor
"""
data = t.data.astype(new_type)
return nets.Tensor(data, requires_grad=t.requires_grad, device=t.device)
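# --- Usage sketch (illustrative; assumes the `nets` package is importable and exposes Tensor) ---
if __name__ == '__main__':
    a = arange(6)                 # tensor with data [0, 1, 2, 3, 4, 5]
    b = ones((6,))                # tensor of ones with the same shape
    print(eq(a, b).data)          # elementwise comparison, True only at index 1
    print(zeros_like(a).data)     # zeros with the same shape as `a`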
|
StarcoderdataPython
|
6451535
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile:
"""Loading image(s) from file.
Required key: "image_file".
Added key: "img".
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): Flags specifying the color type of a loaded image,
candidates are 'color', 'grayscale' and 'unchanged'.
channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='color',
channel_order='rgb',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.channel_order = channel_order
self.file_client_args = file_client_args.copy()
self.file_client = None
def _read_image(self, path):
img_bytes = self.file_client.get(path)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, channel_order=self.channel_order)
if img is None:
raise ValueError(f'Fail to read {path}')
if self.to_float32:
img = img.astype(np.float32)
return img
@staticmethod
def _bgr2rgb(img):
if img.ndim == 3:
return mmcv.bgr2rgb(img)
elif img.ndim == 4:
return np.concatenate([mmcv.bgr2rgb(img_) for img_ in img], axis=0)
else:
raise ValueError('results["img"] has invalid shape '
f'{img.shape}')
def __call__(self, results):
"""Loading image(s) from file."""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
image_file = results.get('image_file', None)
if isinstance(image_file, (list, tuple)):
# Load images from a list of paths
results['img'] = [self._read_image(path) for path in image_file]
elif image_file is not None:
# Load single image from path
results['img'] = self._read_image(image_file)
else:
if 'img' not in results:
                # If ``image_file`` is not in results, check that ``img`` exists
                # and format the image. This is for compatibility when the image
                # is manually set outside the pipeline.
raise KeyError('Either `image_file` or `img` should exist in '
'results.')
if isinstance(results['img'], (list, tuple)):
assert isinstance(results['img'][0], np.ndarray)
else:
assert isinstance(results['img'], np.ndarray)
if self.color_type == 'color' and self.channel_order == 'rgb':
# The original results['img'] is assumed to be image(s) in BGR
# order, so we convert the color according to the arguments.
if isinstance(results['img'], (list, tuple)):
results['img'] = [
self._bgr2rgb(img) for img in results['img']
]
else:
results['img'] = self._bgr2rgb(results['img'])
results['image_file'] = None
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadVideoFromFile:
"""Loading video(s) from file.
Required key: "video_file".
Added key: "video".
Args:
to_float32 (bool): Whether to convert the loaded video to a float32
numpy array. If set to False, the loaded video is an uint8 array.
Defaults to False.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.file_client_args = file_client_args.copy()
self.file_client = None
def _read_video(self, path):
container = mmcv.VideoReader(path)
sample = dict(
height=int(container.height),
width=int(container.width),
fps=int(container.fps),
num_frames=int(container.frame_cnt),
video=[])
for _ in range(container.frame_cnt):
sample['video'].append(container.read())
sample['video'] = np.stack(sample['video'], axis=0)
return sample
def __call__(self, results):
"""Loading video(s) from file."""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
video_file = results.get('video_file', None)
if isinstance(video_file, (list, tuple)):
# Load videos from a list of paths
for path in video_file:
video = self._read_video(path)
for key in video:
results[key].append(video[key])
elif video_file is not None:
# Load single video from path
results.update(self._read_video(video_file))
else:
if 'video' not in results:
                # If ``video_file`` is not in results, check that ``video`` exists
                # and format the video. This is for compatibility when the video
                # is manually set outside the pipeline.
raise KeyError('Either `video_file` or `video` should exist '
'in results.')
if isinstance(results['video'], (list, tuple)):
assert isinstance(results['video'][0], np.ndarray)
else:
assert isinstance(results['video'], np.ndarray)
results['video'] = [results['video']]
results['num_frames'] = [v.shape[0] for v in results['video']]
results['height'] = [v.shape[1] for v in results['video']]
results['width'] = [v.shape[2] for v in results['video']]
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f'file_client_args={self.file_client_args})')
return repr_str
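# --- Usage sketch (illustrative; 'demo.jpg' is a placeholder path and mmcv must be installed) ---
if __name__ == '__main__':
    load = LoadImageFromFile(to_float32=True, channel_order='rgb')
    results = load(dict(image_file='demo.jpg'))
    print(results['img'].shape, results['img'].dtype)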
|
StarcoderdataPython
|
3207592
|
import operator
import random
import statistics
import timeit
from typing import Any, List, Type
import tabulate
import pysegmenttree._pysegmenttree_py
import pysegmenttree.c_extensions
def get_random_query(start: int, end: int):
query = [random.randint(start, end), random.randint(start, end)]
query.sort()
return query
def bench_build(tree_cls: Type, size: int = 1_000_000):
print(f"\n{tree_cls.__name__}: build")
print(f"Tree size: {size}")
random.seed(42)
container = [random.randint(-100, 100) for _ in range(size)]
context = {**globals(), **locals()}
return timeit.repeat(
f"{tree_cls.__module__}.{tree_cls.__name__}(container)",
globals=context,
number=1,
repeat=5,
)
def bench_query(tree_cls: Type, size: int = 100_000, queries: int = 10000):
print(f"\n{tree_cls.__name__}: query")
print(f"Tree size: {size}, queries count: {queries}")
random.seed(42)
container = [random.randint(-100, 100) for _ in range(size)]
tree = tree_cls(container)
prepared_queries = [get_random_query(0, size - 1) for _ in range(queries)]
context = {**globals(), **locals()}
return timeit.repeat(
"for query in prepared_queries: tree.query(*query)",
globals=context,
number=1,
repeat=5,
)
def bench_update(tree_cls: Type, size: int = 100_000, queries: int = 10000):
print(f"\n{tree_cls.__name__}: update")
print(f"Tree size: {size}, queries count: {queries}")
random.seed(42)
container = [random.randint(-100, 100) for _ in range(size)]
tree = tree_cls(container)
prepared_queries = [
[random.randint(0, size - 1), random.randint(-100, 100)] for _ in range(queries)
]
context = {**globals(), **locals()}
return timeit.repeat(
"for query in prepared_queries: tree.update(*query)",
globals=context,
number=1,
repeat=5,
)
IMPLEMENTATIONS = [
pysegmenttree._pysegmenttree_py.PySegmentTree,
pysegmenttree.c_extensions.IntSegmentTree,
pysegmenttree.c_extensions.FloatSegmentTree,
]
BENCHES = {
"build": bench_build,
"query": bench_query,
"update": bench_query,
}
if __name__ == "__main__":
results_table = [["-", *(impl.__name__ for impl in IMPLEMENTATIONS)]]
for bench, func in BENCHES.items():
results_table.append([bench])
for tree_cls in IMPLEMENTATIONS:
timeit_results = func(tree_cls)
mean = statistics.mean(timeit_results)
results_table[-1].append(mean)
print(tabulate.tabulate(results_table, headers="firstrow", tablefmt="grid"))
|
StarcoderdataPython
|
9799050
|
from gym.envs.registration import register
register(
id='RandomWalk-v0',
entry_point='rlsuite.envs.random_walk.random_walk:RandomWalk',
max_episode_steps=50,
)
|
StarcoderdataPython
|
6629442
|
import os
import torch
from torch.utils.tensorboard import SummaryWriter
def build_scheduler(opt, params):
lr_decay_factor = params.get('lr_decay_factor')
lr_decay_steps = params.get('lr_decay_steps')
if lr_decay_factor:
return torch.optim.lr_scheduler.StepLR(opt, lr_decay_steps, lr_decay_factor)
else:
return None
class build_writers:
def __init__(self, working_dir, is_test=False):
self.writer_dir = os.path.join(working_dir, 'logs/')
self.is_test = is_test
def __enter__(self):
train_writer_dir = os.path.join(self.writer_dir, 'train')
val_writer_dir = os.path.join(self.writer_dir, 'val')
self.train_writer = SummaryWriter(train_writer_dir)
self.val_writer = SummaryWriter(val_writer_dir)
if self.is_test:
test_writer_dir = os.path.join(self.writer_dir, 'test')
self.test_writer = SummaryWriter(test_writer_dir)
return self.train_writer, self.val_writer, self.test_writer
else:
return self.train_writer, self.val_writer
def __exit__(self, type, value, traceback):
self.train_writer.close()
self.val_writer.close()
if self.is_test:
self.test_writer.close()
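# --- Usage sketch (illustrative; '/tmp/run' is a placeholder working directory) ---
if __name__ == '__main__':
    with build_writers('/tmp/run') as (train_writer, val_writer):
        train_writer.add_scalar('loss/train', 0.5, global_step=0)
        val_writer.add_scalar('loss/val', 0.6, global_step=0)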
|
StarcoderdataPython
|
357701
|
<reponame>michaelqknguyen/Budget-Buddy
import pytest
from django.urls import reverse, resolve
from budgetbuddy.paychecks.tests.factories import PaycheckFactory, PaystubFactory, DeductionFactory
pytestmark = pytest.mark.django_db
class TestPaycheckUrl:
def test_paychecks(self):
assert reverse("paychecks:paychecks") == "/paychecks/"
assert resolve(f"/paychecks/").view_name == "paychecks:paychecks"
def test_single_paycheck(self):
proto_paycheck = PaycheckFactory()
assert (
reverse("paychecks:paycheck", kwargs={"paycheck_id": proto_paycheck.id})
== f"/paychecks/{proto_paycheck.id}"
)
assert resolve(f"/paychecks/{proto_paycheck.id}").view_name == "paychecks:paycheck"
def test_add_paycheck(self):
assert reverse("paychecks:paycheck_create") == "/paychecks/add"
assert resolve(f"/paychecks/add").view_name == "paychecks:paycheck_create"
def test_edit_paycheck(self):
proto_paycheck = PaycheckFactory()
assert (
reverse("paychecks:paycheck_edit", kwargs={"pk": proto_paycheck.id})
== f"/paychecks/{proto_paycheck.id}/edit"
)
assert resolve(f"/paychecks/{proto_paycheck.id}/edit").view_name == "paychecks:paycheck_edit"
class TestPaystubUrl:
def test_add_paystub(self):
proto_paycheck = PaycheckFactory()
assert(
reverse("paychecks:paystub_add", kwargs={"paycheck_id": proto_paycheck.id})
== f"/paychecks/paystub/{proto_paycheck.id}/add"
)
assert resolve(f"/paychecks/paystub/{proto_paycheck.id}/add").view_name == "paychecks:paystub_add"
def test_delete_paystub(self):
proto_paystub = PaystubFactory()
assert (
reverse("paychecks:paystub_delete", kwargs={"pk": proto_paystub.id})
== f"/paychecks/paystub/{proto_paystub.id}/delete"
)
assert resolve(f"/paychecks/paystub/{proto_paystub.id}/delete").view_name == "paychecks:paystub_delete"
class TestDeductionUrl:
def test_add_deduction(self):
assert reverse("paychecks:deduction_add") == f"/paychecks/deduction/add"
assert resolve(f"/paychecks/deduction/add").view_name == "paychecks:deduction_add"
def test_edit_deduction(self):
proto_deduction = DeductionFactory()
assert (
reverse("paychecks:deduction_edit", kwargs={"pk": proto_deduction.id})
== f"/paychecks/deduction/{proto_deduction.id}/edit"
)
assert resolve(f"/paychecks/deduction/{proto_deduction.id}/edit").view_name == "paychecks:deduction_edit"
def test_delete_deduction(self):
proto_deduction = DeductionFactory()
assert (
reverse("paychecks:deduction_delete", kwargs={"pk": proto_deduction.id})
== f"/paychecks/deduction/{proto_deduction.id}/delete"
)
assert resolve(f"/paychecks/deduction/{proto_deduction.id}/delete").view_name == "paychecks:deduction_delete"
|
StarcoderdataPython
|
9764302
|
<gh_stars>10-100
from .dixel import Dixel
from .sham_dixel import ShamDixel
from .mock_dixel import MockStudy, MockSeries, MockInstance
from .report import RadiologyReport, MammographyReport, LungScreeningReport, BoneAgeReport
from .views import DixelView
from .provenance import Provenance
from .sham_maps import huid_sham_map
from .dixel_hashes import DixelHashes
|
StarcoderdataPython
|
3226594
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""MicroPhone & Play Sound"""
from __future__ import print_function
import pyaudio
import wave
from six.moves import queue
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = 512
# MicrophoneStream - original code in https://goo.gl/7Xy3TT
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=1, rate=self._rate,
input=True, frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b''.join(data)
# [END audio_stream]
def play_file(fname):
# create an audio object
wf = wave.open(fname, 'rb')
p = pyaudio.PyAudio()
chunk = 1024
# open stream based on the wave object which has been input.
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# read data (based on the chunk size)
data = wf.readframes(chunk)
# play stream (looping from beginning of file to the end)
while len(data) > 0:
# writing to the stream is what *actually* plays the sound.
stream.write(data)
data = wf.readframes(chunk)
# cleanup stuff.
stream.close()
p.terminate()
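# --- Usage sketch (illustrative; requires a working microphone and the pyaudio backend) ---
if __name__ == '__main__':
    with MicrophoneStream(RATE, CHUNK) as stream:
        for chunk in stream.generator():
            print("captured %d bytes of audio" % len(chunk))
            break  # stop after the first chunk for this demo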
|
StarcoderdataPython
|
3208343
|
<filename>setup.py
from setuptools import setup
VERSION = "1.3.1"
setup(
name="Oz",
version=VERSION,
description="A batteries-included web framework built on Tornado",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/dailymuse/oz",
zip_safe=False,
packages=[
"oz",
"oz.aws_cdn",
"oz.bandit",
"oz.blinks",
"oz.core",
"oz.error_pages",
"oz.json_api",
"oz.redis",
"oz.redis_sessions",
"oz.sqlalchemy",
"oz.error_pages"
],
package_data={
"oz": [
"skeleton/*.py",
"skeleton/plugin/*.py",
"skeleton/plugin/tests/*.py",
]
},
install_requires=[
"tornado>=3.1",
"optfn>=0.4.1"
],
extras_require={
"oz.aws_cdn": ["boto>=2.47.0"],
"oz.redis": ["redis>=2.6.0"],
"oz.sqlalchemy": ["sqlalchemy>=0.7.8"],
"datadog": ["ddtrace>=0.12.1"]
},
entry_points={
"console_scripts": ["oz = oz.cli:main"]
}
)
|
StarcoderdataPython
|
5034665
|
<reponame>modichirag/21cmhod
#!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import numpy as np
import sys, os
import matplotlib.pyplot as plt
from scipy.interpolate import LSQUnivariateSpline as Spline
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from scipy.signal import savgol_filter
#
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', help='which box size simulation', default='small')
args = parser.parse_args()
boxsize = args.size
suff = 'm1_00p3mh-alpha-0p8-subvol'
if boxsize == 'big':
suff = suff + '-big'
bs = 1024
else: bs = 256
figpath = '../../figs/%s/'%(suff)
os.makedirs(figpath, exist_ok=True)
models = ['ModelA', 'ModelB', 'ModelC']
##def make_omHI_plot(fname, fsize=12):
## """Does the work of making the distribution figure."""
## zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
## clist = ['b','c','g','m','r']
## # Now make the figure.
##
## fig,axis = plt.subplots(figsize=(6, 5))
##
## # Read in the data and convert to "normal" OmegaHI convention.
## dd = np.loadtxt("../../data/omega_HI_obs.txt")
## Ez = np.sqrt( 0.3*(1+dd[:,0])**3+0.7 )
## axis.errorbar(dd[:,0],1e-3*dd[:,1]/Ez**2,yerr=1e-3*dd[:,2]/Ez**2,\
## fmt='s',mfc='None')
## # Plot the fit line.
## zz = np.linspace(0,7,100)
## Ez = np.sqrt( 0.3*(1+zz)**3+0.7 )
## axis.plot(zz,4e-4*(1+zz)**0.6/Ez**2,'k-')
##
## #for im, model in enumerate(['ModelA', 'ModelB']):
## for im, model in enumerate(models):
## dpath = '../../data/outputs/%s/%s/'%(suff, model)
## print(model)
##
## omHI = np.loadtxt(dpath + "OmHI.txt")
## #omHI[:, 1] /= 10
## axis.plot(omHI[:, 0], omHI[:, 1], 'C%do'%im, label=model)
##
## ss = ius(omHI[::-1, 0], omHI[::-1, 1])
## axis.plot(np.linspace(2,6,100),ss(np.linspace(2,6,100)),'C%d'%im)
##
## axis.set_yscale('log')
## axis.legend(fontsize=fsize)
## for tick in axis.xaxis.get_major_ticks():
## tick.label.set_fontsize(fsize)
## for tick in axis.yaxis.get_major_ticks():
## tick.label.set_fontsize(fsize)
##
## # Put on some more labels.
## axis.set_xlabel(r'$z$')
## axis.set_ylabel(r'$\Omega_{HI}$')
## # and finish up.
## plt.tight_layout()
## plt.savefig(fname)
## #
##
##
##
def make_omHI_plot(fname, fsize=12):
"""Does the work of making the distribution figure."""
zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
clist = ['b','c','g','m','r']
# Now make the figure.
fig,axis = plt.subplots(figsize=(6, 5))
# Read in the data and convert to "normal" OmegaHI convention.
dd = np.loadtxt("../../data/omega_HI_obs.txt")
#Ez = np.sqrt( 0.3*(1+dd[:,0])**3+0.7 )
#axis.errorbar(dd[:,0],1e-3*dd[:,1]/Ez**2,yerr=1e-3*dd[:,2]/Ez**2,\
# fmt='s',mfc='None')
axis.errorbar(dd[:,0],1e-3*dd[:,1],yerr=1e-3*dd[:,2],fmt='s',mfc='None', color='m')
# Plot the fit line.
zz = np.linspace(0,7,100)
Ez = np.sqrt( 0.3*(1+zz)**3+0.7 )
axis.plot(zz,4e-4*(1+zz)**0.6,'k-')
#for im, model in enumerate(['ModelA', 'ModelB', 'ModelC']):
for im, model in enumerate(models):
dpath = '../../data/outputs/%s/%s/'%(suff, model)
omz = []
for iz, zz in enumerate(zlist):
# Read the data from file.
aa = 1.0/(1.0+zz)
omHI = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa)).T
omHI = (omHI[1]*omHI[2]).sum()/bs**3/27.754e10
omHI *= (1+zz)**3
if iz == 0: axis.plot(zz, omHI, 'C%do'%im, label=model)
else: axis.plot(zz, omHI, 'C%do'%im)
omz.append(omHI)
ss = ius(zlist, omz)
axis.plot(np.linspace(2,6,100),ss(np.linspace(2,6,100)),'C%d'%im)
axis.set_yscale('log')
axis.legend(fontsize=fsize)
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
# Put on some more labels.
axis.set_xlabel(r'$z$')
axis.set_ylabel(r'$\Omega_{HI}$')
# and finish up.
plt.tight_layout()
plt.savefig(fname)
#
def make_satdist_plot(fname, fsize=12):
"""Plot fraction of HI in satellites as function of halo mass"""
zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
fig,ax = plt.subplots(3,3,figsize=(13, 13), sharex=True, sharey=True)
zlist = [2.0,2.5,3.0,4.0,5.0,6.0]
fig,ax = plt.subplots(2,3,figsize=(13, 9), sharex=True, sharey=True)
clist = ['b','c','g','m','r']
# Now make the figure.
for im, model in enumerate(['ModelA', 'ModelB']):
#for im, model in enumerate(models):
dpath = '../../data/outputs/%s/%s/'%(suff, model)
print(model)
for iz, zz in enumerate(zlist):
# Read the data from file.
axis = ax.flatten()[iz]
aa = 1.0/(1.0+zz)
dist = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa))[:,:]
dist = dist[dist[:,1] !=0]
xx = dist[:, 0]
satfrac = dist[:, 4]/(dist[:, 2] + 1e-10)
axis.plot(xx, satfrac, 'C%d'%im, marker='.', label=model)
#Formatting
axis.set_title('z = %0.1f'%zz, fontsize=fsize)
axis.set_xscale('log')
axis.set_ylim(0, 1.1)
axis.grid(which='both')
if iz == 0: axis.legend(fontsize=fsize)
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
# Put on some more labels.
for axis in ax[-1]: axis.set_xlabel(r'M$(\rm M_{\odot}/h)$', fontsize=fsize)
for axis in ax[:, 0]: axis.set_ylabel(r'$\rm\frac{HI_{satellite}}{HI_{halo}}$', fontsize=fsize+2)
# and finish up.
plt.tight_layout()
plt.savefig(fname)
#
def make_HIfrac_dh_plot(fname, fsize=12):
"""Plot HIfraction of total in given mass bin"""
zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
fig,ax = plt.subplots(3,3,figsize=(13, 13), sharex=True, sharey=True)
zlist = [2.0,2.5,3.0,4.0,5.0,6.0]
fig,ax = plt.subplots(2,3,figsize=(13, 9), sharex=True, sharey=True)
clist = ['b','c','g','m','r']
# Now make the figure.
for im, model in enumerate(models):
dpath = '../../data/outputs/%s/%s/'%(suff, model)
print(model)
for iz, zz in enumerate(zlist):
# Read the data from file.
axis = ax.flatten()[iz]
aa = 1.0/(1.0+zz)
dist = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa))[:,:]
dist = dist[dist[:,1] !=0]
xx = dist[:, 0]
nn = dist[:, 1]
h1frac = (dist[:, 2]*nn)/(dist[:, 2]*nn).sum()
axis.plot(xx, h1frac, 'C%d'%im, marker='.', label=model)
#cenfrac = dist[:, 3]/nn/(dist[:, 2]/nn + 1e-10)
#axis.plot(xx, cenfrac, 'C%d.'%im, label=model)
#Formatting
axis.set_title('z = %0.1f'%zz, fontsize=fsize)
axis.set_xscale('log')
#axis.set_ylim(0, 1.1)
axis.grid(which='both')
if iz == 0: axis.legend(fontsize=fsize)
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
# Put on some more labels.
for axis in ax[-1]: axis.set_xlabel(r'M$(\rm M_{\odot}/h)$', fontsize=fsize)
for axis in ax[:, 0]: axis.set_ylabel(r'$\frac{1}{\rm{HI}_{total}}\frac{\rm{dHI}}{\rm{dlogM}_h}$', fontsize=fsize)
# and finish up.
plt.tight_layout()
plt.savefig(fname)
#
def make_hmf_plot(fname, fsize=13):
"""Plot halo mass function as a check for the code"""
zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
clist = ['b','c','g','m','r']
# Now make the figure.
fig,ax = plt.subplots(3,3,figsize=(13, 13))
for im, model in enumerate(models):
dpath = '../../data/outputs/%s/%s/'%(suff, model)
print(model)
for iz, zz in enumerate(zlist):
# Read the data from file.
axis = ax.flatten()[iz]
aa = 1.0/(1.0+zz)
dist = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa))[:,:]
dist = dist[dist[:,1] !=0]
nn = dist[:, 1]
xx = dist[:, 0]
axis.plot(xx, nn, 'C%do'%im, label=model)
axis.set_title('z = %0.1f'%zz, fontsize=fsize)
#Formatting
axis.set_xscale('log')
axis.set_yscale('log')
#axis.set_ylim(0, 1.1)
axis.grid(which='both')
if iz == 0: axis.legend(fontsize=fsize)
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
# Put on some more labels.
for axis in ax[-1]: axis.set_xlabel(r'M$(\rm M_{\odot}/h)$', fontsize=fsize)
for axis in ax[:, 0]: axis.set_ylabel(r'N halos', fontsize=fsize)
# and finish up.
plt.tight_layout()
plt.savefig(fname)
#
def make_H1mh_plot(fname, fsize=13):
"""Plot mHI-mHalo relation for 2 models"""
zlist = [2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0]
fig,ax = plt.subplots(3,3,figsize=(13, 13), sharex=True, sharey=True)
zlist = [2.0,2.5,3.0,4.0,5.0,6.0]
fig,ax = plt.subplots(2,3,figsize=(13, 9), sharex=True, sharey=True)
clist = ['b','c','g','m','r']
# Now make the figure.
for im, model in enumerate(models):
dpath = '../../data/outputs/%s/%s/'%(suff, model)
print(model)
for iz, zz in enumerate(zlist):
# Read the data from file.
axis = ax.flatten()[iz]
aa = 1.0/(1.0+zz)
#dist = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa))[1:-1,:]
dist = np.loadtxt(dpath + "HI_dist_{:06.4f}.txt".format(aa))[:,:]
dist = dist[dist[:,1] !=0]
xx = dist[:, 0]
yy = dist[:, 2]
axis.plot(xx, yy, 'C%d'%im, marker='.', label=model)
#Formatting
axis.set_title('z = %0.1f'%zz, fontsize=fsize)
axis.set_xscale('log')
axis.set_yscale('log')
axis.set_ylim(8e4, 1.1e11)
axis.grid(which='both')
if iz == 0: axis.legend(fontsize=fsize)
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
# Put on some more labels.
for axis in ax[-1]: axis.set_xlabel(r'M$(\rm M_{\odot}/h)$', fontsize=fsize)
for axis in ax[:, 0]: axis.set_ylabel(r'M$\rm _{HI}(M_{\odot}/h)$', fontsize=fsize)
# and finish up.
plt.tight_layout()
plt.savefig(fname)
#
if __name__=="__main__":
make_satdist_plot(figpath + 'HI_sat_fraction.pdf')
make_HIfrac_dh_plot(figpath + 'HIfrac_dhalo.pdf')
make_hmf_plot(figpath + 'HMF.pdf')
make_H1mh_plot(figpath + 'HI_Mh.pdf')
make_omHI_plot(figpath + 'omHI.pdf')
#
|
StarcoderdataPython
|
69303
|
<reponame>kkkanil/st2<filename>st2common/tests/unit/test_api_model_validation.py
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from st2common.models.api.base import BaseAPI
__all__ = [
'APIModelValidationTestCase'
]
class MockAPIModel1(BaseAPI):
model = None
schema = {
'title': 'MockAPIModel',
'description': 'Test',
'type': 'object',
'properties': {
'id': {
'description': 'The unique identifier for the action runner.',
'type': ['string', 'null'],
'default': None
},
'name': {
'description': 'The name of the action runner.',
'type': 'string',
'required': True
},
'description': {
'description': 'The description of the action runner.',
'type': 'string'
},
'enabled': {
'type': 'boolean',
'default': True
},
'parameters': {
'type': 'object'
},
'permission_grants': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'resource_uid': {
'type': 'string',
'description': 'UID of a resource to which this grant applies to.',
'required': False,
'default': 'unknown'
},
'enabled': {
'type': 'boolean',
'default': True
},
'description': {
'type': 'string',
'description': 'Description',
'required': False
}
}
},
'default': []
}
},
'additionalProperties': False
}
class MockAPIModel2(BaseAPI):
model = None
schema = {
'title': 'MockAPIModel2',
'description': 'Test',
'type': 'object',
'properties': {
'id': {
'description': 'The unique identifier for the action runner.',
'type': 'string',
'default': None
},
'permission_grants': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'resource_uid': {
'type': 'string',
'description': 'UID of a resource to which this grant applies to.',
'required': False,
'default': None
},
'description': {
'type': 'string',
'required': True
}
}
},
'default': []
},
'parameters': {
'type': 'object',
'properties': {
'id': {
'type': 'string',
'default': None
},
'name': {
'type': 'string',
'required': True
}
},
'additionalProperties': False,
}
},
'additionalProperties': False
}
class APIModelValidationTestCase(unittest2.TestCase):
def test_validate_default_values_are_set(self):
# no "permission_grants" attribute
mock_model_api = MockAPIModel1(name='name')
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(getattr(mock_model_api, 'enabled', None), None)
self.assertEqual(getattr(mock_model_api, 'permission_grants', None), None)
mock_model_api_validated = mock_model_api.validate()
# Validate it doesn't modify object in place
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(getattr(mock_model_api, 'enabled', None), None)
# Verify cleaned object
self.assertEqual(mock_model_api_validated.id, None)
self.assertEqual(mock_model_api_validated.name, 'name')
self.assertEqual(mock_model_api_validated.enabled, True)
self.assertEqual(mock_model_api_validated.permission_grants, [])
# "permission_grants" attribute present, but child missing
mock_model_api = MockAPIModel1(name='name', enabled=False,
permission_grants=[{}, {'description': 'test'}])
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(mock_model_api.enabled, False)
self.assertEqual(mock_model_api.permission_grants, [{}, {'description': 'test'}])
mock_model_api_validated = mock_model_api.validate()
# Validate it doesn't modify object in place
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(mock_model_api.enabled, False)
self.assertEqual(mock_model_api.permission_grants, [{}, {'description': 'test'}])
# Verify cleaned object
self.assertEqual(mock_model_api_validated.id, None)
self.assertEqual(mock_model_api_validated.name, 'name')
self.assertEqual(mock_model_api_validated.enabled, False)
self.assertEqual(mock_model_api_validated.permission_grants,
[{'resource_uid': 'unknown', 'enabled': True},
{'resource_uid': 'unknown', 'enabled': True, 'description': 'test'}])
def test_validate_nested_attribute_with_default_not_provided(self):
mock_model_api = MockAPIModel2()
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(getattr(mock_model_api, 'permission_grants', 'notset'), 'notset')
self.assertEqual(getattr(mock_model_api, 'parameters', 'notset'), 'notset')
mock_model_api_validated = mock_model_api.validate()
# Validate it doesn't modify object in place
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(getattr(mock_model_api, 'permission_grants', 'notset'), 'notset')
self.assertEqual(getattr(mock_model_api, 'parameters', 'notset'), 'notset')
# Verify cleaned object
self.assertEqual(mock_model_api_validated.id, None)
self.assertEqual(mock_model_api_validated.permission_grants, [])
self.assertEqual(getattr(mock_model_api_validated, 'parameters', 'notset'), 'notset')
def test_validate_allow_default_none_for_any_type(self):
mock_model_api = MockAPIModel2(permission_grants=[{'description': 'test'}],
parameters={'name': 'test'})
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(mock_model_api.permission_grants, [{'description': 'test'}])
self.assertEqual(mock_model_api.parameters, {'name': 'test'})
mock_model_api_validated = mock_model_api.validate()
# Validate it doesn't modify object in place
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(mock_model_api.permission_grants, [{'description': 'test'}])
self.assertEqual(mock_model_api.parameters, {'name': 'test'})
# Verify cleaned object
self.assertEqual(mock_model_api_validated.id, None)
self.assertEqual(mock_model_api_validated.permission_grants,
[{'description': 'test', 'resource_uid': None}])
self.assertEqual(mock_model_api_validated.parameters, {'id': None, 'name': 'test'})
|
StarcoderdataPython
|
3257732
|
<gh_stars>0
# @memecian
# toRegional.py
# Turns text into Discord Emojis, specifically regional indicators.
inputString = input("Input Letters: ")
for c in inputString:
    if c.isalpha():
        # Discord emoji names are lowercase, so normalize the letter first.
        print(":regional_indicator_" + c.lower() + ":", end = "")
    else:
        print(c, end = "")
input("\nPress Enter to quit")
|
StarcoderdataPython
|
4804470
|
<reponame>fcce-proj/zaifbot
from decimal import Decimal
class Tick:
def __init__(self, currency_pair):
self.size = Decimal(str(currency_pair.info['aux_unit_step']))
self._decimal_digits = currency_pair.info['aux_unit_point']
def truncate_price(self, price):
price = Decimal(str(price))
remainder = price % self.size
truncated_price = price - remainder
if self._decimal_digits == 0:
return int(truncated_price)
return float(truncated_price)
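# --- Usage sketch (illustrative; uses a stub instead of a real zaifbot currency pair object) ---
if __name__ == '__main__':
    class _StubPair:
        info = {'aux_unit_step': '0.05', 'aux_unit_point': 2}
    tick = Tick(_StubPair())
    print(tick.truncate_price(123.4567))  # -> 123.45, truncated down to the 0.05 tick size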
|
StarcoderdataPython
|
3298108
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.bigquery_migration_v2alpha.types import assessment_task
from google.cloud.bigquery_migration_v2alpha.types import migration_error_details
from google.cloud.bigquery_migration_v2alpha.types import migration_metrics
from google.cloud.bigquery_migration_v2alpha.types import translation_task
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import error_details_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.migration.v2alpha",
manifest={
"MigrationWorkflow",
"MigrationTask",
"MigrationSubtask",
"MigrationTaskOrchestrationResult",
},
)
class MigrationWorkflow(proto.Message):
r"""A migration workflow which specifies what needs to be done
for an EDW migration.
Attributes:
name (str):
Output only. Immutable. The unique identifier for the
migration workflow. The ID is server-generated.
Example: ``projects/123/locations/us/workflows/345``
display_name (str):
The display name of the workflow. This can be
set to give a workflow a descriptive name. There
is no guarantee or enforcement of uniqueness.
tasks (Sequence[google.cloud.bigquery_migration_v2alpha.types.MigrationWorkflow.TasksEntry]):
The tasks in a workflow in a named map. The
name (i.e. key) has no meaning and is merely a
convenient way to address a specific task in a
workflow.
state (google.cloud.bigquery_migration_v2alpha.types.MigrationWorkflow.State):
Output only. That status of the workflow.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the workflow was created.
last_update_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the workflow was last updated.
"""
class State(proto.Enum):
r"""Possible migration workflow states."""
STATE_UNSPECIFIED = 0
DRAFT = 1
RUNNING = 2
PAUSED = 3
COMPLETED = 4
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=6,)
tasks = proto.MapField(
proto.STRING, proto.MESSAGE, number=2, message="MigrationTask",
)
state = proto.Field(proto.ENUM, number=3, enum=State,)
create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
last_update_time = proto.Field(
proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,
)
class MigrationTask(proto.Message):
r"""A single task for a migration which has details about the
configuration of the task.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
assessment_task_details (google.cloud.bigquery_migration_v2alpha.types.AssessmentTaskDetails):
Task configuration for Assessment.
This field is a member of `oneof`_ ``task_details``.
translation_task_details (google.cloud.bigquery_migration_v2alpha.types.TranslationTaskDetails):
Task configuration for Batch/Offline SQL
Translation.
This field is a member of `oneof`_ ``task_details``.
id (str):
Output only. Immutable. The unique identifier
for the migration task. The ID is server-
generated.
type_ (str):
The type of the task. This must be a
supported task type.
details (google.protobuf.any_pb2.Any):
DEPRECATED! Use one of the task_details below. The details
of the task. The type URL must be one of the supported task
details messages and correspond to the Task's type.
state (google.cloud.bigquery_migration_v2alpha.types.MigrationTask.State):
Output only. The current state of the task.
processing_error (google.rpc.error_details_pb2.ErrorInfo):
Output only. An explanation that may be
populated when the task is in FAILED state.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the task was created.
last_update_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the task was last updated.
orchestration_result (google.cloud.bigquery_migration_v2alpha.types.MigrationTaskOrchestrationResult):
Output only. Additional information about the
orchestration.
"""
class State(proto.Enum):
r"""Possible states of a migration task."""
STATE_UNSPECIFIED = 0
PENDING = 1
ORCHESTRATING = 2
RUNNING = 3
PAUSED = 4
SUCCEEDED = 5
FAILED = 6
assessment_task_details = proto.Field(
proto.MESSAGE,
number=12,
oneof="task_details",
message=assessment_task.AssessmentTaskDetails,
)
translation_task_details = proto.Field(
proto.MESSAGE,
number=13,
oneof="task_details",
message=translation_task.TranslationTaskDetails,
)
id = proto.Field(proto.STRING, number=1,)
type_ = proto.Field(proto.STRING, number=2,)
details = proto.Field(proto.MESSAGE, number=3, message=any_pb2.Any,)
state = proto.Field(proto.ENUM, number=4, enum=State,)
processing_error = proto.Field(
proto.MESSAGE, number=5, message=error_details_pb2.ErrorInfo,
)
create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,)
last_update_time = proto.Field(
proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,
)
orchestration_result = proto.Field(
proto.MESSAGE, number=10, message="MigrationTaskOrchestrationResult",
)
class MigrationSubtask(proto.Message):
r"""A subtask for a migration which carries details about the
configuration of the subtask. The content of the details should
not matter to the end user, but is a contract between the
subtask creator and subtask worker.
Attributes:
name (str):
Output only. Immutable. The resource name for the migration
subtask. The ID is server-generated.
Example:
``projects/123/locations/us/workflows/345/subtasks/678``
task_id (str):
The unique ID of the task to which this
subtask belongs.
type_ (str):
The type of the Subtask. The migration
service does not check whether this is a known
type. It is up to the task creator (i.e.
orchestrator or worker) to ensure it only
creates subtasks for which there are compatible
workers polling for Subtasks.
state (google.cloud.bigquery_migration_v2alpha.types.MigrationSubtask.State):
Output only. The current state of the
subtask.
processing_error (google.rpc.error_details_pb2.ErrorInfo):
Output only. An explanation that may be
populated when the task is in FAILED state.
resource_error_details (Sequence[google.cloud.bigquery_migration_v2alpha.types.ResourceErrorDetail]):
Output only. Provides details to errors and
issues encountered while processing the subtask.
Presence of error details does not mean that the
subtask failed.
resource_error_count (int):
The number or resources with errors. Note: This is not the
total number of errors as each resource can have more than
one error. This is used to indicate truncation by having a
``resource_error_count`` that is higher than the size of
``resource_error_details``.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the subtask was created.
last_update_time (google.protobuf.timestamp_pb2.Timestamp):
Time when the subtask was last updated.
metrics (Sequence[google.cloud.bigquery_migration_v2alpha.types.TimeSeries]):
The metrics for the subtask.
"""
class State(proto.Enum):
r"""Possible states of a migration subtask."""
STATE_UNSPECIFIED = 0
ACTIVE = 1
RUNNING = 2
SUCCEEDED = 3
FAILED = 4
PAUSED = 5
name = proto.Field(proto.STRING, number=1,)
task_id = proto.Field(proto.STRING, number=2,)
type_ = proto.Field(proto.STRING, number=3,)
state = proto.Field(proto.ENUM, number=5, enum=State,)
processing_error = proto.Field(
proto.MESSAGE, number=6, message=error_details_pb2.ErrorInfo,
)
resource_error_details = proto.RepeatedField(
proto.MESSAGE, number=12, message=migration_error_details.ResourceErrorDetail,
)
resource_error_count = proto.Field(proto.INT32, number=13,)
create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
last_update_time = proto.Field(
proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,
)
metrics = proto.RepeatedField(
proto.MESSAGE, number=11, message=migration_metrics.TimeSeries,
)
class MigrationTaskOrchestrationResult(proto.Message):
r"""Additional information from the orchestrator when it is done
with the task orchestration.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
assessment_details (google.cloud.bigquery_migration_v2alpha.types.AssessmentOrchestrationResultDetails):
Details specific to assessment task types.
This field is a member of `oneof`_ ``details``.
"""
assessment_details = proto.Field(
proto.MESSAGE,
number=1,
oneof="details",
message=assessment_task.AssessmentOrchestrationResultDetails,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
StarcoderdataPython
|
3534198
|
import os
import sys
import inspect
from rkd.api.inputoutput import IO
from rkd.api.testing import BasicTestingCase
TESTS_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/../'
sys.path.insert(0, TESTS_PATH)
from infracheck.infracheck.config import ConfigLoader
class ConfigTest(BasicTestingCase):
def test_load_finds_file_successfully(self):
"""
Loads a check configuration with a success
"""
loader = ConfigLoader([TESTS_PATH + '/example/healthchecks', TESTS_PATH + '/infracheck'], IO())
check = loader.load('ram')
self.assertEqual('ram', check.name)
self.assertEqual('85', check.input_variables.get('max_ram_percentage'))
def test_load_does_not_find_file(self):
loader = ConfigLoader([TESTS_PATH + '/example/healthchecks', TESTS_PATH + '/infracheck'], IO())
self.assertRaises(FileNotFoundError, lambda: loader.load('not-existing'))
def test_assert_valid_format(self):
with self.subTest('Success case'):
ConfigLoader._assert_valid_format('Durruti', {'type': 'curl'})
with self.subTest('Failure case - missing "type" attribute'):
self.assertRaises(Exception, lambda: ConfigLoader._assert_valid_format('Some', {}))
|
StarcoderdataPython
|
1917112
|
from pyspider.core.model.mysql_base import *
"""
Storage module for inventory SKU data from Wanliniu (万里牛).
"""
class SkuInventory(Model):
goods_code = CharField(max_length=50, verbose_name='商品编码')
lock_size = IntegerField(default=0, verbose_name='锁定库存')
quantity = IntegerField(default=0, verbose_name='数量')
sku_code = CharField(max_length=50, default='', verbose_name='规格编码')
skc_code = CharField(max_length=50, default='', verbose_name='颜色编码')
    underway = IntegerField(default=0, verbose_name='在途库存')
update_time = DateTimeField(default=None, verbose_name='爬虫更新日期')
storage_code = CharField(max_length=50, default='', verbose_name='仓库编码')
class Meta:
database = db
db_table = 'spider_sku_inventory'
        # set up a unique composite index
indexes = (
(('sku_code', 'storage_code'), True),
)
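# --- Usage sketch (illustrative; assumes the `db` connection in mysql_base is configured and reachable) ---
# db.create_tables([SkuInventory])
# SkuInventory.insert(goods_code='G001', sku_code='S001', storage_code='W01',
#                     quantity=10, update_time='2024-01-01 00:00:00').execute()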
|
StarcoderdataPython
|
8110276
|
<filename>tests/__init__.py<gh_stars>1-10
import asynctest
import socket
from contextlib import contextmanager
class BaseCase(asynctest.TestCase):
@contextmanager
def socketpair(self):
server_listener_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_listener_conn = server_listener_conn
server_listener_conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_listener_conn.bind(('', 0))
server_listener_conn.listen(5)
server_listener_conn.setblocking(0)
self.client_conn = client_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
client_conn.setblocking(0)
client_conn.connect_ex(server_listener_conn.getsockname())
server_conn, server_addr = server_listener_conn.accept()
self.server_conn = server_conn
server_conn.setblocking(0)
yield (client_conn, server_conn)
client_conn.close()
server_conn.close()
server_listener_conn.close()
|
StarcoderdataPython
|
11322449
|
<filename>learning-python-development/2-hour-python-coding-bootcamp/04_classes_and_objects.py
# Classes and objects
class GameCharacter:
# attributes
name = ""
health = 100
position = 0
# initializer
def __init__(self, name, position):
self.name = name
self.position = position
# behaviours
def move_by(self, amount):
self.position += amount
def heal_self(self, by_amount):
self.health += by_amount
if self.health > 100:
self.health = 100
new_character = GameCharacter("Monster", 50)
print(new_character.name)
print(new_character.health)
print(new_character.position)
new_character.move_by(10)
print(new_character.position)
new_character.health = 80
print(new_character.health)
new_character.heal_self(40)
print(new_character.health)
|
StarcoderdataPython
|
6473530
|
<reponame>wuemily2/csc290-tut104-mines2019<gh_stars>0
from __future__ import annotations
from typing import Tuple, List
from Tile import Tile
from NumberTile import NumberTile
class EmptyTile(Tile):
"""
The EmptyTile class extends the Tile class. It should have
all the attributes and methods of the Tile class.
"""
def __init__(self, board: List[List[Tile]], position: Tuple[int, int]):
"""
Initialize the EmptyTile class with board and position like the
Tile class.
"""
super().__init__(board, position)
# self._icon
self._tile_type = "empty"
def _calculate_click(self):
"""
Implement the reveal_tile method in the Tile class.
EmptyTile is initially unrevealed. When it is clicked by the player,
it reveals itself and all other EmptyTile and Numbered Tiles around it.
"""
for x_shift in [-1, 0, 1]:
for y_shift in [-1, 0, 1]:
if x_shift == y_shift == 0:
continue
try:
click_index = (self.get_position()[0] + x_shift,
self.get_position()[1] + y_shift)
if click_index[0] < 0 or click_index[1] < 0:
continue
other_tile = self._board[click_index[0]][click_index[1]]
if isinstance(other_tile, EmptyTile) or \
isinstance(other_tile, NumberTile):
other_tile.reveal_tile()
except IndexError: # do nothing
pass
return True
def get_symbol(self) -> str:
"""
Return the string representation of an empty tile.
:return:
"""
return " "
|
StarcoderdataPython
|
12827372
|
"""Module describing the planemo ``delete_alias`` command."""
import click
from planemo import options
from planemo.cli import command_function
from planemo.galaxy import profiles
from planemo.io import error, info
try:
from tabulate import tabulate
except ImportError:
tabulate = None # type: ignore
@click.command('delete_alias')
@options.alias_option(required=True)
@options.profile_option(required=True)
@command_function
def cli(ctx, alias, profile, **kwds):
"""
List aliases for a path or a workflow or dataset ID. Aliases are associated with a particular planemo profile.
"""
info("Looking for profiles...")
exit_code = profiles.delete_alias(ctx, alias, profile)
if exit_code == 0:
info('Alias {} was successfully deleted from profile {}'.format(alias, profile))
else:
error('Alias {} does not exist, so was not deleted from profile {}'.format(alias, profile))
ctx.exit(exit_code)
return
|
StarcoderdataPython
|
6657627
|
<gh_stars>0
from .base_model import *
from .dist_model import *
from .network_basics import *
from .pretrained_networks import *
from .ps_util import *
|
StarcoderdataPython
|
4834265
|
<filename>qc2-copy-tool-firefox.py<gh_stars>1-10
#!/usr/bin/python3
import sqlite3
import urllib.request
import gzip
import re
LOCALSTORAGE_PATH = '/home/llama/.mozilla/firefox/m16gwf0a.default/webappsstore.sqlite'
def get_qc2_data(site):
conn = sqlite3.connect(LOCALSTORAGE_PATH)
c = conn.cursor()
c.execute('SELECT value FROM webappsstore2 WHERE key = \'qc2\' AND scope = ?;', (get_scope(site),))
qc2 = c.fetchone()[0]
conn.close()
return qc2
def copy_data_to(data, site):
try:
conn = sqlite3.connect(LOCALSTORAGE_PATH)
c = conn.cursor()
c.execute('INSERT OR REPLACE INTO webappsstore2 (scope, key, value) VALUES (?, \'qc2\', ?)', (get_scope(site), data))
conn.commit()
conn.close()
except sqlite3.OperationalError:
print('warning: site %s not found' % site)
def get_scope(site):
return '%s.:http:80' % site[::-1]
def get_sites():
return re.findall(r'http://(.*?)"', str(gzip.GzipFile(fileobj=urllib.request.urlopen('http://api.stackexchange.com/2.2/sites?pagesize=500&filter=!SmNnfYtt-PkOXruW4z')).read()))
data = get_qc2_data('codegolf.stackexchange.com')
for site in get_sites():
copy_data_to(data, site)
|
StarcoderdataPython
|
8034608
|
import flops_counter
import flops_counter.nn as nn
from vision import models
class S3FD(nn.Module):
def __init__(self):
super(S3FD, self).__init__()
# backbone
self.vgg16 = nn.ModuleList(make_layers(vgg_cfgs['D']))
# s3fd specific
self.conv_fc6 = nn.Conv2d(512, 1024, 3, 1, 1)
self.relu_fc6 = nn.ReLU()
self.conv_fc7 = nn.Conv2d(1024, 1024, 1, 1, 0)
self.relu_fc7 = nn.ReLU()
self.conv6_1 = nn.Conv2d(1024, 256, 1, 1, 0)
self.relu_conv6_1 = nn.ReLU()
self.conv6_2 = nn.Conv2d(256, 512, 3, 2, 1)
self.relu_conv6_2 = nn.ReLU()
self.conv7_1 = nn.Conv2d(512, 128, 1, 1, 0)
self.relu_conv7_1 = nn.ReLU()
self.conv7_2 = nn.Conv2d(128, 256, 3, 2, 1)
self.relu_conv7_2 = nn.ReLU()
self.l2norm_conv3_3 = nn.L2Norm2d(256, 10)
self.l2norm_conv4_3 = nn.L2Norm2d(512, 8)
self.l2norm_conv5_3 = nn.L2Norm2d(512, 5)
# Detection Head - mbox_loc
self.mbox_loc_conv3_3_norm = nn.Conv2d(256, 4, 3, 1, 1)
self.mbox_loc_conv4_3_norm = nn.Conv2d(512, 4, 3, 1, 1)
self.mbox_loc_conv5_3_norm = nn.Conv2d(512, 4, 3, 1, 1)
self.mbox_loc_conv_fc7 = nn.Conv2d(1024, 4, 3, 1, 1)
self.mbox_loc_conv6_2 = nn.Conv2d(512, 4, 3, 1, 1)
self.mbox_loc_conv7_2 = nn.Conv2d(256, 4, 3, 1, 1)
# Detection Head - mbox_conf
self.mbox_conf_conv3_3_norm = nn.Conv2d(256, 4, 3, 1, 1) # 4->2 through maxout at channels 0~2
self.mbox_conf_conv4_3_norm = nn.Conv2d(512, 2, 3, 1, 1)
self.mbox_conf_conv5_3_norm = nn.Conv2d(512, 2, 3, 1, 1)
self.mbox_conf_conv_fc7 = nn.Conv2d(1024, 2, 3, 1, 1)
self.mbox_conf_conv6_2 = nn.Conv2d(512, 2, 3, 1, 1)
self.mbox_conf_conv7_2 = nn.Conv2d(256, 2, 3, 1, 1)
# Detection Head - mbox_conf - softmax
self.softmax = nn.Softmax(dim=-1)
@property
def name(self):
return self._get_name() + '_VGG16'
def forward(self, x):
out = x
# get conv3_3
for k in range(16):
out = self.vgg16[k](out)
conv3_3 = out # channels = 256
conv3_3_norm = self.l2norm_conv3_3(conv3_3)
# get conv4_3
for k in range(16, 23):
out = self.vgg16[k](out)
conv4_3 = out # channels = 512
conv4_3_norm = self.l2norm_conv4_3(conv4_3)
# get conv5_3
for k in range(23, 30):
out = self.vgg16[k](out)
conv5_3 = out # channels = 512
conv5_3_norm = self.l2norm_conv5_3(conv5_3)
out = self.vgg16[30](out)
# get conv_fc7
out = self.conv_fc6(out)
out = self.relu_fc6(out)
out = self.conv_fc7(out)
out = self.relu_fc7(out)
conv_fc7 = out
# get conv6_2
out = self.conv6_1(out)
out = self.relu_conv6_1(out)
out = self.conv6_2(out)
out = self.relu_conv6_2(out)
conv6_2 = out
# get conv7_2
out = self.conv7_1(out)
out = self.relu_conv7_1(out)
out = self.conv7_2(out)
out = self.relu_conv7_2(out)
conv7_2 = out
# Detection Head - mbox_loc
mbox_loc_inputs = [
self.mbox_loc_conv3_3_norm(conv3_3_norm),
self.mbox_loc_conv4_3_norm(conv4_3_norm),
self.mbox_loc_conv5_3_norm(conv5_3_norm),
self.mbox_loc_conv_fc7(conv_fc7),
self.mbox_loc_conv6_2(conv6_2),
self.mbox_loc_conv7_2(conv7_2)
]
mbox_loc = flops_counter.cat([o.permute(0, 2, 3, 1).view(1, -1, 4) for o in mbox_loc_inputs], 1)
# Detection Head - mbox_conf
mbox_conf_conv3_3_norm = self.mbox_conf_conv3_3_norm(conv3_3_norm)
conf1 = [i for i in mbox_conf_conv3_3_norm.value]
conf1[1] = 1
conf1 = flops_counter.TensorSize(conf1)
conf234 = [i for i in mbox_conf_conv3_3_norm.value]
conf234[1] = 3
conf234 = flops_counter.TensorSize(conf234)
conf234 = conf234.max(1, keepdim=True)
mbox_conf_conv3_3_norm = flops_counter.cat([conf1, conf234], 1)
mbox_conf_inputs = [
mbox_conf_conv3_3_norm,
self.mbox_conf_conv4_3_norm(conv4_3_norm),
self.mbox_conf_conv5_3_norm(conv5_3_norm),
self.mbox_conf_conv_fc7(conv_fc7),
self.mbox_conf_conv6_2(conv6_2),
self.mbox_conf_conv7_2(conv7_2)
]
mbox_conf = flops_counter.cat([o.permute(0, 2, 3, 1).view(1, -1, 2) for o in mbox_conf_inputs], 1)
mbox_conf = self.softmax(mbox_conf)
return mbox_loc, mbox_conf
vgg_cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return layers
|
StarcoderdataPython
|
8134724
|
<reponame>aharonnovo/magma
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import DataClassJsonMixin
from .add_image_input import AddImageInput
@dataclass
class AddImageMutation(DataClassJsonMixin):
@dataclass
class AddImageMutationData(DataClassJsonMixin):
@dataclass
class File(DataClassJsonMixin):
id: str
fileName: str
addImage: File
data: AddImageMutationData
__QUERY__: str = """
mutation AddImageMutation($input: AddImageInput!) {
addImage(input: $input) {
id
fileName
}
}
"""
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, input: AddImageInput) -> AddImageMutationData:
# fmt: off
variables = {"input": input}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
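# Hedged usage sketch (hypothetical values, not part of the generated file): it assumes a
# GraphqlClient already configured for the inventory endpoint and an AddImageInput built
# elsewhere from .add_image_input.
#
#     client = GraphqlClient(...)  # construction depends on local configuration
#     file_info = AddImageMutation.execute(client, input=add_image_input)
#     print(file_info.addImage.id, file_info.addImage.fileName)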
|
StarcoderdataPython
|
243334
|
from math import radians, cos, sin, sqrt, pow
import math
import colorsys
from colormath.color_objects import (LCHuvColor, LCHabColor, HSLColor,
HSVColor, IPTColor, sRGBColor,
XYZColor, AdobeRGBColor)
from colormath.color_conversions import convert_color
from colorspacious import cspace_convert, CAM02UCS, CAM02SCD, CAM02LCD
def _rgb1_to_rgb255(r, g, b):
return round(r*255), round(g*255), round(b*255)
def _hls_to_yiq(H, L, S):
y = L/100
i = S/100*cos(radians(H))
q = S/100*sin(radians(H))
return y, i, q
def yiq(start, amount, saturation, luminosity):
step = 360/amount
ans = ((start + i*step, luminosity, saturation) for i in range(amount))
ans = (_hls_to_yiq(H, L, S) for H, L, S in ans)
ans = (colorsys.yiq_to_rgb(y, i, q) for y, i, q in ans)
ans = (_rgb1_to_rgb255(r, g, b) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
def yiq_mixer(color1, color2):
    rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
    # colorsys expects r, g, b in [0, 1]; hex_to_rgb returns 0-255 integers
    h1, l1, s1 = colorsys.rgb_to_hls(*(v/255 for v in rgb1))
    h2, l2, s2 = colorsys.rgb_to_hls(*(v/255 for v in rgb2))
    # average in HLS, then go through YIQ back to RGB
    h, l, s = (h1 + h2)/2, (l1 + l2)/2, (s1 + s2)/2
    y, i, q = _hls_to_yiq(h*360, l*100, s*100)
    r, g, b = colorsys.yiq_to_rgb(y, i, q)
    return "#%02x%02x%02x" % _rgb1_to_rgb255(r, g, b)
def _hls_to_ycbcr(H, L, S):
Y = L/100
Cb = S/100*cos(radians(H))
Cr = S/100*sin(radians(H))
return Y, Cb, Cr
def _ycbcr_to_rgb(Y, Cb, Cr):
r = Y - 0.000170345404941155*Cb + 1.40214249139129*Cr
g = Y - 0.345602379281664*Cb - 0.71447536324549*Cr
b = Y + 1.77101547535512*Cb + 9.48244798877522e-5*Cr
r = 0 if r < 0 else 1 if r > 1 else r
g = 0 if g < 0 else 1 if g > 1 else g
b = 0 if b < 0 else 1 if b > 1 else b
return r, g, b
def cat02(start, amount, saturation, luminosity):
step = 360/amount
ans = ((start + i*step, luminosity, saturation) for i in range(amount))
ans = (_hls_to_ycbcr(H, L, S) for H, L, S in ans)
ans = (_ycbcr_to_rgb(Y, Cb, Cr) for Y, Cb, Cr in ans)
ans = (_rgb1_to_rgb255(r, g, b) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
def _ycbcr_to_rgb_2(Y, Cb, Cr):
r = -7.65413663080094*Cb + 3.07695666724817*Cr + 5.909003314299*Y
g = 11.3940532344494*Cb - 2.46426433976758*Cr - 9.14035819725992*Y
b = -2.73991660364845*Cb + 0.387307672519408*Cr + 4.23135488296092*Y
r = 0 if r < 0 else 1 if r > 1 else r
g = 0 if g < 0 else 1 if g > 1 else g
b = 0 if b < 0 else 1 if b > 1 else b
return r, g, b
def ycbcr(start, amount, saturation, luminosity):
step = 360/amount
ans = ((start + i*step, luminosity, saturation) for i in range(amount))
ans = (_hls_to_ycbcr(H, L, S) for H, L, S in ans)
ans = (_ycbcr_to_rgb_2(Y, Cb, Cr) for Y, Cb, Cr in ans)
ans = (_rgb1_to_rgb255(r, g, b) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
def yuv_mixer(color1, color2):
    rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
    # colorsys expects r, g, b in [0, 1]; hex_to_rgb returns 0-255 integers
    h1, l1, s1 = colorsys.rgb_to_hls(*(v/255 for v in rgb1))
    h2, l2, s2 = colorsys.rgb_to_hls(*(v/255 for v in rgb2))
    # average in HLS; no dedicated YUV helpers exist in this module, so reuse the YCbCr pair
    h, l, s = (h1 + h2)/2, (l1 + l2)/2, (s1 + s2)/2
    y, cb, cr = _hls_to_ycbcr(h*360, l*100, s*100)
    r, g, b = _ycbcr_to_rgb(y, cb, cr)
    return "#%02x%02x%02x" % _rgb1_to_rgb255(r, g, b)
def to_hex_rgb(gen):
ans = ((v.clamped_rgb_r, v.clamped_rgb_g,v.clamped_rgb_b) for v in gen)
ans = ((round(r*255), round(g*255), round(b*255)) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
def lchab(l, c, h):
return convert_color(LCHabColor(l, c, h), sRGBColor, target_illuminant="d65")
def lchuv(l, c, h):
return convert_color(LCHuvColor(l, c, h), sRGBColor, target_illuminant="d65")
def hsl(l, c, h):
return convert_color(HSLColor(h, c/100, l/100), sRGBColor, target_illuminant="d65")
def hsv(l, c, h):
return convert_color(HSVColor(h, c/100, l/100), sRGBColor, target_illuminant="d65")
def ipt(l, c, h):
i = l/100
p = c/100*cos(radians(h))
t = c/100*sin(radians(h))
return convert_color(IPTColor(i, p, t), sRGBColor, target_illuminant="d65")
def bt2020color(l, c, h):
return convert_color(BT2020Color(h, c/100, l/100), sRGBColor, target_illuminant="d65")
def create_system(space):
def hex_colors(start, amount, saturation, luminosity):
k = 360/amount
ans = (space(luminosity, saturation, start + i*k) for i in range(amount))
return to_hex_rgb(ans)
return hex_colors
def hex_to_rgb(value):
"""Return (red, green, blue) for the color given as #rrggbb."""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def hex_to_space(color, space):
color_tuple = hex_to_rgb(color)
values = list(i/255 for i in color_tuple)
return convert_color(sRGBColor(*values), space, target_illuminant="d65").get_value_tuple()
def create_mixer(space):
def mix(color1, color2):
x1, y1, z1 = hex_to_space(color1, space)
x2, y2, z2 = hex_to_space(color2, space)
color3 = space((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
ans = convert_color(color3, sRGBColor, target_illuminant="d65")
ans = (ans.clamped_rgb_r, ans.clamped_rgb_g, ans.clamped_rgb_b)
ans = tuple(round(i*255) for i in ans)
return "#%02x%02x%02x" % ans
return mix
def jch(start, amount, saturation, luminosity):
k = 360/amount
ans = ((luminosity, saturation, start + i*k) for i in range(amount))
ans = (cspace_convert(color, "JCh", "XYZ1") for color in ans)
ans = (XYZColor(*color.tolist()) for color in ans)
ans = (convert_color(color, AdobeRGBColor, target_illuminant="d65") for color in ans)
ans = ((color.clamped_rgb_r, color.clamped_rgb_g, color.clamped_rgb_b) for color in ans)
ans = ((round(r*255), round(g*255), round(b*255)) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
def jch_mixer(color1, color2):
rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
x1, y1, z1 = cspace_convert(rgb1, "sRGB255", "JCh").tolist()
x2, y2, z2 = cspace_convert(rgb2, "sRGB255", "JCh").tolist()
ans = ((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
ans = cspace_convert(ans, "JCh", "sRGB1").tolist()
ans = sRGBColor(*ans)
ans = (ans.clamped_rgb_r, ans.clamped_rgb_g, ans.clamped_rgb_b)
ans = tuple(round(i*255) for i in ans)
return "#%02x%02x%02x" % ans
def ipt_jch(start, amount, saturation, luminosity):
# Generate colors
k = 360/amount
colors = [(luminosity, saturation, start + i*k) for i in range(amount)]
# From lch to IPT
ans = ((l/100, c/100*cos(radians(h)), c/100*sin(radians(h)))
for l, c, h in colors)
# From IPT to XYZ1
ans = (convert_color(IPTColor(i, p, t), XYZColor, target_illuminant="d65")
for i, p, t in ans)
ipt_colors = (color.get_value_tuple() for color in ans)
# From JCh to XYZ1
ans = (cspace_convert(color, "JCh", "XYZ1") for color in colors)
jch_colors = (color.tolist() for color in ans)
# Compute average
ans = (((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
for (x1, y1, z1), (x2, y2, z2)
in zip(ipt_colors, jch_colors))
# From XYZ1 to sRGB1
ans = (cspace_convert(color, "XYZ1", "sRGB1") for color in ans)
ans = ((color.tolist() for color in ans))
ans = (sRGBColor(*color) for color in ans)
return to_hex_rgb(ans)
def jmh(start, amount, saturation, luminosity):
k = 360/amount
ans = ((luminosity, saturation, start + i*k) for i in range(amount))
ans = (cspace_convert(color, "JMh", "sRGB1") for color in ans)
ans = (color.tolist() for color in ans)
ans = (sRGBColor(*color) for color in ans)
return to_hex_rgb(ans)
def jmh_mixer(color1, color2):
rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
x1, y1, z1 = cspace_convert(rgb1, "sRGB255", "JMh").tolist()
x2, y2, z2 = cspace_convert(rgb2, "sRGB255", "JMh").tolist()
ans = ((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
ans = cspace_convert(ans, "JMh", "sRGB1").tolist()
ans = sRGBColor(*ans)
ans = (ans.clamped_rgb_r, ans.clamped_rgb_g, ans.clamped_rgb_b)
ans = tuple(round(i*255) for i in ans)
return "#%02x%02x%02x" % ans
def jmh_mixer_2(*colors):
n = len(colors)
ans = (hex_to_rgb(color) for color in colors)
ans = (cspace_convert(color, "sRGB255", "JMh") for color in ans)
ans = (color.tolist() for color in ans)
ans = zip(*ans)
ans = (sum(items)/n for items in ans)
return list(ans)
def create_cie_system(cie_string):
def hex_colors(start, amount, saturation, luminosity):
k = 360/amount
ans = ((luminosity, saturation, start + i*k) for i in range(amount))
ans = (cspace_convert(color, cie_string, "sRGB1") for color in ans)
ans = (color.tolist() for color in ans)
ans = (sRGBColor(*color) for color in ans)
return to_hex_rgb(ans)
return hex_colors
def create_cie_mixer(cie_string):
def mix(color1, color2):
rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
x1, y1, z1 = cspace_convert(rgb1, "sRGB255", cie_string).tolist()
x2, y2, z2 = cspace_convert(rgb2, "sRGB255", cie_string).tolist()
ans = ((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
ans = cspace_convert(ans, cie_string, "sRGB1").tolist()
ans = sRGBColor(*ans)
ans = (ans.clamped_rgb_r, ans.clamped_rgb_g, ans.clamped_rgb_b)
ans = tuple(round(i*255) for i in ans)
return "#%02x%02x%02x" % ans
return mix
def create_cam02_system(cie_string):
def hex_colors(start, amount, saturation, luminosity):
k = 360/amount
ans = ((luminosity, saturation, start + i*k) for i in range(amount))
ans = ((l, c*cos(radians(h)), c*sin(radians(h))) for l, c, h in ans)
ans = (cspace_convert(color, cie_string, "sRGB1") for color in ans)
ans = (color.tolist() for color in ans)
ans = (sRGBColor(*color) for color in ans)
return to_hex_rgb(ans)
return hex_colors
def create_cam02_mixer(cie_string):
def mix(color1, color2):
rgb1, rgb2 = hex_to_rgb(color1), hex_to_rgb(color2)
x1, y1, z1 = cspace_convert(rgb1, "sRGB255", cie_string).tolist()
x2, y2, z2 = cspace_convert(rgb2, "sRGB255", cie_string).tolist()
ans = ((x1 + x2)/2, (y1 + y2)/2 , (z1 + z2)/2)
ans = cspace_convert(ans, cie_string, "sRGB1").tolist()
ans = sRGBColor(*ans)
ans = (ans.clamped_rgb_r, ans.clamped_rgb_g, ans.clamped_rgb_b)
ans = tuple(round(i*255) for i in ans)
return "#%02x%02x%02x" % ans
return mix
# OSR18a hue angles
R = 0
Y = 66
G = 136
C = 191
B = 246
M = 303
# Delta hue
d_RY = Y - R
d_YG = G - Y
d_GC = C - G
d_CB = B - C
d_BM = M - B
d_MR = 360 - M
# Luminosity
R_L = 0.25212576226954675
Y_L = 0.7200627962473862
G_L = 0.6479370339778395
C_L = 0.6486607905138208
B_L = 0.09072375653598125
M_L = 0.342849518805528
# Slope
s_RY = (Y_L - R_L)/d_RY
s_YG = (G_L - Y_L)/d_YG
s_GC = (C_L - G_L)/d_GC
s_CB = (B_L - C_L)/d_CB
s_BM = (M_L - B_L)/d_BM
s_MR = (R_L - M_L)/d_MR
def _hsl1_to_rgb(h, s, l):
s = s/100
l = l/100
d = l if l <= 0.5 else (1 - l)
while h > 360:
h = h - 360
if R <= h <= Y:
j = l - (-R*s_RY + R_L + h*s_RY - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs((h/d_RY)%2 - 1))
r, g, b = (c + m, x + m, m)
elif Y < h <= G:
j = l - (-Y*s_YG + Y_L + h*s_YG - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs(((G - h)/d_YG)%2 - 1))
r, g, b = (x + m, c + m, m)
elif G < h <= C:
j = l - (-G*s_GC + G_L + h*s_GC - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs(((G - h)/d_GC)%2 - 1))
r, g, b = (m, c + m, x + m)
elif C < h <= B:
j = l - (-C*s_CB + C_L + h*s_CB - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs(((B - h)/d_CB)%2 - 1))
r, g, b = (m, x + m, c + m)
elif B < h <= M:
j = l - (-B*s_BM + B_L + h*s_BM - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs(((B - h)/d_BM)%2 - 1))
r, g, b = (x + m, m, c + m)
elif M < h <= 360:
j = l - (-M*s_MR + M_L + h*s_MR - 0.5)*d*s
c = (1 - abs(2*j - 1))*s
m = j - c/2
x = c*(1 - abs(((360 - h)/d_MR)%2 - 1))
r, g, b = (c + m, m, x + m)
    else:
        # fallback for out-of-range hue (m is undefined on this path); render as grey from lightness
        r, g, b = (l, l, l)
return r, g, b
def hsl2(start, amount, saturation, luminosity):
step = 360/amount
ans = ((start + i*step, saturation, luminosity) for i in range(amount))
ans = (_hsl1_to_rgb(h, s, l) for h, s, l in ans)
ans = (_rgb1_to_rgb255(r, g, b) for r, g, b in ans)
ans = (("#%02x%02x%02x" % (r, g, b), r, g, b) for r, g, b in ans)
return ans
space = {
"HSL": create_system(hsl),
"HSL2": hsl2,
"HSV": create_system(hsv),
"YIQ": yiq,
"cat02": cat02,
"YCbCr": cat02,
"LCHab": create_system(lchab),
"LCHuv": create_system(lchuv),
"IPT": create_system(ipt),
"JCh": jch,
"JMh": jmh,
"CIELCHab": create_cie_system("CIELCh"),
"CAM02-UCS": create_cam02_system("CAM02-UCS"),
"CAM02-LCD": create_cam02_system("CAM02-LCD"),
"CAM02-SCD": create_cam02_system("CAM02-SCD"),
"IPT+JCh": ipt_jch,
}
mixer = {
"HSL": create_mixer(HSLColor),
"HSV": create_mixer(HSLColor),
"YIQ": yiq_mixer,
"LCHab": create_mixer(LCHabColor),
"LCHuv": create_mixer(LCHuvColor),
"IPT": create_mixer(IPTColor),
# "BT2020": create_mixer(BT2020Color),
"JCh": jch_mixer,
"JMh": jmh_mixer,
"CIELCHab": create_cie_mixer("CIELCh"),
"CAM02-UCS": create_cam02_mixer("CAM02-UCS"),
"CAM02-LCD": create_cam02_mixer("CAM02-LCD"),
"CAM02-SCD": create_cam02_mixer("CAM02-SCD"),
}
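# Hedged usage sketch of the two registries above, kept as comments so importing the
# module has no side effects; the numeric values are arbitrary examples:
#
#     palette = list(space["HSL"](start=0, amount=6, saturation=60, luminosity=50))
#     # -> six ('#rrggbb', r, g, b) tuples, hues evenly spaced around the wheel
#     blended = mixer["HSL"]("#ff0000", "#0000ff")
#     # -> a single '#rrggbb' string halfway between the two inputs in HSL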
|
StarcoderdataPython
|
6678225
|
#!/usr/bin/env python3
from aws_cdk import core
from s3trigger.s3trigger_stack import S3TriggerStack
app = core.App()
S3TriggerStack(app, "s3trigger")
app.synth()
|
StarcoderdataPython
|
3393052
|
"""
Core control module for nimbus buddy
"""
import argparse
import logging
import unittest
import nimbusdisplay
import terraformhandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
# this will inherit aws cloud buddy OR we will use a function pointer
class NimbusBuddy:
def __init__(self):
pass
def main():
parser = argparse.ArgumentParser(description="Cloud Visualization and Backup Tool")
parser.add_argument("command", help="valid commands: display, terraform")
parser.add_argument("region", help="Current working AWS Region e.g. us-west-1")
parser.add_argument("--targetregion", help="Destination AWS Region for migration")
parser.add_argument(
"--ami", help="Override AMI codes for region to region migration in terraform"
)
args = parser.parse_args()
nd = nimbusdisplay.Display(args.region)
if args.command == "display":
nd.display()
elif args.command == "terraform":
ami = None
if args.ami:
ami = args.ami
if args.region and args.targetregion:
tf = terraformhandler.TerraformHandler(args.region, args.targetregion, ami)
tf.terraformDump()
elif args.region:
print("no target region set, defaulting to region = targetregion")
tf = terraformhandler.TerraformHandler(args.region, args.region, ami)
tf.terraformDump()
else:
print("need args")
elif args.command == "test":
nd.VPCandSubnets()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1776180
|
'''
A newly designed keypad was tested, where a tester pressed a sequence of n keys, one at a time.
You are given a string keysPressed of length n, where keysPressed[i] was
the ith key pressed in the testing sequence, and a sorted list releaseTimes, where
releaseTimes[i] was the time the ith key was released. Both arrays are 0-indexed. The 0th key was
pressed at the time 0, and every subsequent key was pressed at the exact time the previous key was released.
The tester wants to know the key of the keypress that had the longest duration. The ith
keypress had a duration of releaseTimes[i] - releaseTimes[i - 1], and the 0th keypress had a duration of releaseTimes[0].
'''
from typing import List


class Solution:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
times = releaseTimes
r = keysPressed
res = [None] * len(times)
res[0] = (times[0], r[0])
for i in range(1,len(times)):
res[i] = ( times[i] - times[i-1] , r[i])
maxi = max(item[0] for item in res)
#print(maxi)
temp = list()
for item in res:
if item[0] == maxi:
temp.append(item[1])
temp.sort()
print( temp[-1])
return temp[-1]
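# Hedged usage sketch (the example values come from the problem statement above and are
# not part of the original submission):
#
#     sol = Solution()
#     sol.slowestKey([9, 29, 49, 50], "cbcd")   # durations 9, 20, 20, 1 -> returns 'c'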
|
StarcoderdataPython
|
3288215
|
#!/usr/bin/env python3
# Copyright 2020, <NAME>
# Licensed under the terms of the MIT license. See LICENSE file in project root for terms.
# Ensure the device is at the "Guess for dice roll (1-6)?" prompt
import binascii
import serial
import sys
import struct
if len(sys.argv) < 2:
print("Usage: {} serport".format(sys.argv[0]))
sys.exit(1)
in_serport = sys.argv[1]
ser = serial.Serial(in_serport, 115200, timeout=1)
DUMP_ALL_RAM_PAYLOAD = binascii.unhexlify("48f20002c2f2000240f20003c2f202030320117800df02f101029a42f9d1704700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800020") + b'\r\n'
for b in DUMP_ALL_RAM_PAYLOAD:
ser.write(bytes([b]))
ser.flush()
x = ser.read(1)
assert len(x) == 1
reply = ser.read(0x18000)
# print(reply)
prng_state = reply[0x100:0x100 + 132]
print(prng_state)
print(binascii.hexlify(prng_state))
prng_index = struct.unpack("<I", prng_state[-4:])[0]
print(prng_index)
for i in range(prng_index, 64):
cachebyte = prng_state[64 + i]
if cachebyte < 252:
print("Next roll is {}".format(cachebyte % 6 + 1))
|
StarcoderdataPython
|
85406
|
# -*-coding:utf-8 -*-
import numpy as np
from bs4 import BeautifulSoup
import random
def scrapePage(retX, retY, inFile, yr, numPce, origPrc):
"""
函数说明:从页面读取数据,生成retX和retY列表
Parameters:
retX - 数据X
retY - 数据Y
inFile - HTML文件
yr - 年份
numPce - 乐高部件数目
origPrc - 原价
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
    # open and read the HTML file
with open(inFile, encoding='utf-8') as f:
html = f.read()
soup = BeautifulSoup(html)
i = 1
    # parse according to the structure of the HTML page
currentRow = soup.find_all('table', r = "%d" % i)
while(len(currentRow) != 0):
currentRow = soup.find_all('table', r = "%d" % i)
title = currentRow[0].find_all('a')[1].text
lwrTitle = title.lower()
        # check whether the listing is labelled as brand new
if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
newFlag = 1.0
else:
newFlag = 0.0
        # check whether the item is marked as sold; only sold items are collected
soldUnicde = currentRow[0].find_all('td')[3].find_all('span')
if len(soldUnicde) == 0:
print("商品 #%d 没有出售" % i)
else:
            # parse the page to get the selling price
soldPrice = currentRow[0].find_all('td')[4]
priceStr = soldPrice.text
priceStr = priceStr.replace('$','')
priceStr = priceStr.replace(',','')
if len(soldPrice) > 1:
priceStr = priceStr.replace('Free shipping', '')
sellingPrice = float(priceStr)
            # drop prices of incomplete sets
if sellingPrice > origPrc * 0.5:
print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
retX.append([yr, numPce, newFlag, origPrc])
retY.append(sellingPrice)
i += 1
currentRow = soup.find_all('table', r = "%d" % i)
def ridgeRegres(xMat, yMat, lam = 0.2):
"""
函数说明:岭回归
Parameters:
xMat - x数据集
yMat - y数据集
lam - 缩减系数
Returns:
ws - 回归系数
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
xTx = xMat.T * xMat
denom = xTx + np.eye(np.shape(xMat)[1]) * lam
if np.linalg.det(denom) == 0.0:
print("矩阵为奇异矩阵,不能求逆")
return
ws = denom.I * (xMat.T * yMat)
return ws
def setDataCollect(retX, retY):
"""
函数说明:依次读取六种乐高套装的数据,并生成数据矩阵
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
scrapePage(retX, retY, './lego/lego8288.html', 2006, 800, 49.99) #2006年的乐高8288,部件数目800,原价49.99
scrapePage(retX, retY, './lego/lego10030.html', 2002, 3096, 269.99) #2002年的乐高10030,部件数目3096,原价269.99
scrapePage(retX, retY, './lego/lego10179.html', 2007, 5195, 499.99) #2007年的乐高10179,部件数目5195,原价499.99
scrapePage(retX, retY, './lego/lego10181.html', 2007, 3428, 199.99) #2007年的乐高10181,部件数目3428,原价199.99
scrapePage(retX, retY, './lego/lego10189.html', 2008, 5922, 299.99) #2008年的乐高10189,部件数目5922,原价299.99
scrapePage(retX, retY, './lego/lego10196.html', 2009, 3263, 249.99) #2009年的乐高10196,部件数目3263,原价249.99
def regularize(xMat, yMat):
"""
函数说明:数据标准化
Parameters:
xMat - x数据集
yMat - y数据集
Returns:
inxMat - 标准化后的x数据集
inyMat - 标准化后的y数据集
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
inxMat = xMat.copy() #数据拷贝
inyMat = yMat.copy()
yMean = np.mean(yMat, 0) #行与行操作,求均值
inyMat = yMat - yMean #数据减去均值
inMeans = np.mean(inxMat, 0) #行与行操作,求均值
inVar = np.var(inxMat, 0) #行与行操作,求方差
# print(inxMat)
print(inMeans)
# print(inVar)
inxMat = (inxMat - inMeans) / inVar #数据减去均值除以方差实现标准化
return inxMat, inyMat
def rssError(yArr,yHatArr):
"""
函数说明:计算平方误差
Parameters:
yArr - 预测值
yHatArr - 真实值
Returns:
Website:
http://www.cuijiahua.com/
Modify:
2017-12-03
"""
return ((yArr-yHatArr)**2).sum()
def standRegres(xArr,yArr):
"""
函数说明:计算回归系数w
Parameters:
xArr - x数据集
yArr - y数据集
Returns:
ws - 回归系数
Website:
http://www.cuijiahua.com/
Modify:
2017-11-12
"""
xMat = np.mat(xArr); yMat = np.mat(yArr).T
xTx = xMat.T * xMat #根据文中推导的公示计算回归系数
if np.linalg.det(xTx) == 0.0:
print("矩阵为奇异矩阵,不能求逆")
return
ws = xTx.I * (xMat.T*yMat)
return ws
def crossValidation(xArr, yArr, numVal = 10):
"""
函数说明:交叉验证岭回归
Parameters:
xArr - x数据集
yArr - y数据集
numVal - 交叉验证次数
Returns:
wMat - 回归系数矩阵
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
    m = len(yArr)  # number of samples
    indexList = list(range(m))  # list of sample indices
    errorMat = np.zeros((numVal,30))  # error matrix: numVal rows x 30 columns
    for i in range(numVal):  # run cross-validation numVal times
        trainX = []; trainY = []  # training set
        testX = []; testY = []  # test set
        random.shuffle(indexList)  # shuffle the order
        for j in range(m):  # split the data: 90% training, 10% test
            if j < m * 0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        wMat = ridgeTest(trainX, trainY)  # ridge weights for 30 different lambda values
        for k in range(30):  # loop over all sets of ridge weights
            matTestX = np.mat(testX); matTrainX = np.mat(trainX)  # test and training matrices
            meanTrain = np.mean(matTrainX,0)  # training-set mean
            varTrain = np.var(matTrainX,0)  # training-set variance
            matTestX = (matTestX - meanTrain) / varTrain  # standardize the test set with training statistics
            yEst = matTestX * np.mat(wMat[k,:]).T + np.mean(trainY)  # predict y from the weights ws
            errorMat[i, k] = rssError(yEst.T.A, np.array(testY))  # record the error
    meanErrors = np.mean(errorMat,0)  # average error over the cross-validation runs
    minMean = float(min(meanErrors))  # find the minimum error
    bestWeights = wMat[np.nonzero(meanErrors == minMean)]  # pick the best regression weights
    xMat = np.mat(xArr); yMat = np.mat(yArr).T
    meanX = np.mean(xMat,0); varX = np.var(xMat,0)
    unReg = bestWeights / varX  # the data was standardized, so undo the scaling
    print('%f%+f*year%+f*num_pieces%+f*is_new%+f*original_price' % ((-1 * np.sum(np.multiply(meanX,unReg)) + np.mean(yMat)), unReg[0,0], unReg[0,1], unReg[0,2], unReg[0,3]))
def ridgeTest(xArr, yArr):
"""
函数说明:岭回归测试
Parameters:
xMat - x数据集
yMat - y数据集
Returns:
wMat - 回归系数矩阵
Website:
http://www.cuijiahua.com/
Modify:
2017-11-20
"""
xMat = np.mat(xArr); yMat = np.mat(yArr).T
#数据标准化
yMean = np.mean(yMat, axis = 0) #行与行操作,求均值
yMat = yMat - yMean #数据减去均值
xMeans = np.mean(xMat, axis = 0) #行与行操作,求均值
xVar = np.var(xMat, axis = 0) #行与行操作,求方差
xMat = (xMat - xMeans) / xVar #数据减去均值除以方差实现标准化
numTestPts = 30 #30个不同的lambda测试
wMat = np.zeros((numTestPts, np.shape(xMat)[1])) #初始回归系数矩阵
for i in range(numTestPts): #改变lambda计算回归系数
ws = ridgeRegres(xMat, yMat, np.exp(i - 10)) #lambda以e的指数变化,最初是一个非常小的数,
wMat[i, :] = ws.T #计算回归系数矩阵
return wMat
def useStandRegres():
"""
函数说明:使用简单的线性回归
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-11-12
"""
lgX = []
lgY = []
setDataCollect(lgX, lgY)
data_num, features_num = np.shape(lgX)
lgX1 = np.mat(np.ones((data_num, features_num + 1)))
lgX1[:, 1:5] = np.mat(lgX)
ws = standRegres(lgX1, lgY)
    print('%f%+f*year%+f*num_pieces%+f*is_new%+f*original_price' % (ws[0], ws[1], ws[2], ws[3], ws[4]))
def usesklearn():
"""
函数说明:使用sklearn
Parameters:
无
Returns:
无
Website:
http://www.cuijiahua.com/
Modify:
2017-12-08
"""
from sklearn import linear_model
reg = linear_model.Ridge(alpha = .5)
lgX = []
lgY = []
setDataCollect(lgX, lgY)
reg.fit(lgX, lgY)
    print('%f%+f*year%+f*num_pieces%+f*is_new%+f*original_price' % (reg.intercept_, reg.coef_[0], reg.coef_[1], reg.coef_[2], reg.coef_[3]))
if __name__ == '__main__':
usesklearn()
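# Hedged sketch of driving the ridge-regression cross-validation path defined above
# instead of sklearn (assumes the ./lego/*.html files referenced in setDataCollect exist):
#
#     lgX, lgY = [], []
#     setDataCollect(lgX, lgY)
#     crossValidation(lgX, lgY, numVal=10)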
|
StarcoderdataPython
|
9754809
|
from nms.non_maximum_suppression import non_maximum_suppression
|
StarcoderdataPython
|
3462302
|
<gh_stars>100-1000
"""
Test that weather options can be configured both from scenario and
programmatically
"""
import pytest
from _pytest.fixtures import FixtureRequest
from holodeck import HolodeckException
from holodeck.environments import HolodeckEnvironment
from tests.utils.captures import (
compare_rgb_sensor_data_with_baseline,
compare_rgb_sensor_data,
)
from tests.worlds.mazeworld.conftest import (
env_with_config,
weather_type_test_data,
weather_config, fog_density_test_data, day_cycle_test_data, time_test_data)
def mean_square_error_before_after_reset(env: HolodeckEnvironment):
"""
Args:
env: Environment to reset and test on
Returns: mean squared error between RGB sensor data capture before and
after and environment reset
"""
env.tick(10)
before_data = env.tick()["TestCamera"]
env.reset()
env.tick(10)
after_data = env.tick()["TestCamera"]
return compare_rgb_sensor_data(before_data, after_data)
@pytest.mark.parametrize("weather_type, max_err", weather_type_test_data)
def test_weather_type_scenario(
weather_type: str, max_err: float, request: FixtureRequest
) -> None:
"""Validate that weather type is loaded correctly from scenario by
comparing RGB sensor data with saved baseline
data
Args:
weather_type: Type of weather in ["sunny", "cloudy", "rain"]
max_err: Maximum mean squared error between sensor data and baseline
data allowed for test to pass
request: pytest fixture request information
"""
config = weather_config.copy()
config["weather"] = {"type": weather_type}
with env_with_config(config) as env:
env.tick(5)
err = compare_rgb_sensor_data_with_baseline(
env.tick()["TestCamera"],
request.fspath.dirname,
"weather_type_{}".format(weather_type),
)
assert err < max_err
@pytest.mark.parametrize("fog_density, max_err", fog_density_test_data)
def test_weather_fog_density_scenario(
fog_density: float, max_err: float, request: FixtureRequest
) -> None:
"""Validate that fog density is loaded correctly from scenario by
comparing RGB sensor data with saved baseline data
image
Args:
fog_density: Density of fog in interval [0, 1]
max_err: Maximum mean squared error between sensor data and baseline
data allowed for test to pass
request: pytest fixture request information
"""
config = weather_config.copy()
config["weather"] = {"fog_density": fog_density}
with env_with_config(config) as env:
env.tick(5)
err = compare_rgb_sensor_data_with_baseline(
env.tick()["TestCamera"],
request.fspath.dirname,
"weather_fog_density_{}".format(fog_density),
)
assert err < max_err
@pytest.mark.parametrize(
"cycle_length, ticks, max_err_before, max_err_after", day_cycle_test_data
)
def test_weather_day_cycle_scenario(
cycle_length: float,
ticks: int,
max_err_before: float,
max_err_after: float,
request: FixtureRequest,
) -> None:
"""Verify that day cycle can be set with scenario by comparing RGB sensor
data with saved baseline data
Args:
cycle_length: The hour in 24-hour format: [0, 23].
ticks: Number of ticks between captures
max_err_before: Maximum mean squared error between sensor data and
baseline for `before` image
max_err_before: Maximum mean squared error between sensor data and
baseline for `after` image
data allowed for test to pass
request: pytest fixture request information
"""
config = weather_config.copy()
config["weather"] = {"day_cycle_length": cycle_length}
with env_with_config(config) as env:
env.tick(5)
err_before = compare_rgb_sensor_data_with_baseline(
env.tick()["TestCamera"],
request.fspath.dirname,
"weather_time_before_{}".format(cycle_length),
)
env.tick(ticks)
err_after = compare_rgb_sensor_data_with_baseline(
env.tick()["TestCamera"],
request.fspath.dirname,
"weather_time_after_{}".format(cycle_length),
)
assert err_before < max_err_before
assert err_after < max_err_after
@pytest.mark.parametrize("hour, max_err", time_test_data)
def test_weather_time_scenario(
hour: float, max_err: float, request: FixtureRequest
) -> None:
"""Validate that time can be set with scenario by comparing RGB sensor
data with saved baseline data
Args:
hour: The hour in 24-hour format: [0, 23].
max_err: Maximum mean squared error between sensor
data and baseline
request: pytest fixture request information
"""
config = weather_config.copy()
config["weather"] = {"hour": hour}
with env_with_config(config) as env:
env.tick(5)
err = compare_rgb_sensor_data_with_baseline(
env.tick()["TestCamera"],
request.fspath.dirname,
"weather_time_{}".format(hour),
)
assert err < max_err
def test_fail_incorrect_weather_type_scenario():
"""
Validate that an exception is thrown when an invalid weather type is
specified in scenario
"""
# Hail is not a valid weather type--this is on purpose
config = weather_config.copy()
config["weather"] = {"type": "hail"}
with pytest.raises(HolodeckException):
env_with_config(config)
@pytest.mark.parametrize("weather_type, max_err", weather_type_test_data)
def test_weather_type_persists_after_reset_scenario(
weather_type: str, max_err: float,
):
"""
Validate that weather type set in scenario persists after an environment
reset
Args:
weather_type: Type of weather in ["sunny", "cloudy", "rain"]
max_err: Maximum mean squared error between sensor data and baseline
"""
config = weather_config.copy()
config["weather"] = {"type": weather_type}
err = mean_square_error_before_after_reset(env_with_config(config))
assert err < max_err
@pytest.mark.parametrize("fog_density, max_err", fog_density_test_data)
def test_weather_fog_density_persists_after_reset_scenario(
fog_density: float, max_err: float,
):
"""
Validate that fog density set in scenario persists after an environment
reset
Args:
fog_density: Density of fog in interval [0, 1]
max_err: Maximum mean squared error between sensor data and baseline
"""
config = weather_config.copy()
config["weather"] = {"fog_density": fog_density}
err = mean_square_error_before_after_reset(env_with_config(config))
assert err < max_err
@pytest.mark.parametrize("hour, max_err", time_test_data)
def test_weather_time_persists_after_reset_scenario(
hour: int, max_err: float,
):
"""
Validate that time set in scenario persists after an environment
reset
Args:
hour: The hour in 24-hour format: [0, 23].
max_err: Maximum mean squared error between sensor data and baseline
"""
config = weather_config.copy()
config["weather"] = {"hour": hour}
err = mean_square_error_before_after_reset(env_with_config(config))
assert err < max_err
|
StarcoderdataPython
|
11353834
|
import django.contrib.auth.views
from otp_agents.forms import OTPAuthenticationForm
from lemoncurry import breadcrumbs
breadcrumbs.add(route='lemonauth:login', label='log in', parent='home:index')
login = django.contrib.auth.views.LoginView.as_view(
authentication_form=OTPAuthenticationForm,
extra_context={'title': 'log in'},
template_name='lemonauth/login.html',
redirect_authenticated_user=True,
)
|
StarcoderdataPython
|
1992850
|
<filename>applications/cdr/test2.py<gh_stars>0
from __future__ import print_function
import sys, os, shutil
from argparse import ArgumentParser
sys.path.append("@simfempythonpath@")
sys.path.append("@libpythonpath@")
import mesh.geometry
import tools.plot
import numpy as np
import simfempy
import simfemcdr
import time
import math
#---------------------------------------------------------#
def main():
if len(sys.argv)>1:
if sys.argv[1] == "1D":
meshtypes = ["LineMesh"]
elif sys.argv[1] == "2D":
meshtypes = ["TriangleMesh"]
elif sys.argv[1] == "3D":
meshtypes = ["TetrahedralMesh"]
else:
raise KeyError("unknwon argument", sys.argv[1])
meshtypes = ["LineMesh", "TriangleMesh", "TetrahedralMesh"]
meshtypes = ["TriangleMesh"]
testname = "poisson"
# testname = "cdrexp"
# testname = "rdcosh"
fem = "P1"
# fem = "CR1"
niter = 3
methods=["traditionalintegration", "nitscheintegration", "newnitscheintegration","traditional", "nitsche", "newnitsche"]
methods=["traditionalintegration", "nitscheintegration", "newnitscheintegration"]
# methods=["traditionalintegration"]
methods=["traditional", "nitsche", "newnitsche"]
# methods=["traditional", "traditionalintegration"]
# methods=["nitsche", "nitscheintegration"]
# methods=["newnitsche", "newnitscheintegration"]
for meshtype in meshtypes:
test(meshtype, testname, methods, niter, fem)
#---------------------------------------------------------#
def test(meshtype, testname, methods, niter, fem):
if meshtype == "LineMesh":
from mesh.geometries.unitline import GeometryClass
elif meshtype == "TriangleMesh":
from mesh.geometries.unitsquare import GeometryClass
elif meshtype == "TetrahedralMesh":
from mesh.geometries.unitcube import GeometryClass
if testname == "cdrexp":
diff = 0.001
beta = "east"
alpha = 0.0
application="cdexplayer"
elif testname == "rdcosh":
diff = 0.0001
beta = "zero"
alpha = 1.0
application="rdcosh"
elif testname == "poisson":
diff = 1.0
beta = "zero"
alpha = 0.0
application="quadratic"
# application="cosinus"
# application="linear"
errL1 = {}
errL2 = {}
errH1 = {}
times = {}
for method in methods:
times[method] = 0.0
errL1[method] = np.zeros(niter)
errL2[method] = np.zeros(niter)
errH1[method] = np.zeros(niter)
geom=GeometryClass(hmean=0.5)
name = geom.runGmsh(outdir="Data", number=0)
hmeans = np.zeros(niter)
for ih in range(niter):
# print("---- hmean=", hmean)
# geom2=GeometryClass(hmean=hmean)
# geom2.runGmsh(outdir="Data")
name = geom.runGmshRefine(number=ih, outdir="Data")
partion_id = 1
construct_bdrymeshes = False
mesh = simfempy.create(meshtype, partion_id, construct_bdrymeshes)
meshfilename = "Data/"+name+'.msh'
print("meshfilename",meshfilename)
mesh.readGmsh(meshfilename)
mesh.addGeometryObject('MeasureOfCell')
mesh.addGeometryObject('Normals')
# mesh.save("Data/"+geom.name+'.h5')
hmeans[ih] = math.pow(mesh.getNCells(), -1/mesh.getDimension())
for im,method in enumerate(methods):
print("####### %s #####" %(method))
start = time.time()
solver = simfemcdr.Solver()
solver.setParameter("method",method)
solver.setParameter("application",application)
solver.setParameter("beta",beta)
solver.setParameter("fem",fem)
solver.setParameter("diff", diff);
solver.setParameter("alpha", alpha);
solver.setParameter("deltasupg", 0.5);
solver.setMesh(mesh)
# solver.loadMesh(meshtype, "Data/"+geom.name+'.h5')
solver.init()
info = solver.getInfo()
# print(info)
d = solver.run()
errL1[method][ih] = d["L1"]
errL2[method][ih] = d["L2"]
errH1[method][ih] = d["H1"]
# print("d=",d)
solver.writeXdmf()
times[method] = time.time()-start
for method in methods:
print("errL2 %-20s" %(method), errL2[method])
print()
for method in methods:
print("errL1 %-20s" %(method), errL1[method])
print()
for method in methods:
print("errH1 %-20s" %(method), errH1[method])
# for method in methods:
# errH1[method] = np.sqrt( diff*errH1[method]**2 + errL2[method]**2)
for method in methods: print("%-20s %10g" %(method,times[method]))
datatoplot = {}
datatoplot['L1'] = errL1
datatoplot['L2'] = errL2
datatoplot['H1'] = errH1
methodsnames={}
methodsnames["traditionalintegration"] = "trad"
methodsnames["nitscheintegration"] = "nit"
methodsnames["newnitscheintegration"] = "new"
methodsnames["traditional"] = "trad"
methodsnames["nitsche"] = "nit"
methodsnames["newnitsche"] = "new"
print("hmeans",hmeans)
simfemplot = tools.plot.SimFemPlot(methods, params=hmeans, param='hmean')
simfemplot.ploterrors(datatoplot)
#---------------------------------------------------------#
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.