seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt | api
string | string | string | string | string | string | int64 | string | string | string | int64 | string | string | list
---|---|---|---|---|---|---|---|---|---|---|---|---|---
40261683440
|
import sys
sys.path.append("..")
import os
import pandas
import re
import math
import argparse
from models.train_model import get_training_model_new
from train.ds_iterator import DataIterator
from train.ds_client_generator import DataGeneratorClient
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, CSVLogger, TensorBoard
from keras.layers.convolutional import Conv2D
from keras.applications.vgg19 import VGG19
def get_last_epoch():
data = pandas.read_csv(TRAINING_LOG)
return max(data['epoch'].values)
# euclidean loss as implemented in caffe https://github.com/BVLC/caffe/blob/master/src/caffe/layers/euclidean_loss_layer.cpp
def eucl_loss(x, y):
return K.sum(K.square(x - y)) / batch_size / 2
def step_decay(epoch):
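# caffe-style "step" lr policy: lr = base_lr * gamma^floor(total_iterations / stepsize)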
initial_lrate = base_lr
steps = epoch * iterations_per_epoch
lrate = initial_lrate * math.pow(gamma, math.floor(steps/stepsize))
return lrate
if __name__ == '__main__':
batch_size = 60
base_lr = 4e-5 # 2e-5
momentum = 0.9
weight_decay = 5e-4
lr_policy = "step"
gamma = 0.333
stepsize = 68053 # 136106; after every stepsize iterations, update the learning rate: lr = lr*gamma
max_iter = 20000 # 600000
#True = start data generator client, False = use augmented dataset file (deprecated)
use_client_gen = True
parser = argparse.ArgumentParser()
parser.add_argument('--stages', type=int, default =6, help='number of stages')
parser.add_argument('--port', type=int, default =5555, help= 'port where training data is running' )
parser.add_argument('--folder',type=str,default="weights_logs/5p_6/",help='"Where to save this training"' )
parser.add_argument('--gpu',default =1, help= 'what gpu to use, if "all" try to allocate on every gpu' )
parser.add_argument('--gpu_fraction', type=float, default =0.6, help= 'how much memory of the gpu to use' )
parser.add_argument('--np1', type=int, default =12, help= 'Number of pafs' )
parser.add_argument('--np2', type=int, default =6, help= 'number of heatmaps' )
args = parser.parse_args()
folder = args.folder
stages=int(args.stages)
port=int(args.port)
fraction = float(args.gpu_fraction)
np1=int(args.np1)#12 #number of channels for pafs
np2=int(args.np2)#6#number of channels for parts
gpu = args.gpu # keep as a string: int('all') would raise, and gpu may be a device index or "all"
print(gpu)
#stages=2 #number of stages of network
if gpu != 'all':
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="%d"%int(gpu)
import keras.backend as K
import tensorflow as tf
os.makedirs(folder,exist_ok=True)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = fraction
session = tf.Session(config=config)
WEIGHTS_100_EPOCH = os.path.join(folder,"weights-2-{epoch:04d}_%d_%d_%d.h5"%(stages,np1,np2))
WEIGHTS_BEST = os.path.join(folder,"weights_%d_%d_%d.best.h5"%(stages,np1,np2))
WEIGHTS_COMPLETE = os.path.join(folder,"complete_model_%d_%d_%d.h5"%(stages,np1,np2))
TRAINING_LOG = os.path.join(folder,"training_new_%d_%d_%d.csv"%(stages,np1,np2))
LOGS_DIR = os.path.join(folder,"logs/")
os.makedirs(LOGS_DIR,exist_ok=True)
model = get_training_model_new(weight_decay,np1=np1,np2=np2,stages=stages)
from_vgg = dict()
from_vgg['conv1_1'] = 'block1_conv1'
from_vgg['conv1_2'] = 'block1_conv2'
from_vgg['conv2_1'] = 'block2_conv1'
from_vgg['conv2_2'] = 'block2_conv2'
from_vgg['conv3_1'] = 'block3_conv1'
from_vgg['conv3_2'] = 'block3_conv2'
from_vgg['conv3_3'] = 'block3_conv3'
from_vgg['conv3_4'] = 'block3_conv4'
from_vgg['conv4_1'] = 'block4_conv1'
from_vgg['conv4_2'] = 'block4_conv2'
# load previous weights or vgg19 if this is the first run
if os.path.exists(WEIGHTS_BEST):
print("Loading the best weights...")
model.load_weights(WEIGHTS_BEST)
last_epoch = get_last_epoch() + 1
else:
print("Loading vgg19 weights...")
vgg_model = VGG19(include_top=False, weights='imagenet')
for layer in model.layers:
if layer.name in from_vgg:
vgg_layer_name = from_vgg[layer.name]
layer.set_weights(vgg_model.get_layer(vgg_layer_name).get_weights())
print("Loaded VGG19 layer: " + vgg_layer_name)
last_epoch = 0
# prepare generators
if use_client_gen:
train_client = DataGeneratorClient(port=port, host="localhost", hwm=160, batch_size=20,np1=np1,np2=np2,stages=stages)
train_client.start() # see train/ds_client_generator.py
train_di = train_client.gen()
train_samples = 100
else:
pass
# Add our augmenter for check stuff
# setup lr multipliers for conv layers
lr_mult=dict()
for layer in model.layers:
if isinstance(layer, Conv2D):
# stage = 1
if re.match("Mconv\d_stage1.*", layer.name):
kernel_name = layer.weights[0].name
bias_name = layer.weights[1].name
lr_mult[kernel_name] = 1
lr_mult[bias_name] = 2
# stage > 1
elif re.match("Mconv\d_stage.*", layer.name):
kernel_name = layer.weights[0].name
bias_name = layer.weights[1].name
lr_mult[kernel_name] = 4
lr_mult[bias_name] = 8
# vgg
else:
kernel_name = layer.weights[0].name
bias_name = layer.weights[1].name
lr_mult[kernel_name] = 1
lr_mult[bias_name] = 2
# configure loss functions
losses = {}
for i in range(1,stages+1):
losses["weight_stage"+str(i)+"_L1"] = eucl_loss
losses["weight_stage"+str(i)+"_L2"] = eucl_loss
print(losses.keys())
# learning rate schedule - equivalent of caffe lr_policy = "step"
iterations_per_epoch = train_samples // batch_size
# configure callbacks
lrate = LearningRateScheduler(step_decay)
checkpoint = ModelCheckpoint(WEIGHTS_BEST, monitor='loss', verbose=0, save_best_only=False, save_weights_only=True, mode='min', period=1)
checkpoint2 = ModelCheckpoint(WEIGHTS_100_EPOCH, monitor='loss', verbose=0, save_best_only=False, save_weights_only=True, mode='min', period=100)
checkpoint3 = ModelCheckpoint(WEIGHTS_COMPLETE, monitor='loss', verbose=0, save_best_only=True,save_weights_only=False, mode='min', period=100)
csv_logger = CSVLogger(TRAINING_LOG, append=True)
tb = TensorBoard(log_dir=LOGS_DIR, histogram_freq=0, write_graph=True, write_images=False)
callbacks_list = [lrate, checkpoint, csv_logger, tb,checkpoint2,checkpoint3]
# optimizer: the MultiSGD-with-lr-multipliers variant is commented out; plain Adam is used instead (lr_mult is currently unused)
#multisgd = MultiSGD(lr=base_lr, momentum=momentum, decay=0.0, nesterov=False, lr_mult=lr_mult)
multisgd = Adam(lr=base_lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# start training
model.compile(loss=losses, optimizer=multisgd, metrics=["accuracy"])
model.fit_generator(train_di,
steps_per_epoch=train_samples // batch_size,
epochs=max_iter,
callbacks=callbacks_list,
#validation_data=val_di,
#validation_steps=val_samples // batch_size,
use_multiprocessing=False,
initial_epoch=last_epoch
)
|
piperod/beepose
|
beepose/train/train_stages.py
|
train_stages.py
|
py
| 7,547 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.train_model.get_training_model_new",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "keras.applications.vgg19.VGG19",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "train.ds_client_generator.DataGeneratorClient",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 140,
"usage_type": "argument"
},
{
"api_name": "re.match",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.LearningRateScheduler",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.ModelCheckpoint",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.CSVLogger",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "keras.callbacks.TensorBoard",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "keras.optimizers.Adam",
"line_number": 189,
"usage_type": "call"
}
] |
15293278222
|
import numpy as np
import pandas as pd
from flask import Flask, render_template, request
app = Flask(__name__)
df = pd.read_csv("amazon_prime.csv")
df = df.fillna("NaN")
df["release_year"] = [str(x) for x in df['release_year']]
def get_features(feats):
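# feats = [column_names, raw_inputs]; drop empty inputs, then return up to 10 titles where each
# remaining input matches one of the values in its comma-separated column (case-insensitive)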
input_columns = feats[0]
inputs = feats[1]
indices = [inputs.index(x) for x in inputs if x != ""]
input_columns = [input_columns[idx] for idx in indices]
inputs = [inputs[idx] for idx in indices]
results = []
for sample in df.iloc:
if len(results)==10:
break
for col in input_columns:
features_idx = list(input_columns).index(col)
input_ = inputs[features_idx].lower()
split = sample[col].lower().split(", ")
if input_ not in split:
break
else:
results.append(sample["title"])
return results
@app.route("/")
def home():
return render_template("home.html")
@app.route("/get_data", methods=["POST"])
def get_data():
website = "home.html"
message = request.get_data()
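# The raw body looks like b'genre=...&rating=...'; str() then [2:-1] strips the b'' wrapper before splitting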
message = str(message)[2:-1].split("&")
category = [x.split("=")[0] for x in message]
value = [x.split("=")[1] for x in message]
features = {"genre":"", "rating":"", "release_year":"", "duration":"", "actor":"", "director":"", "country":""}
for col in category:
idx = category.index(col)
features[col] = value[idx]
message = list(features.values())
if message[1].split("&")[0]=="age_rating":
website = "secret_home.html"
message[4] = message[4].replace("+", " ")
message[5] = message[5].replace("+", " ")
feature_names = ["listed_in", "rating", "release_year", "duration", "cast", "director", "country"]
features = [feature_names, message]
if message==['', '', '', '', '', '', '']:
return render_template(website, result1="Please enter a value in any of the text boxes")
if message[3] != "":
input_ = message[3]
if not input_.isnumeric():
return render_template(website, result1="Please enter the duration as a number")
else:
features[1][3] = input_+" min"
if message[2] != "":
input_ = message[2]
if not input_.isnumeric():
return render_template(website, result1="Please enter the release year as a number")
# Run the lookup only after the inputs are validated and the duration is suffixed with " min"
result = get_features(features)
input_features = "Results for "+", ".join([x for x in message if x != ""])
if len(result)==0:
return render_template(website, result1="Your input did not match any movie or TV show in the database")
# Behavior-preserving rewrite of the ten-branch if/elif chain: pass result1..result10 for however many matches exist
results_kwargs = {"result"+str(i+1): r for i, r in enumerate(result[:10])}
return render_template(website, input_features=input_features, **results_kwargs)
if __name__=='__main__':
app.run(debug=True)
|
daBawse167/amazon-prime
|
app.py
|
app.py
|
py
| 4,782 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "flask.request.get_data",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 109,
"usage_type": "call"
}
] |
16119409500
|
import customtkinter as tk
tk.set_appearance_mode("dark")
janela = tk.CTk()
janela.title("Janela 1")
janela.geometry("400x350")
janela.configure(fg_color="grey31")
janela.resizable(width=False,height=False)
colunas = list(range(13))
linhas = list(range(13))
janela.grid_columnconfigure(colunas, weight=1)
janela.grid_rowconfigure(linhas, weight=1)
def verificar():
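# Read both grades, average them, and label "Aprovado" (pass) or "Reprovado" (fail) at média >= 6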
num1 = int(caixa1.get())
num2 = int(caixa2.get())
media = (num1 + num2) / 2
if media >= 6:
texto1.configure(text="Aprovado", text_color="green")
else:
texto1.configure(text="Reprovado", text_color="red")
texto= tk.CTkLabel(janela, text="Digite...")
texto.grid(row=6, column=6)
caixa1=tk.CTkEntry(janela, placeholder_text="Digite a primeira nota", width=250, height=50)
caixa1.grid(row=7, column=6)
caixa2=tk.CTkEntry(janela, placeholder_text="Digite a segunda nota", width=250, height=50)
caixa2.grid(row=8, column=6)
btn1= tk.CTkButton(janela, text="Clique Aqui", command= verificar, width=100, height=50, fg_color='DarkTurquoise')
btn1.grid (row=9, column=6)
texto1= tk.CTkLabel(janela, text="")
texto1.grid(row=10, column=6)
janela.mainloop()
|
dudasaanches/interface-grafica
|
1.py
|
1.py
|
py
| 1,201 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "customtkinter.set_appearance_mode",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTk",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkLabel",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkEntry",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkEntry",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkButton",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "customtkinter.CTkLabel",
"line_number": 39,
"usage_type": "call"
}
] |
37785863928
|
#!/usr/bin/env python3
# Modules libraries
from PyInquirer import Separator
from PyInquirer.prompts import list as PyInquirer_prompts_list
from PyInquirer.prompts.common import if_mousedown
from PyInquirer.prompts.list import basestring
from prompt_toolkit.layout.controls import TokenListControl
from prompt_toolkit.token import Token
# pylint: skip-file
# Override with https://github.com/CITGuru/PyInquirer/pull/88
class InquirerControl(TokenListControl):
def __init__(self, choices, **kwargs):
self.selected_option_index = 0
self.answered = False
self.choices = choices
self._init_choices(choices)
super(InquirerControl, self).__init__(self._get_choice_tokens, **kwargs)
def _init_choices(self, choices, default=None):
# helper to convert from question format to internal format
self.choices = [] # list (name, value, disabled)
searching_first_choice = True
for i, c in enumerate(choices):
if isinstance(c, Separator):
self.choices.append((c, None, None))
else:
if isinstance(c, basestring):
self.choices.append((c, c, None))
else:
name = c.get('name')
value = c.get('value', name)
disabled = c.get('disabled', None)
self.choices.append((name, value, disabled))
if searching_first_choice:
self.selected_option_index = i # found the first choice
searching_first_choice = False
@property
def choice_count(self):
return len(self.choices)
def _get_choice_tokens(self, cli):
tokens = []
T = Token
def append(index, choice):
selected = (index == self.selected_option_index)
@if_mousedown
def select_item(cli, mouse_event): # pragma: no cover
# bind option with this index to mouse event
self.selected_option_index = index
self.answered = True
cli.set_return_value(None)
if isinstance(choice[0], Separator):
tokens.append((T.Separator, ' %s\n' % choice[0]))
else:
tokens.append(
(T.Pointer if selected else T, ' \u276f ' if selected else ' '))
if selected:
tokens.append((Token.SetCursorPosition, ''))
if choice[2]: # disabled
tokens.append((T.Selected if selected else T,
'- %s (%s)' % (choice[0], choice[2])))
else:
try:
tokens.append(
(T.Selected if selected else T, str(choice[0]), select_item))
except: # pragma: no cover
tokens.append(
(T.Selected if selected else T, choice[0], select_item))
tokens.append((T, '\n'))
# prepare the select choices
for i, choice in enumerate(self.choices):
append(i, choice)
tokens.pop() # Remove last newline.
return tokens
def get_selection(self):
return self.choices[self.selected_option_index]
# Patcher class
class Patcher:
# Constructor
def __init__(self):
# Apply library patches
PyInquirer_prompts_list.InquirerControl = InquirerControl
|
starr-dusT/gitlab-ci
|
gitlabci_local/package/patcher.py
|
patcher.py
|
py
| 3,488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "prompt_toolkit.layout.controls.TokenListControl",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PyInquirer.Separator",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "PyInquirer.prompts.list.basestring",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "prompt_toolkit.token.Token",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "PyInquirer.prompts.common.if_mousedown",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PyInquirer.Separator",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "prompt_toolkit.token.Token.SetCursorPosition",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "prompt_toolkit.token.Token",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "PyInquirer.prompts.list.InquirerControl",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "PyInquirer.prompts.list",
"line_number": 94,
"usage_type": "name"
}
] |
20774839234
|
import csv
import math
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as mticker
initial = time.time()
B = 5e-4
RBW = 300
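# Assumed from context: B is a reference field amplitude in tesla and RBW the analyzer resolution bandwidth in Hz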
data = {}
for k in range(11):
for i in range(3):
with open('C:\\Users\\uqfgotar\\Documents\\Magnetometry\\Sensitivity_calculations\\Fernando\\254_4\\14stJun'
+ '\\Spectrum_analyzer\\SSA_' + str("{:02d}".format(k+1)) + '_' + str(i+1) + '.csv') as a:
df = csv.reader(a, delimiter=',')
df_temp = []
for row in df:
df_temp.append(row)
df = df_temp[31:]
for j in range(len(df)):
df[j] = [float(df[j][0]), float(df[j][1])] # np.float was removed in NumPy 1.24; use the builtin float
data['SSA_' + str(k + 1) + '_exp_' + str(i + 1)] = np.reshape(np.array(df), (-1, 2))
data['SSA_' + str(k+1) + '_exp_' + str(i+1)] = np.array(df)
# data['SSA_2_exp_1'] = data['SSA_2_exp_1'][:, 0:2]
# data['SSA_8_exp_1'] = data['SSA_8_exp_1'][:, 0:2]
Bmin_ref = np.zeros(11)
SN_min = np.zeros(11)
for k in range(11):
SNR = []
mean = np.mean(data['SSA_' + str(k + 1) + '_exp_3'][370:440, 1])
for row in range(751):
c = float(data['SSA_' + str(k + 1) + '_exp_2'][row, 1]) - mean
SNR.append(c)
data['SNR' + str(k + 1)] = np.array(SNR)
SN_min[k] = math.pow(10,(data['SNR' + str(k + 1)][370:440].max())/10) # dB to linear power ratio
Bmin_ref[k] = np.divide(B,(np.sqrt(SN_min[k]*RBW)))
for k in range(13):
with open('C:\\Users\\uqfgotar\\Documents\\Magnetometry\\Sensitivity_calculations\\Fernando\\254_4\\14stJun'
+ '\\Network_analyzer\\TRACE' + str("{:02d}".format(k+1)) + '.csv') as a:
df = csv.reader(a, delimiter=',')
df_temp = []
for row in df:
df_temp.append(row)
df = df_temp[3:]
for j in range(len(df)):
df[j] = [float(df[j][0]), float(df[j][1])]
data['TRACE' + str(k + 1)] = np.reshape(np.array(df), (-1, 2))
S21_Snn_ref_ratio = np.zeros(11)
Bmin_min = np.zeros(11)
for k in range(11):
Bmin = []
S21_Snn_ref_ratio[k] = data['TRACE' + str(k + 1)][8, 1]/data['TRACE13'][8, 1]
for row in range(751):
c = np.multiply(np.sqrt(np.multiply(np.divide(data['TRACE13'][row, 1],
data['TRACE' + str(k + 1)][row, 1]), S21_Snn_ref_ratio[k])), Bmin_ref[k])
Bmin.append(c)
for j in range(len(Bmin)):
Bmin[j] = float(Bmin[j])
data['Bmin' + str(k)] = np.asarray(Bmin)
data['Bmin_omega' + str(k)] = np.multiply(np.divide(data['Bmin' + str(k)], 1e-12), Bmin_ref[k])
print(data['Bmin_omega' + str(k)].shape)
Bmin_min[k] = np.divide(data['Bmin' + str(k)].min(), 1e-6)
height = [30, 60, 90, 150, 210, 270, 470, 670, 1000, 2000, 2400]
height = np.array(height)
axes = plt.gca()
xmin = data['TRACE1'][:, 0].min()
xmax = data['TRACE1'][:, 0].max()
plt.figure(1)
for k in range(11):
plt.plot(data['TRACE' + str(k + 1)][:,0], data['Bmin_omega' + str(k)], label=r'$\Delta$z = ' + str(height[k]))
plt.xlabel('Frequency (MHz)')
plt.ylabel(r'Sensitivity ($\mu$T/$\sqrt{Hz}$)')
axes.set_xlim([(xmin-50000), 2000000])
plt.figure(2)
plt.plot(height, Bmin_min, 'ro')
plt.xscale('log')
plt.xlabel(r'$\Delta$z ($\mu$m)')
plt.ylabel(r'Best sensitivity ($\mu$T/$\sqrt{Hz}$)')
final = time.time()
print('\n' + str(final - initial) + ' seconds')
plt.show()
|
gotamyers/Flux_conc_height
|
Read_multiple_data_files.py
|
Read_multiple_data_files.py
|
py
| 3,404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.divide",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 114,
"usage_type": "name"
}
] |
9910655539
|
from flask import Flask, render_template
import requests, json
NYTimes_API_KEY = 'ca470e1e91b15a82cc0d4350b08a3c0b:14:70189328'
app = Flask(__name__, static_folder='static', static_url_path='/static')
NYTimes_Search_URL = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?q={0}+&api-key=' + NYTimes_API_KEY
def searchArticle(topic):
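# Query the NYTimes Article Search API and return the list of matching article documents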
r = requests.get(NYTimes_Search_URL.format(topic))
data = json.loads(r.text)
return data['response']['docs']
@app.route("/")
def urlRoute():
return render_template('index.html', article=searchArticle('Artificial Intelligence'))
if __name__ == "__main__":
app.run()
|
NYUHackDays/NYTimes-Python-Done
|
nytimes.py
|
nytimes.py
|
py
| 614 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
}
] |
11004600528
|
from typing import List
class Solution:
def largestSumOfAverages(self, A: List[int], K: int) -> float:
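# p[i] holds the prefix sum of A[:i]; dp[i] starts as the average of A[i:] taken as one group,
# and each of the K-1 rounds allows one more split: dp[i] = max(dp[i], avg(A[i:j]) + dp[j])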
n = len(A)
p = [0.0] * (n + 1)
for i in range(n):
p[i+1] = p[i]+A[i]
dp = [0.0] * n
for i in range(n):
dp[i] = (p[n] - p[i])/(n-i)
for k in range(K-1):
for i in range(n):
for j in range(i+1,n):
dp[i] = max(dp[i], dp[j] + (p[j] - p[i])/(j-i))
return dp[0]
print(Solution().largestSumOfAverages(
[9,1,2,3,9], 3
))
|
xixihaha1995/CS61B_SP19_SP20
|
temp/toy/python/813. Largest Sum of Averages.py
|
813. Largest Sum of Averages.py
|
py
| 555 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
8495271737
|
import bpy
from bpy_extras.object_utils import world_to_camera_view
import numpy as np
from util import poissonDiscSampling
import math
import random
from mathutils import Euler, Vector
import os
import glob
import sys
class ForegroundObjectPlacementRandomizer:
"""
A randomizer class which randomly spawns virtual human objects.
The foreground generation process of the blender scene involves selecting a subset from a pool of 3D human assets.
These chosen 3D human assets are placed randomly within the region above the background objects.
The placement positions of the foreground objects and their distances from each other are determined through
Poisson disk sampling within the specified spatial area.
Attributes
----------
__scene (bpy.types.Scene): The blender scene data-block of current virtual environment.
__camera (bpy.types.Camera): The blender camera data-block.
__clip_start (float): Camera near clipping distance.
__clip_end (float): Camera far clipping distance.
num_foreground_object_in_scene_range (dict of str: int): The range of the number of foreground objects within the blender scene.
__num_foreground_object_in_scene (int): The number of foreground objects within the blender scene.
foreground_area (list of float): Spatial distribution area of foreground objects.
__foreground_domain_size (numpy.ndarray): Spatial distribution area of foreground objects (foreground_area converted to an ndarray).
foreground_poisson_disk_sampling_radius (float): Foreground objects separation distance.
asset_foreground_object_folder_path (str): The path to foreground object assets.
__foreground_object_collection (bpy.types.Collection): The blender collection data-block of foreground objects.
__n_particle (int): Number of particles generated by the poisson disk sampling.
__particle_coordinates (numpy.ndarray): Coordinates from the poisson disk sampling.
__particle_coordinates_can_see_in_view (list of list of float): Subset of the sampled coordinates visible in the camera view.
Methods
-------
__error_check(): Check that the assigned foreground object assets folder path isn't empty.
__load_object(): Load an asset from another blendfile into the current blendfile.
__posson_disc_sampling(): Use the poisson disk sampling algorithm to generate sample positions.
__import_foreground_object_asset(): Import a number of __n_particle foreground objects into current blender scene.
__check_particle_in_cam_view(): Check if the particles from the Poisson disk sampling are within the camera's view.
foreground_object_placement_randomize(): Generate foreground.
"""
def __init__(self,
num_foreground_object_in_scene_range = {"min": 1 , "max": 5}, # Must <= 5
foreground_area = [9, 7, 4],
foreground_poisson_disk_sampling_radius = 1.5,
asset_foreground_object_folder_path = "C:/Users/user/Documents/project/PeopleSansPeople/Asset/Human/Procedural"
):
self.__scene = bpy.data.scenes["Scene"]
self.__camera = bpy.data.objects['Camera']
self.__clip_start = bpy.data.objects['Camera'].data.clip_start
self.__clip_end = bpy.data.objects['Camera'].data.clip_end
self.num_foreground_object_in_scene_range = num_foreground_object_in_scene_range
self.__num_foreground_object_in_scene = None
self.foreground_area = foreground_area
self.__foreground_domain_size = np.array(self.foreground_area)
self.foreground_poisson_disk_sampling_radius = foreground_poisson_disk_sampling_radius
self.asset_foreground_object_folder_path = asset_foreground_object_folder_path
self.__foreground_object_collection = bpy.data.collections["HumanCollection"]
self.__n_particle = None
self.__particle_coordinates = None # np.array
self.__particle_coordinates_can_see_in_view = list()
def __error_check(self,asset_path_list):
"""Check assigned background object assets folder path isn't empty.
Args:
asset_path_list (list of str): list of the path to background object assets.
"""
num_asset_in_folder = len(asset_path_list)
if num_asset_in_folder < 1:
print(f'ERROR!!! can not find any foreground asset in {self.asset_foreground_object_folder_path}')
input("Press Enter to continue...")
sys.exit()
def __load_object(self,filepath):
"""Load asset from other blendfile to the current blendfile.
Args:
filepath (str): The path to background object assets.
References
----------
https://studio.blender.org/training/scripting-for-artists/5eabe54d521eafd0953f6d45/
https://docs.blender.org/api/current/bpy.types.BlendDataLibraries.html
https://blender.stackexchange.com/questions/17876/import-object-without-bpy-ops-wm-link-append/33998#33998
https://blender.stackexchange.com/questions/34540/how-to-link-append-a-data-block-using-the-python-api?noredirect=1&lq=1
"""
# Append object from .blend file
with bpy.data.libraries.load(filepath, link = False,assets_only = True) as (data_from, data_to):
data_to.objects = data_from.objects
# Link object to current scene
for obj in data_to.objects:
if obj is not None:
self.__foreground_object_collection.objects.link(obj)
def __posson_disc_sampling(self):
"""Generate the sampling with a spatially variable sampling radius."""
# The poisson_disc_sampling function seems to fail occasionally (MTBF: 2000-3000 cycles) and return an empty list [],
# so loop until len(self.__particle_coordinates) is >= 1
while self.__n_particle == None or self.__n_particle == 0:
self.__particle_coordinates = poissonDiscSampling.poisson_disc_sampling(radius = self.foreground_poisson_disk_sampling_radius,
sample_domain_size = self.__foreground_domain_size,
sample_rejection_threshold = 30)
self.__n_particle = len(self.__particle_coordinates)
print(f"nParticle Prev : {self.__n_particle}") # Show posson disc sampling caculated particle num
loc_offset = np.array([self.__foreground_domain_size[0]/2,self.__foreground_domain_size[1]/2,-2])
self.__particle_coordinates -= loc_offset
def __import_foreground_object_asset(self):
"""Import a number of __n_particle foreground objects into current blender scene."""
# n_particle must be at least num_foreground_object_in_scene
if self.__n_particle < self.__num_foreground_object_in_scene:
print('Warning!!! nParticle:{} must be bigger than fg_obj_in_scene_num:{}'.format(self.__n_particle,self.__num_foreground_object_in_scene))
input("Press Enter to continue...")
sys.exit()
# Get foreground object asset path
foreground_object_path_list = glob.glob(os.path.join(self.asset_foreground_object_folder_path, "*.blend"))
self.__error_check(asset_path_list = foreground_object_path_list)
num_fg_obj = len(foreground_object_path_list)
print("num fg obj in folder: {}".format(num_fg_obj))
# Shuffle foreground_object_path_list
random.shuffle(foreground_object_path_list)
# If more instances are needed than there are assets, import the asset list repeatedly
if self.__num_foreground_object_in_scene >= num_fg_obj:
# Loop-import foreground objects
num_loop = self.__num_foreground_object_in_scene // num_fg_obj
num_remain = self.__num_foreground_object_in_scene % num_fg_obj
for i in range(num_loop):
for fg_obj_path in foreground_object_path_list:
self.__load_object(filepath = fg_obj_path)
if num_remain != 0:
for i in range(num_remain):
self.__load_object(filepath = foreground_object_path_list[i])
else:
# Randomly select n(n=num_foreground_object_in_scene) fg_obj from foreground_object_path_list, then import to scene
foreground_object_path_list_selected = random.sample(foreground_object_path_list, self.__num_foreground_object_in_scene)
for fg_obj_path in foreground_object_path_list_selected:
self.__load_object(filepath = fg_obj_path)
def __check_particle_in_cam_view(self):
"""Check if the particles from the Poisson disk sampling are within the camera's view.
References
----------
https://blender.stackexchange.com/questions/284884/what-does-world-to-camera-view-depend-on
https://blender.stackexchange.com/questions/258000/how-to-update-world-transformation-matrices-without-calling-a-scene-update/258002#258002
"""
# Update camera object matrix_world
self.__camera.matrix_world = self.__camera.matrix_basis
for coordinates in self.__particle_coordinates:
# World space to ndc space
vector_p = Vector(coordinates)
co_ndc = world_to_camera_view(self.__scene, self.__camera, vector_p)
# Check whether the point is inside the camera frustum
if (0.0 < co_ndc.x < 1.0 and 0.0 < co_ndc.y < 1.0 and self.__clip_start < co_ndc.z < self.__clip_end):
self.__particle_coordinates_can_see_in_view.append(coordinates)
# Update __particle_coordinates and __n_particle var value
self.__particle_coordinates = np.array(self.__particle_coordinates_can_see_in_view)
self.__n_particle = len(self.__particle_coordinates_can_see_in_view)
def foreground_object_placement_randomize(self):
"""Generate foreground.
References
----------
[1]https://stackoverflow.com/questions/14262654/numpy-get-random-set-of-rows-from-2d-array
"""
self.__num_foreground_object_in_scene = random.randint(self.num_foreground_object_in_scene_range["min"], self.num_foreground_object_in_scene_range["max"])
# PoissonDiskSampling
self.__posson_disc_sampling()
# Select particles which can see in cam view
self.__check_particle_in_cam_view()
# Import foreground object assets
self.__import_foreground_object_asset()
# Randomly select n(n=num_foreground_object_in_scene) location from __particle_coordinates [1]
selected_indices = np.random.choice(self.__particle_coordinates.shape[0],
size = self.__num_foreground_object_in_scene,
replace = False)
fg_location = self.__particle_coordinates[selected_indices]
print("fg_num: {} ".format(len(fg_location)))
print("fg_location:\n {} ".format(fg_location))
# Move all foreground objects to fg_location
fg_obj_list = []
for fg_obj in self.__foreground_object_collection.objects:
if fg_obj.type == "ARMATURE": # Select armature object only
fg_obj_list.append(fg_obj)
for i in range(self.__num_foreground_object_in_scene):
obj_location = (fg_location[i][0],fg_location[i][1], fg_location[i][2])
fg_obj_list[i].location = obj_location
print("Particles in cam view num : {}".format(self.__n_particle)) # Show particle in cam view num
print("Foreground Object Placement Randomize COMPLERED !!!")
if __name__ == '__main__':
randomizer = ForegroundObjectPlacementRandomizer()
randomizer.foreground_object_placement_randomize()
|
MichaelLiLee/Synthetic-Data-Generator-for-Human-Detection
|
HumanSDG/HumanSDG_020_ForegroundObjectPalcementRandomizer.py
|
HumanSDG_020_ForegroundObjectPalcementRandomizer.py
|
py
| 11,870 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "bpy.data",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "bpy.data",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "bpy.data.libraries.load",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "util.poissonDiscSampling.poisson_disc_sampling",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "util.poissonDiscSampling",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "mathutils.Vector",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "bpy_extras.object_utils.world_to_camera_view",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 202,
"usage_type": "attribute"
}
] |
21705466300
|
from os.path import basename
from glob import glob
from tqdm import tqdm
def main():
"""
Embed the singer name into the p16 field of full label files.
"""
# Specify the folder containing the full label files
label_dir = input('label_dir: ').strip('"')
# Collect all full label files
l = glob(f'{label_dir}/**/*.lab', recursive=True)
# Embed the singer name into the p16 part of each label file
for path_label in tqdm(l):
singer = basename(path_label).split('__')[0]
with open(path_label, 'r') as fl:
s = fl.read()
s = s.replace(']xx/A:', f']{singer}/A:')
with open(path_label, 'w') as fl:
fl.write(s)
if __name__ == '__main__':
main()
|
oatsu-gh/nnsvs_mixed_db
|
recipe/00-svs-world/utils/set_singername_p16.py
|
set_singername_p16.py
|
py
| 763 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 16,
"usage_type": "call"
}
] |
44248037473
|
import cv2
import numpy as np
import glob
import uuid
import caffe
import skimage.io
from util import histogram_equalization
from scipy.ndimage import zoom
from skimage.transform import resize
import random
#from project_face import project_face
from matplotlib import pyplot as plt
import dlib
from project_face import frontalizer
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
class mouth_detector():
def __init__(self):
self.PATH_face_model = '../lib/shape_predictor_68_face_landmarks.dat'
self.md_face = dlib.shape_predictor(self.PATH_face_model)
self.fronter = frontalizer('../lib/ref3d.pkl')
self.face_det = dlib.get_frontal_face_detector() #HOG
def mouth_detect_single(self,image,isPath):
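# Pipeline: resize to 300x300, equalize histogram, detect the face (HOG), fit 68 landmarks,
# frontalize, crop the mouth region, and return a 32x32 grayscale patch plus the face box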
if isPath == True:
img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
else:
img = image
img = cv2.resize(img, (300, 300), interpolation = cv2.INTER_CUBIC) #experimental
img = histogram_equalization(img)
facedets = self.face_det(img,1)
if len(facedets) > 0:
facedet_obj= facedets[0]
#cv2.rectangle(img, (facedet_obj.left(),facedet_obj.top()),(facedet_obj.right(),facedet_obj.bottom()),(0,255,0),4,0)
shape = self.md_face(img,facedet_obj)
p2d = np.asarray([(shape.part(n).x, shape.part(n).y,) for n in range(shape.num_parts)], np.float32)
#for n in range(shape.num_parts):
# cv2.circle(img, (shape.part(n).x,shape.part(n).y), 2, (0,0,255), thickness=4, lineType=8, shift=0)
rawfront, symfront = self.fronter.frontalization(img,facedet_obj,p2d)
symfront_bgr = cv2.cvtColor(symfront, cv2.COLOR_RGB2BGR)
face_hog_mouth = symfront_bgr[165:220, 130:190] #get half-bottom part
#face_hog = symfront_bgr[100:200, 110:205] #get face region for display
if(face_hog_mouth is not None):
gray_img = cv2.cvtColor(face_hog_mouth, cv2.COLOR_BGR2GRAY)
crop_img_resized = cv2.resize(gray_img, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
#crop_img_resized_full = cv2.resize(symfront_bgr, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation = cv2.INTER_CUBIC)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_crop_rezized.jpg",crop_img_resized)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face.jpg",img)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_front.jpg",symfront_bgr)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_mouth.jpg",face_hog_mouth)
#cv2.imwrite("../img/output_test_img/mouthdetectsingle_face_front_.jpg",face_hog)
return crop_img_resized,facedet_obj.left(),facedet_obj.top(),facedet_obj.right(),facedet_obj.bottom()
else:
return None,-1,-1,-1,-1
else:
return None,-1,-1,-1,-1
def mouth_detect_bulk(self,input_folder,output_folder):
transformed_data_set = [img for img in glob.glob(input_folder+"/*jpg")]
for in_idx, img_path in enumerate(transformed_data_set):
mouth,left,top,right,bottom = self.mouth_detect_single(img_path,True)
if mouth is None:
continue
if 'showingteeth' in img_path:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+"_showingteeth.jpg"
cv2.imwrite(path,mouth)
else:
guid = uuid.uuid4()
uid_str = guid.urn
str_guid = uid_str[9:]
path = output_folder+"/"+str_guid+".jpg"
cv2.imwrite(path,mouth)
def negative_image(self,imagem):
imagem = (255-imagem)
return imagem
def adaptative_threashold(self,input_img_path):
img = cv2.imread(input_img_path,0)
img = cv2.medianBlur(img,3)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
#cv2.imwrite("../img/output_test_img/hmouthdetectsingle_adaptative.jpg",th3)
return th3
|
juanzdev/TeethClassifierCNN
|
src/mouth_detector_dlib.py
|
mouth_detector_dlib.py
|
py
| 4,369 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "dlib.shape_predictor",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "project_face.frontalizer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "dlib.get_frontal_face_detector",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_UNCHANGED",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "util.histogram_equalization",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_CUBIC",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "cv2.medianBlur",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_MEAN_C",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "cv2.adaptiveThreshold",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 97,
"usage_type": "attribute"
}
] |
42367773251
|
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler
from ..Apps import Apps
from ..Exceptions import AsyncyError
from ..Sentry import Sentry
class BaseHandler(RequestHandler):
logger = None
# noinspection PyMethodOverriding
def initialize(self, logger):
self.logger = logger
def handle_story_exc(self, app_id, story_name, e):
# Always prefer the app logger if the app is available.
try:
logger = Apps.get(app_id).logger
except BaseException:
logger = self.logger
logger.error(f'Story execution failed; cause={str(e)}', exc=e)
self.set_status(500, 'Story execution failed')
self.finish()
if isinstance(e, AsyncyError):
Sentry.capture_exc(e, e.story, e.line)
else:
if story_name is None:
Sentry.capture_exc(e)
else:
Sentry.capture_exc(e, extra={
'story_name': story_name
})
def is_finished(self):
return self._finished
def is_not_finished(self):
return self.is_finished() is False
|
rashmi43/platform-engine
|
asyncy/http_handlers/BaseHandler.py
|
BaseHandler.py
|
py
| 1,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tornado.web.RequestHandler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Apps.Apps.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "Apps.Apps",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "Exceptions.AsyncyError",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "Sentry.Sentry.capture_exc",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Sentry.Sentry",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "Sentry.Sentry.capture_exc",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "Sentry.Sentry",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "Sentry.Sentry.capture_exc",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "Sentry.Sentry",
"line_number": 32,
"usage_type": "name"
}
] |
12771403336
|
import tensorflow as tf
from yolo import YOLO, detect_video
from PIL import Image
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "1"
def detect_img(yolo):
img = '10.jpg'
try:
image = Image.open(img)
except Exception as e:
print('Open Error! Try again!')
print(e)
else:
r_image = yolo.detect_image(image)
r_image.show()
# detect_img(YOLO())
path = '3.mp4'
output = './result/333333333.mp4'
detect_video(YOLO(), output_path=output)
|
Jerry-Z464/yolo
|
keras-yolo3/test.py
|
test.py
|
py
| 490 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.environ",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "yolo.detect_image",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "yolo.detect_video",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "yolo.YOLO",
"line_number": 22,
"usage_type": "call"
}
] |
42672162843
|
# -*- coding: utf-8 -*-
"""
Created on Apr 7 2021
Modified on May 05 2021
@author: Andres Sandino
Convert "nii" image format in "png" in Lung WW=-500,WL=1500
"""
#%%
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import nibabel as nib
# Patient number
patient_no = 1
# Origin path and filename
path = 'C:/Users/Andres/Desktop/CTAnotado/resultados/Dr Alvarado/'
filename = 'maskEstudio1.nii'
# Dest path
destpath = 'C:/Users/Andres/Desktop/CovidImages/Mask/'
# Load Image
img = nib.load(path+filename)
img = img.get_fdata()
# Image format
imgformat = '.png'
array=np.asarray(img)
#%%
[width,length,numslices]=np.shape(array)
[m,n,t]=np.shape(array)
#for i in range(numslices):
for i in range(35,40):
#print(i)
# List is flipped
a=numslices-1-i
slide = array[:,:,a]
#Labeling files
filename='P'+str(patient_no).zfill(4)+'_Im'+str(numslices-a).zfill(4)+'_mask'+imgformat
print(filename)
# Rotate the image 90°, then flip it left-right
im2=np.rot90(slide)
# for i in range(4):
# im2=np.rot90(im2)
#im3=im2.copy()
im3=np.fliplr(im2)
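# Scale intensities to the 0-255 range and write the slice out as an 8-bit PNG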
norm_img=cv2.normalize(im3, None, alpha = 0,
beta = 255,
norm_type = cv2.NORM_MINMAX,
dtype = cv2.CV_32F)
norm_img=np.uint8(norm_img)
cv2.imwrite(destpath+filename, norm_img)
#plt.figure()
#plt.axis('off')
#plt.imshow(norm_img,cmap="gray")
#plt.title('slide'+str(t-a))
|
andres87sg/LungCT
|
ConvertImages/get_nii_LungMask.py
|
get_nii_LungMask.py
|
py
| 1,553 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "nibabel.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.rot90",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.fliplr",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "cv2.normalize",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cv2.NORM_MINMAX",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "cv2.CV_32F",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.uint8",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 71,
"usage_type": "call"
}
] |
26625675366
|
from django import template
import re
try:
from django.utils.safestring import mark_safe
except ImportError:
mark_safe = lambda s:s
register = template.Library()
def rfc3339_date(date):
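# Format a datetime as an RFC 3339 / Atom timestamp, e.g. 2021-05-05T12:00:00Z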
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
register.filter('atom_date', rfc3339_date)
def atom_tag_uri(url, date=None):
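# Build an RFC 4151-style tag URI from a URL, e.g. tag:example.com,2004-05-27:/blog/entry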
tag = re.sub('^https?://', '', url)
if date:
tag = re.sub('/', ',%s:/' % date.strftime('%Y-%m-%d'), tag, 1)
tag = re.sub('#', '/', tag)
return 'tag:' + tag
register.filter('atom_tag_uri', atom_tag_uri)
def feed_safe_name(name):
return name.replace(' ', '_').lower()
register.filter('feed_safe_name', feed_safe_name)
GOOGLE_TAGS = ('actor', 'age', 'age_range', 'agent', 'area', 'artist', 'aspect_ratio',
'author', 'bathrooms', 'battery_life', 'bedrooms', 'binding', 'brand', 'broker',
'calories', 'capacity', 'cholesterol', 'color', 'color_output', 'condition',
'cooking_time', 'course', 'course_date_range', 'course_number', 'course_times',
'cuisine', 'currency', 'department', 'description', 'director', 'display_type',
'edition', 'education', 'employer', 'ethnicity', 'event_date_range', 'event_type',
'expiration_date', 'expiration_date_time', 'feature', 'fiber', 'film_type', 'focus_type',
'format', 'from_location', 'functions', 'gender', 'genre', 'heel_height', 'height',
'hoa_dues', 'id', 'image_link', 'immigration_status', 'installation', 'interested_in',
'isbn', 'job_function', 'job_industry', 'job_type', 'language', 'length', 'link',
'listing_status', 'listing_type', 'load_type', 'location', 'lot_size', 'made_in',
'main_ingredient', 'make', 'marital_status', 'material', 'meal_type', 'megapixels',
'memory_card_slot', 'mileage', 'mls_listing_id', 'mls_name', 'model', 'model_number',
'mpn', 'name_of_item_reviewed', 'news_source', 'occasion', 'occupation', 'open_house_date_range',
'operating_system', 'optical_drive', 'pages', 'payment_accepted', 'payment_notes', 'performer',
'pickup', 'platform', 'preparation_time', 'price', 'price_type', 'processor_speed', 'product_type',
'property_taxes', 'property_type', 'protein', 'provider_class', 'provider_name',
'provider_telephone_number', 'publication_name', 'publication_volume', 'publish_date',
'publisher', 'quantity', 'rating', 'recommended_usage', 'resolution', 'review_type',
'reviewer_type', 'salary', 'salary_type', 'saturated_fat', 'school', 'school_district',
'screen_size', 'service_type', 'servings', 'sexual_orientation', 'shipping', 'shoe_width',
'size', 'sleeps', 'sodium', 'style', 'subject', 'tax_percent', 'tax_region', 'tech_spec_link',
'title', 'to_location', 'total_carbs', 'total_fat', 'travel_date_range', 'university', 'upc',
'url_of_item_reviewed', 'vehicle_type', 'venue_description', 'venue_name', 'venue_type',
'venue_website', 'vin', 'weight', 'width', 'wireless_interface', 'year', 'zoning', 'zoom'
)
def make_googlebase_option(opt, custom):
"""Convert an option into a tag. First look to see if it is a predefined tag,
if it is, good, use it. Otherwise make a custom tag."""
custom = custom.lower() in ('true','t','1')
return make_googlebase_tag(opt.option_group.name, opt.name,custom)
register.filter('make_googlebase_option', make_googlebase_option)
def make_googlebase_attribute(att, custom):
"""Convert an attribute into a tag. First look to see if it is a predefined tag,
if it is, good, use it. Otherwise make a custom tag."""
custom = custom.lower() in ('true','t','1')
return make_googlebase_tag(att.name, att.value, custom)
register.filter('make_googlebase_attribute', make_googlebase_attribute)
def make_googlebase_tag(key, val, custom):
"""Convert a key/val pair into a tag. First look to see if it is a predefined tag,
if it is, good, use it. Otherwise make a custom tag."""
key = feed_safe_name(key)
if key in GOOGLE_TAGS:
tag = "<g:%s>%s</g:%s>"
elif key.endswith('s') and key[:-1] in GOOGLE_TAGS:
key = key[:-1]
tag = "<g:%s>%s</g:%s>"
elif custom:
tag = "<c:%s:string>%s</c:%s:string>"
else:
tag = None
if tag:
return mark_safe(tag % (key, val, key))
else:
return ""
def stripspaces(s):
s = re.sub(r'^\s+', '', s)
s = re.sub(r'\s+$', '', s)
s = s.replace('\n\n','\n')
return s
register.filter('stripspaces', stripspaces)
|
dokterbob/satchmo
|
satchmo/apps/satchmo_ext/product_feeds/templatetags/satchmo_feed.py
|
satchmo_feed.py
|
py
| 4,527 |
python
|
en
|
code
| 30 |
github-code
|
6
|
[
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.template.Library",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "re.sub",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.utils.safestring.mark_safe",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 98,
"usage_type": "call"
}
] |
6484090494
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from session.serializers.recent_sessions import RecentSessionSerializer
User = get_user_model()
class ClientListSerializer(serializers.ModelSerializer):
number_of_sessions = serializers.SerializerMethodField()
latest_session = serializers.SerializerMethodField()
def get_number_of_sessions(self, obj):
return obj.client_sessions.count()
def get_latest_session(self, obj):
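        # NOTE: .latest('created') raises DoesNotExist when a client has no sessions;
        # obj.client_sessions.order_by('created').last() would return None instead.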
session = obj.client_sessions.latest('created')
return RecentSessionSerializer(session).data
class Meta:
model = User
fields = [
'id',
'full_name',
'coaches',
'email',
'about',
'location',
'phone_number',
'avatar',
'number_of_sessions',
'latest_session'
]
|
roberttullycarr/cyclingsimulator
|
backend/user/serializers/coach/list_clients.py
|
list_clients.py
|
py
| 909 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "session.serializers.recent_sessions",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "session.serializers.recent_sessions.RecentSessionSerializer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "session.serializers.recent_sessions",
"line_number": 18,
"usage_type": "argument"
}
] |
11896445749
|
from django.http import HttpRequest
from google_optimize.context_processors import google_experiment
def test_experiment_processor():
request = HttpRequest()
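    # the _gaexp cookie encodes the experiment id and the chosen variant index (variant 1 here)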
request.COOKIES["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
experiment = google_experiment(request)
assert experiment == dict(google_optimize={"redesign": "new_design"})
def test_context_processor_template(client):
client.cookies["_gaexp"] = "GAX1.2.utSuKi3PRbmxeG08en8VNw.18147.1"
response = client.get("/test")
assert response.context["google_optimize"] == {"redesign": "new_design"}
|
danihodovic/django-google-optimize
|
tests/test_context_processors.py
|
test_context_processors.py
|
py
| 585 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "django.http.HttpRequest",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "google_optimize.context_processors.google_experiment",
"line_number": 9,
"usage_type": "call"
}
] |
7357482434
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is an example script that uses DIC data from Carrol et al
as input for SIF to find K field and cracktip data
The output is written to a CSV file
@author: Swati Gupta
"""
import SIF_final as SIF
import numpy as np
from os import walk
import pdb
from datetime import datetime
import csv, timeit
from pandas import read_csv
synData = 0
r2 = 100
alpha = 2
N=-1
noise = -1
# provide path to directory containing displacement data files
path = 'DICresults/'
filenames = next(walk(path), (None, None, []))[2]
# exclude temp files (filter into a new list; removing while iterating skips entries)
filenames = [f for f in filenames if not f.startswith('.')]
filenames.sort() #sort assuming files are named chronologically
# initialize
lenF = len(filenames)
cracktip1 = np.zeros((lenF-1,2)) # Geo
cracktip2 = np.zeros((lenF-1,2)) # Sep
cracktip3 = np.zeros((lenF-1,2)) # DC
K_field1 = np.zeros((lenF-1,6)) # Geo
K_field2 = np.zeros((lenF-1,6)) # Sep
K_field3 = np.zeros((lenF-1,6)) # DC
discr = 10
geo = 0 # = 1 if use geometrical method too or 0 if only use separability
mat_constants = [0.327,109.1,43] #poisson's ratio, E, shear modulus
prev = [545, 315]
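# initial crack-tip guess; re-enabling 'prev = cracktip2[i]' in the loop below would track the tip between frames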
startT = timeit.default_timer()
#loop over the set of files
for i in range(0,lenF-1):
file1 = path+filenames[i]
print('filename \n', file1)
# pdb.set_trace()
data = read_csv(file1)
x = data['x'].values
y = data['y'].values
u = data['u'].values
v = data['v'].values
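    # NOTE: the r2 and alpha passed here override the module-level values set above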
cracktip1[i],cracktip2[i],cracktip3[i],K_field1[i],K_field2[i], K_field3[i] = \
SIF.SIF_projection(synData, r2 = 50, alpha = 2.5,coords = np.array([x,y]).T,
coords_ref = np.array([x+u, y+v]).T, guess = prev, h=discr,geo = geo, constants = mat_constants)
# prev = cracktip2[i]
endT = timeit.default_timer()
print("Time taken:", round(endT - startT, 2), "seconds to analyze ", lenF, "files")
## write output to file ##
currentDT = datetime.now() # get current date and time
outputFile = "DIC_" + str(currentDT) + ".csv"
with open(outputFile, 'w') as f:
writer = csv.writer(f)
if geo:
writer.writerow(['S.no','filename','x_geo','y_geo','K_I_geo','K_II_geo','T_geo','x_sep','y_sep','K_I_sep','K_II_sep','T_sep'])
writer.writerows(zip(range(1,lenF),filenames,cracktip1[:,0], cracktip1[:,1],K_field1[:,2],K_field1[:,3],K_field1[:,4],
cracktip2[:,0], cracktip2[:,1],K_field2[:,2],K_field2[:,3],K_field2[:,4]))
else:
writer.writerow(['S.no','filename','x','y','K_I','K_II','T'])
writer.writerows(zip(range(1,lenF),filenames,cracktip2[:,0], cracktip2[:,1],K_field2[:,2],K_field2[:,3],K_field2[:,4]))
|
sg759/separability
|
DICexample.py
|
DICexample.py
|
py
| 2,731 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.walk",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "SIF_final.SIF_projection",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 73,
"usage_type": "call"
}
] |
26336217910
|
import streamlit as st
import extra_streamlit_components as stx
from datetime import datetime, timedelta
import Scripts.constants as constants
@st.experimental_singleton(suppress_st_warning=True)
def get_manager():
return stx.CookieManager()
# def get_user_cookies():
# COOKIES = constants.COOKIES.get(constants.COOKIE_ID, None)
# # print("COOKIES", COOKIES)
# if COOKIES != None:
# COOKIES = [x.strip() for x in COOKIES.split(";")]
# constants.CURR_USER = COOKIES[0]
# constants.CURR_USER_IS_DOC = eval(COOKIES[1])
# def set_user_cookies(VALUE):
# # Set final date of expiry
# # set the cookie
# VALUE = ''.join([VALUE[0], ";", str(VALUE[1])])
# constants.COOKIES[constants.COOKIE_ID] = VALUE
# constants.COOKIES.save()
def get_user_cookies():
COOKIES = constants.COOKIE_MANAGER.get_all()
COOKIE = COOKIES.get(constants.COOKIE_ID, None)
    if COOKIE is not None:
COOKIE = [x.strip() for x in COOKIE.split(";")]
constants.CURR_USER = COOKIE[0]
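        # eval() executes arbitrary text from the cookie; COOKIE[1] == 'True' would be a safer parse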
constants.CURR_USER_IS_DOC = eval(COOKIE[1])
def set_user_cookies(VALUE):
constants.COOKIE_MANAGER = get_manager()
# Set final date of expiry
EXPIRES_AT = datetime.now() + timedelta(days=constants.EXPIRES_IN_DAYS)
# set the cookie
constants.COOKIE_MANAGER.set(
cookie = constants.COOKIE_ID,
val = ''.join([VALUE[0], ";", str(VALUE[1])]),
expires_at = EXPIRES_AT
)
constants.CURR_USER = VALUE[0]
constants.CURR_USER_IS_DOC = VALUE[1]
|
PeaPals/docnets
|
Scripts/cookie_manager.py
|
cookie_manager.py
|
py
| 1,550 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "extra_streamlit_components.CookieManager",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.experimental_singleton",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "Scripts.constants.COOKIE_MANAGER.get_all",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "Scripts.constants.COOKIE_MANAGER",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.COOKIE_ID",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.CURR_USER",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.CURR_USER_IS_DOC",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.COOKIE_MANAGER",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "Scripts.constants.EXPIRES_IN_DAYS",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.COOKIE_MANAGER.set",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "Scripts.constants.COOKIE_MANAGER",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.COOKIE_ID",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.CURR_USER",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "Scripts.constants.CURR_USER_IS_DOC",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "Scripts.constants",
"line_number": 60,
"usage_type": "name"
}
] |
8201566770
|
from typing import Dict
import os
import shutil
from hexlib.db import Table, PersistentState
import pickle
from tesseract import get_tesseract_langs
import sqlite3
from config import LOG_FOLDER, logger
from sist2 import SearchBackendType, Sist2SearchBackend
RUNNING_FRONTENDS: Dict[str, int] = {}
TESSERACT_LANGS = get_tesseract_langs()
DB_SCHEMA_VERSION = "5"
from pydantic import BaseModel
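# Pydantic models are pickled before storage; _deserialize below unpickles any bytes read back.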
def _serialize(item):
if isinstance(item, BaseModel):
return pickle.dumps(item)
if isinstance(item, bytes):
raise Exception("FIXME: bytes in PickleTable")
return item
def _deserialize(item):
if isinstance(item, bytes):
return pickle.loads(item)
return item
class PickleTable(Table):
def __getitem__(self, item):
row = super().__getitem__(item)
if row:
return dict((k, _deserialize(v)) for k, v in row.items())
return row
def __setitem__(self, key, value):
value = dict((k, _serialize(v)) for k, v in value.items())
super().__setitem__(key, value)
def __iter__(self):
for row in super().__iter__():
yield dict((k, _deserialize(v)) for k, v in row.items())
def sql(self, where_clause, *params):
for row in super().sql(where_clause, *params):
yield dict((k, _deserialize(v)) for k, v in row.items())
def get_log_files_to_remove(db: PersistentState, job_name: str, n: int):
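    # keep the n most recent task logs for this job and return older entries for deletion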
if n < 0:
return []
counter = 0
to_remove = []
for row in db["task_done"].sql("WHERE has_logs=1 ORDER BY started DESC"):
if row["name"].endswith(f"[{job_name}]"):
counter += 1
if counter > n:
to_remove.append(row)
return to_remove
def delete_log_file(db: PersistentState, task_id: str):
db["task_done"][task_id] = {
"has_logs": 0
}
try:
os.remove(os.path.join(LOG_FOLDER, f"sist2-{task_id}.log"))
    except OSError:
        # the log file may already be gone
        pass
def migrate_v1_to_v2(db: PersistentState):
shutil.copy(db.dbfile, db.dbfile + "-before-migrate-v2.bak")
# Frontends
db._table_factory = PickleTable
frontends = [row["frontend"] for row in db["frontends"]]
del db["frontends"]
db._table_factory = Table
for frontend in frontends:
db["frontends"][frontend.name] = frontend
list(db["frontends"])
# Jobs
db._table_factory = PickleTable
jobs = [row["job"] for row in db["jobs"]]
del db["jobs"]
db._table_factory = Table
for job in jobs:
db["jobs"][job.name] = job
list(db["jobs"])
db["sist2_admin"]["info"] = {
"version": "2"
}
def create_default_search_backends(db: PersistentState):
es_backend = Sist2SearchBackend.create_default(name="elasticsearch",
backend_type=SearchBackendType("elasticsearch"))
db["search_backends"]["elasticsearch"] = es_backend
sqlite_backend = Sist2SearchBackend.create_default(name="sqlite", backend_type=SearchBackendType("sqlite"))
db["search_backends"]["sqlite"] = sqlite_backend
def migrate_v3_to_v4(db: PersistentState):
shutil.copy(db.dbfile, db.dbfile + "-before-migrate-v4.bak")
create_default_search_backends(db)
try:
conn = sqlite3.connect(db.dbfile)
conn.execute("ALTER TABLE task_done ADD COLUMN has_logs INTEGER DEFAULT 1")
conn.commit()
conn.close()
except Exception as e:
logger.exception(e)
db["sist2_admin"]["info"] = {
"version": "4"
}
|
simon987/sist2
|
sist2-admin/sist2_admin/state.py
|
state.py
|
py
| 3,537 |
python
|
en
|
code
| 652 |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tesseract.get_tesseract_langs",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "pickle.dumps",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pickle.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "hexlib.db.Table",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "hexlib.db.PersistentState",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "hexlib.db.PersistentState",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "config.LOG_FOLDER",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "hexlib.db.PersistentState",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "shutil.copy",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "hexlib.db.Table",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "hexlib.db.Table",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "hexlib.db.PersistentState",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "sist2.Sist2SearchBackend.create_default",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "sist2.Sist2SearchBackend",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "sist2.SearchBackendType",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "sist2.Sist2SearchBackend.create_default",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "sist2.Sist2SearchBackend",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "sist2.SearchBackendType",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "hexlib.db.PersistentState",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "shutil.copy",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "config.logger.exception",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "config.logger",
"line_number": 132,
"usage_type": "name"
}
] |
40887076205
|
# Tests for task components
import unittest
from pyodbc import Connection as PyodbcConnection
from connections import Connection1
from task_classes.db.mssqldb import MSSqlTarget
from task_classes.csv_task_classes import PrepareCsvBulkPackages
class TestMSSqlTarget(unittest.TestCase):
"""Класс тестирования MSSqlTarget"""
def setUp(self):
"""Проверка создания объекта"""
self.ms_sql_target = MSSqlTarget(
host=Connection1().host,
user=Connection1().user,
password=Connection1().password,
database=Connection1().database,
table=Connection1().table,
update_id='test1'
)
        self.assertIsInstance(self.ms_sql_target, MSSqlTarget, "The object should be created correctly")
def test_01_connect(self):
"""Провека соединения с базой данных"""
db_conn = self.ms_sql_target.connect()
        self.assertIsInstance(db_conn, PyodbcConnection, "The connection to Connection1 should succeed")
def test_02_touch(self):
"""Проверка записи в таблицу сессий загрузки"""
self.ms_sql_target.create_marker_table()
self.ms_sql_target.touch()
        self.assertTrue(self.ms_sql_target.exists(), "The load should be registered and marked as completed")
class TestCsv(unittest.TestCase):
"""Класс тестирования обработчиков csv-файлов"""
def setUp(self):
self.csv_task = PrepareCsvBulkPackages()
def test_02_package(self):
# print(self.csv_task.bulk_packages_directory)
self.assertEqual(self.csv_task.bulk_packages_directory, r'D:\temp\data\packages',
                         'The Bulk package directory should be set correctly')
def test_03_filename(self):
print(self.csv_task.filename)
self.assertEqual(self.csv_task.filename, r'D:\temp\data\packages\package.csv',
                         'The Bulk package file should be set correctly')
if __name__ == '__main__':
unittest.main(failfast=True)
|
Foresco/luigivar
|
tests.py
|
tests.py
|
py
| 2,370 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "task_classes.db.mssqldb.MSSqlTarget",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "connections.Connection1",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "connections.Connection1",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "connections.Connection1",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "connections.Connection1",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "connections.Connection1",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "task_classes.db.mssqldb.MSSqlTarget",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "pyodbc.Connection",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "unittest.TestCase",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "task_classes.csv_task_classes.PrepareCsvBulkPackages",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 58,
"usage_type": "call"
}
] |
10068857131
|
import cv2
import numpy as np
from time import sleep
import os
# global variables
bg = None
def run_avg(image, aWeight):
global bg
# initialize the background
if bg is None:
bg = image.copy().astype("float")
return
# compute weighted average, accumulate it and update the background
cv2.accumulateWeighted(image, bg, aWeight)
def segment(image, threshold=25):
global bg
# find the absolute difference between background and current frame
diff = cv2.absdiff(bg.astype("uint8"), image)
# threshold the diff image so that we get the foreground
thresholded = cv2.threshold(diff,
threshold,
255,
cv2.THRESH_BINARY)[1]
# get the contours in the thresholded image
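    # NOTE: the 3-tuple unpacking below matches OpenCV 3.x; on OpenCV 4 use:
    #   cnts, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)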
(_, cnts, _) = cv2.findContours(thresholded.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# return None, if no contours detected
if len(cnts) == 0:
return
else:
# based on contour area, get the maximum contour which is the hand
segmented = max(cnts, key=cv2.contourArea)
return (thresholded, segmented)
if __name__ == "__main__" :
index = 0
aWeight = 0.5
camera = cv2.VideoCapture(0)
top, right, bottom, left = 10, 470, 250, 750
num_frames = 0
r = ""
while True:
(grabbed, frame) = camera.read()
frame = cv2.flip(frame, 1)
cv2.putText(frame,"predictio is "+r, (20,100), cv2.FONT_HERSHEY_PLAIN , 1.5, 100)
clone = frame.copy()
(height, width) = frame.shape[:2]
roi = frame[top:bottom, right:left]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
if num_frames < 30:
run_avg(gray, aWeight)
else:
hand = segment(gray)
if hand is not None:
index +=1
if index % 30 == 0 :
(thresholded, segmented) = hand
thresholded = cv2.resize(thresholded, (64, 64))
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255))
cv2.imshow("Thresholded", thresholded)
sleep(3)
path = "test"+str(index)+".jpg"
cv2.imwrite(path,thresholded)
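                    # shells out to predict.py for each capture; loading the model once in-process would avoid the startup cost per frame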
r = os.popen("python predict.py "+path).read()[:-1]
print("prediction is ",r)
os.popen("rm -fr "+path).read()
print("images taken: {}".format(index))
cv2.rectangle(clone, (left, top), (right, bottom), (0,255,0), 2)
num_frames += 1
cv2.imshow("recording", clone)
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q") :
break
cv2.destroyWindow("recording")
cv2.destroyWindow("Thresholded")
    camera.release()
|
RemonIbrahimNashed/HandGestureUseingCNN
|
live.py
|
live.py
|
py
| 2,732 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.accumulateWeighted",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.absdiff",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "cv2.contourArea",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "cv2.flip",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_PLAIN",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "cv2.destroyWindow",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.destroyWindow",
"line_number": 91,
"usage_type": "call"
}
] |
73919543869
|
from django.shortcuts import render
from resources.models import Resource
def resources(request):
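    # hidden resources are excluded; ordering follows the explicit 'order' field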
resources = Resource.objects.all().order_by('order').filter(hidden=False)
context = {
'resources': resources
}
return render(request, 'resources.html', context)
|
ctiller15/Humanity-first-tracker
|
resources/views.py
|
views.py
|
py
| 288 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "resources.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "resources.models.Resource.objects.all",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "resources.models.Resource.objects",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "resources.models.Resource",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "resources.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
}
] |
36733100195
|
import os
import sys
import logging
import MySQLdb
#import datetime
logger = logging.getLogger(__name__)
locz = []
locz_file = ''
# 'locz' table fields: chat_id, chat_title, user_id, user_name, date_time, latitude, longitude
def add_loc(mess):
    locstr = (f'chat.id:{mess.chat.id},chat.title:{mess.chat.title},'
              f'user.id:{mess.from_user.id},user.username:{mess.from_user.username},'
              f'message.date:{mess.date},location.latitude:{mess.location.latitude},'
              f'location.longitude:{mess.location.longitude}')
locz.append(locstr)
# logger.info('locstr: ' + locstr)
#sql = """insert into locz values({0}, '{1}', {2}, '{3}', '{4}', {5}, {6}).format(mess.chat.id, mess.chat.title, mess.from_user.id, mess.from_user.username, datetime.datetime.strptime(mess.date, '%Y-%m-%d %H:%M:%S'), mess.location.latitude, mess.location.longitude)
sql = """insert into locz values({0}, '{1}', {2}, '{3}', '{4}', {5}, {6})""".format(mess.chat.id, mess.chat.title, mess.from_user.id, mess.from_user.username, mess.date, mess.location.latitude, mess.location.longitude)
try:
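        # NOTE: connection credentials are hard-coded here; reading them from config or environment would be safer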
db = MySQLdb.connect(host="nikodim.mysql.pythonanywhere-services.com", user="nikodim", passwd="IkuRa700", db="nikodim$ikuradb", charset='utf8')
try:
db.query(sql)
db.commit()
except:
logger.error('Location record to DB failure. ' + str(sys.exc_info()[0]) + '. sql: ' + sql)
finally:
db.close()
except:
logger.error('DB connection error. ' + str(sys.exc_info()[0]))
save_loc()
logger.info('Location added. ' + locstr)
def select_locz(chat_id, user_id):
sql = """select latitude, longitude, date_time from locz where chat_id = {0} and user_id = {1} order by date_time""".format(chat_id, user_id)
try:
db = MySQLdb.connect(host="nikodim.mysql.pythonanywhere-services.com", user="nikodim", passwd="IkuRa700", db="nikodim$ikuradb", charset='utf8')
try:
db.query(sql)
r = db.store_result()
rows = r.fetch_row(maxrows=0)
except:
logger.error('Select locations from DB failure. ' + str(sys.exc_info()[0]) + '. sql: ' + sql)
finally:
db.close()
if not rows:
            return 'no locations'
else:
res = ''
for tup in rows:
res = res + '|' + str(tup[0]) + ',' + str(tup[1])
return res
except:
logger.error('DB connection error. ' + str(sys.exc_info()[0]))
        return 'error'
def init_locz():
global locz
global locz_file
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
locz_file = os.path.join(THIS_FOLDER, '-locs', 'locz.tdb')
lf = open(locz_file, 'r', encoding='utf-8')
locz = lf.readlines()
lf.close()
locz = [l.replace('\n', '') for l in locz]
def save_loc():
global locz
global locz_file
locz = list(set(locz))
loczz = [l + '\n' for l in locz]
lf = open(locz_file, 'w')
lf.writelines(loczz)
lf.close()
# print('locz.db saved')
init_locz()
|
nikodim500/pyIkuraTeleBot
|
locationstore.py
|
locationstore.py
|
py
| 3,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "MySQLdb.connect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
"usage_type": "attribute"
}
] |
5257520483
|
import boto3
from secretss import accessKey, secretKey
# upload files to AWS S3 bucket
# pass the imported credentials explicitly; otherwise boto3 falls back to its default credential chain
s3 = boto3.client('s3', aws_access_key_id=accessKey, aws_secret_access_key=secretKey)
bucket_name = "mmc-video-bucket"
file_path = r'E:\Programming files\Home-Surveillance\basicvideo.mp4'
object_key = 'basicvideo.mp4'
s3.upload_file(file_path, bucket_name, object_key)
|
Varun-Naik/Home-Surveillance
|
upload_to_s3.py
|
upload_to_s3.py
|
py
| 297 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "boto3.client",
"line_number": 5,
"usage_type": "call"
}
] |
11417571951
|
# -*- coding: utf-8 -*-
"""
(C) 2014-2019 Roman Sirokov and contributors
Licensed under BSD license
http://github.com/r0x0r/pywebview/
"""
import os
import sys
import logging
import json
import shutil
import tempfile
import webbrowser
from threading import Event, Semaphore
from ctypes import windll
from platform import architecture
from webview import WebViewException, _debug, _user_agent
from webview.serving import resolve_url
from webview.util import parse_api_js, interop_dll_path, parse_file_type, inject_base_uri, default_html, js_bridge_call
from webview.js import alert
from webview.js.css import disable_text_select
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Collections')
clr.AddReference('System.Threading')
import System.Windows.Forms as WinForms
from System import IntPtr, Int32, String, Action, Func, Type, Environment, Uri
from System.Threading.Tasks import Task, TaskScheduler, TaskContinuationOptions
from System.Drawing import Size, Point, Icon, Color, ColorTranslator, SizeF
archpath = 'x64' if architecture()[0] == '64bit' else 'x86'
os.environ['Path'] = interop_dll_path(archpath) + ';' + os.environ['Path']
clr.AddReference(interop_dll_path('Microsoft.Web.WebView2.Core.dll'))
clr.AddReference(interop_dll_path('Microsoft.Web.WebView2.WinForms.dll'))
from Microsoft.Web.WebView2.WinForms import WebView2, CoreWebView2CreationProperties
from Microsoft.Web.WebView2.Core import CoreWebView2Environment
logger = logging.getLogger('pywebview')
class EdgeChrome:
def __init__(self, form, window):
self.pywebview_window = window
self.web_view = WebView2()
props = CoreWebView2CreationProperties()
#props.UserDataFolder = os.path.join(os.getcwd(), 'profile')
props.UserDataFolder = os.path.join(os.environ['LOCALAPPDATA'], 'pywebview')
self.web_view.CreationProperties = props
form.Controls.Add(self.web_view)
self.js_results = {}
self.js_result_semaphore = Semaphore(0)
self.web_view.Dock = WinForms.DockStyle.Fill
#settings under on_webview_ready
self.web_view.CoreWebView2Ready += self.on_webview_ready
self.web_view.NavigationStarting += self.on_navigation_start
self.web_view.NavigationCompleted += self.on_navigation_completed
self.web_view.WebMessageReceived += self.on_script_notify
self.url = None
self.ishtml = False
self.html = None
if window.real_url:
self.load_url(window.real_url)
elif window.html:
self.html = window.html
self.load_html(window.html, '')
else:
self.html = default_html
self.load_html(default_html, '')
def evaluate_js(self, script, id, callback=None):
def _callback(result):
if callback is None:
self.js_results[id] = None if result is None or result == '' else json.loads(result)
self.js_result_semaphore.release()
else:
# future js callback option to handle async js method
callback(result)
self.js_results[id] = None
self.js_result_semaphore.release()
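        # run the result callback on the calling (UI) thread via the current synchronization context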
self.syncContextTaskScheduler = TaskScheduler.FromCurrentSynchronizationContext()
try:
result = self.web_view.ExecuteScriptAsync(script).ContinueWith(
Action[Task[String]](
lambda task: _callback(json.loads(task.Result))
),
self.syncContextTaskScheduler)
except Exception as e:
logger.exception('Error occurred in script')
self.js_results[id] = None
self.js_result_semaphore.release()
def get_current_url(self):
return self.url
def load_html(self, content, base_uri):
self.html = content
self.ishtml = True
self.web_view.EnsureCoreWebView2Async(None)
def load_url(self, url):
self.ishtml = False
self.web_view.Source = Uri(url)
def on_script_notify(self, _, args):
try:
func_name, func_param, value_id = json.loads(args.get_WebMessageAsJson())
if func_name == 'alert':
WinForms.MessageBox.Show(func_param)
elif func_name == 'console':
print(func_param)
else:
js_bridge_call(self.pywebview_window, func_name, func_param, value_id)
except Exception as e:
            logger.exception('Exception occurred during on_script_notify')
def on_new_window_request(self, _, args):
args.set_Handled(True)
#webbrowser.open(str(args.get_Uri()))
def on_webview_ready(self, sender, args):
sender.CoreWebView2.NewWindowRequested += self.on_new_window_request
settings = sender.CoreWebView2.Settings
settings.AreDefaultContextMenusEnabled = _debug
settings.AreDefaultScriptDialogsEnabled = True
settings.AreDevToolsEnabled = _debug
settings.IsBuiltInErrorPageEnabled = True
settings.IsScriptEnabled = True
settings.IsWebMessageEnabled = True
settings.IsStatusBarEnabled = _debug
settings.IsZoomControlEnabled = True
if self.html: sender.CoreWebView2.NavigateToString(self.html)
def on_navigation_start(self, sender, args):
pass
def on_navigation_completed(self, sender, args):
url = str(sender.Source)
self.url = None if self.ishtml else url
self.web_view.ExecuteScriptAsync('window.alert = (msg) => window.chrome.webview.postMessage(["alert", msg+"", ""])')
if _debug:
self.web_view.ExecuteScriptAsync('window.console = { log: (msg) => window.chrome.webview.postMessage(["console", msg+"", ""])}')
self.web_view.ExecuteScriptAsync(parse_api_js(self.pywebview_window, 'chromium'))
if not self.pywebview_window.text_select:
self.web_view.ExecuteScriptAsync(disable_text_select)
self.pywebview_window.loaded.set()
|
hanzzhu/chadle
|
venv/Lib/site-packages/webview/platforms/edgechromium.py
|
edgechromium.py
|
py
| 6,044 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "clr.AddReference",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "clr.AddReference",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "clr.AddReference",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "platform.architecture",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "webview.util.interop_dll_path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "clr.AddReference",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "webview.util.interop_dll_path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "clr.AddReference",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "webview.util.interop_dll_path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "Microsoft.Web.WebView2.WinForms.WebView2",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "Microsoft.Web.WebView2.WinForms.CoreWebView2CreationProperties",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "threading.Semaphore",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "System.Windows.Forms.DockStyle",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "System.Windows.Forms",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "webview.util.default_html",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "webview.util.default_html",
"line_number": 78,
"usage_type": "argument"
},
{
"api_name": "json.loads",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "System.Threading.Tasks.TaskScheduler.FromCurrentSynchronizationContext",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "System.Threading.Tasks.TaskScheduler",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "System.Action",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "System.Threading.Tasks.Task",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "System.String",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "System.Uri",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "System.Windows.Forms.MessageBox.Show",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "System.Windows.Forms.MessageBox",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "System.Windows.Forms",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "webview.util.js_bridge_call",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "webview._debug",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "webview._debug",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "webview._debug",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "webview._debug",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "webview.util.parse_api_js",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "webview.js.css.disable_text_select",
"line_number": 159,
"usage_type": "argument"
}
] |
7986369348
|
import basc_py4chan as chanapi
import requests
import argparse
import sys
import os
class FourchanDownloader:
def __init__(self):
self.boards_list = chanapi.get_all_boards()
def run(self):
self.verify_boards()
if len(self.board) == 0:
print("No existing boards selected, you fucking idiot!")
sys.exit(2)
elif self.board[0] == '*':
self.boards = chanapi.get_all_boards()
else:
self.boards = chanapi.get_boards(self.board)
        if self.thread_id is not None:
self.download_threads(self.boards[0])
else:
self.download_boards()
def board_exists(self, board_name):
for board in self.boards_list:
if board.name == board_name:
return True
return False
def thread_exists(self, thread_id):
return self.board[0].thread_exists(thread_id)
def verify_boards(self):
if self.board[0] == '*':
return
        # filter into a new list; removing while iterating skips elements
        self.board = [f for f in self.board if self.board_exists(f)]
def download_threads(self, board):
for tid in self.thread_id:
print(" >Thread #{0} at /{1}/:".format(tid, board.name))
if (board.thread_exists(tid)):
t = board.get_thread(tid)
t.expand()
thread_files = t.files()
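                # files() appears to return a generator: the count consumes it, so a fresh t.files() is iterated below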
thread_files_sum = sum(1 for _ in thread_files)
fnum = 1
print(" =>Closed/sticky/archived?: {0}/{1}/{2}\n =>Bumplimit/imagelimit hit: {3}/{4}\n =>Posts: {5}\n =>Files: {6}\n =>Topic: {7}".format(
t.closed, t.sticky, t.archived, t.bumplimit, t.imagelimit, len(t.all_posts), thread_files_sum, t.topic.text_comment[:50].encode('utf-8')
))
for thread_file in t.files():
print("{0}/{1}".format(fnum, thread_files_sum))
self.download_image(thread_file, "{0}/{1}/{2}".format(self.directory, board.name, tid))
fnum += 1
else:
print(" =>Thread is 404 (don't exists or got deleted)")
print("")
def download_boards(self):
for b in self.boards:
self.thread_id = b.get_all_thread_ids()
self.download_threads(b)
def download_image(self, url, path):
file_name = url.split('/')[-1]
imgpath = "{0}/{1}".format(path, file_name)
if not os.path.exists(path):
os.makedirs(path)
print("Downloading image {0}".format(file_name))
response = requests.get(url, stream=True)
        size = response.headers.get('content-length')
        size = int(size) if size is not None else None  # int(None) would raise a TypeError
        if size is not None and os.path.isfile(imgpath) and os.path.getsize(imgpath) == size:
print("File is already downloaded!")
return
f = open(imgpath, "wb")
if (size is None):
f.write(response.content)
else:
dl = 0
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / size)
sys.stdout.write("\r[{0}{1}]".format('=' * done, ' ' * (50-done)))
sys.stdout.flush()
print("")
def main():
parser = argparse.ArgumentParser(description="Download pics from your favourite fucking boards (or threads). Enter board names, or one board name and threads ID's.", epilog="op is a faggot")
parser.add_argument('-d', '--directory', default="4chan", help="directory or path in which pics will be saved (default: 4chan)")
parser.add_argument('-b', '--board', help="board(s) short name(s) from where pictures will be downloaded (* means all boards, enter multiple with spaces)", nargs='+')
parser.add_argument('-t', '--thread_id', help="thread ID's from where pics will be downloaded (you can enter multiple with spaces)", nargs='+')
dl = FourchanDownloader()
args = parser.parse_args(namespace=dl)
    if dl.board is None:
print("You must enter at least one board, faggot!")
sys.exit(1)
dl.run()
if __name__ == "__main__":
main()
|
SteelPh0enix/4chanDownloader
|
4chan.py
|
4chan.py
|
py
| 4,179 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "basc_py4chan.get_all_boards",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "basc_py4chan.get_all_boards",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "basc_py4chan.get_boards",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 107,
"usage_type": "call"
}
] |
29703199407
|
import bpy
import types
import sys
from select import select
import socket
import errno
import mathutils
import traceback
from math import radians
from bpy.props import *
from ast import literal_eval as make_tuple
from .callbacks import *
from ..nodes.nodes import *
def make_osc_messages(myOscKeys, myOscMsg):
envars = bpy.context.scene.nodeosc_envars
for item in myOscKeys:
if item.dp_format_enable == False:
# we cannot deal with a datapath string that has format syntax
#print( "sending :{}".format(item) )
prop = None
if item.node_type == 1:
prop = eval(item.data_path + ".getValue()")
else:
prop = eval(item.data_path)
# now make the values to be sent a tuple (unless its a string or None)
if isinstance(prop, (bool, int, float)):
prop = (prop,)
elif prop is None:
prop = 'None'
elif isinstance(prop, (mathutils.Vector, mathutils.Quaternion, mathutils.Euler, mathutils.Matrix)):
prop = tuple(prop)
stringProp = str(prop)
if not (item.filter_repetition and envars.repeat_argument_filter_OUT) and stringProp != item.value:
item.value = stringProp
# make sure the osc indices are a tuple
indices = make_tuple(item.osc_index)
if isinstance(indices, int):
indices = (indices,)
# sort the properties according to the osc_indices
if prop is not None and not isinstance(prop, str) and len(indices) > 0:
prop = tuple(prop[i] for i in indices)
myOscMsg[item.osc_address] = prop
return myOscMsg
#######################################
# PythonOSC Server BASE CLASS #
#######################################
class OSC_OT_OSCServer(bpy.types.Operator):
_timer = None
count = 0
#####################################
# CUSTOMIZEABLE FUNCTIONS:
#inputServer = "" #for the receiving socket
#outputServer = "" #for the sending socket
#dispatcher = "" #dispatcher function
def sendingOSC(self, context, event):
pass
# setup the sending server
def setupInputServer(self, context, envars):
pass
# setup the receiving server
def setupOutputServer(self, context, envars):
pass
# add method
def addMethod(self, address, data):
pass
# add default method
def addDefaultMethod():
pass
# start receiving
def startupInputServer(self, context, envars):
pass
# stop receiving
def shutDownInputServer(self, context, envars):
pass
#
#
#####################################
#######################################
# MODAL Function #
#######################################
def modal(self, context, event):
envars = bpy.context.scene.nodeosc_envars
if envars.isServerRunning == False:
return self.cancel(context)
if envars.message_monitor:
if len(envars.error) > 0:
for myError in envars.error:
self.report({myError.type}, myError.name + myError.value)
print(myError.name + myError.value)
envars.error.clear()
if event.type == 'TIMER':
#hack to refresh the GUI
self.count = self.count + envars.output_rate
if envars.message_monitor == True:
if self.count >= 100:
self.count = 0
for area in context.screen.areas:
if area.type == 'VIEW_3D':
area.tag_redraw()
# only available spot where updating the sorcar tree doesn't throw errors...
executeSorcarNodeTrees(context)
try:
start = time.perf_counter()
self.sendingOSC(context, event)
# calculate the execution time
end = time.perf_counter()
bpy.context.scene.nodeosc_envars.executionTimeOutput = end - start
except Exception as err:
self.report({'WARNING'}, "Output error: {0}".format(err))
return self.cancel(context)
return {'PASS_THROUGH'}
#######################################
# Setup OSC Receiver and Sender #
#######################################
def execute(self, context):
envars = bpy.context.scene.nodeosc_envars
if envars.port_in == envars.port_out:
self.report({'WARNING'}, "Ports must be different.")
return{'FINISHED'}
if envars.isServerRunning == False:
#Setting up the dispatcher for receiving
try:
self.setupInputServer(context, envars)
self.setupOutputServer(context, envars)
# all the osc messages handlers ready for registering to the server
oscHandlerDict = {}
oscHandleList = []
# register a message for executing
if envars.node_update == "MESSAGE" and hasAnimationNodes():
# oscHandleList content:
# callback type
# blender datapath (i.e. bpy.data.objects['Cube'])
# blender property (i.e. location)
# blender property index (i.e. location[index])
# osc argument index to use (should be a tuplet, like (1,2,3))
# node type
# datapath format string
# loop range string
# filter eval string
oscHandleList = (-1, None, None, None, None, 0, '', '', True)
self.addOscHandler(oscHandlerDict, envars.node_frameMessage, oscHandleList)
for item in bpy.context.scene.NodeOSC_keys:
filter_eval = True
if item.filter_enable:
filter_eval = item.filter_eval
if item.osc_direction != "OUTPUT" and item.enabled:
if item.dp_format_enable == False:
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
oscHandleList = None
if item.data_path.find('script(') == 0:
raise Exception("using script() with format disabled is not allowed!")
elif item.data_path.find('][') != -1 and (item.data_path[-2:] == '"]' or item.data_path[-2:] == '\']'):
#For custom properties
# like bpy.data.objects['Cube']['customProp']
prop = item.data_path[item.data_path.rindex('['):]
prop = prop[2:-2] # get rid of [' ']
datapath = item.data_path[0:item.data_path.rindex('[')]
oscHandleList = [1, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ']':
#For normal properties with index in brackets
# like bpy.data.objects['Cube'].location[0]
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:item.data_path.rindex('[')]
prop_index = item.data_path[item.data_path.rindex('[') + 1:item.data_path.rindex(']')]
oscHandleList = [3, eval(datapath), prop, int(prop_index), oscIndex, item.node_type, '', '', filter_eval]
elif item.data_path[-1] == ')':
# its a function call
oscHandleList = [7, item.data_path, '', item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
#without index in brackets
datapath = item.data_path[0:item.data_path.rindex('.')]
prop = item.data_path[item.data_path.rindex('.') + 1:]
if isinstance(getattr(eval(datapath), prop), (int, float, str)):
# property is single value
oscHandleList = [2, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
else:
# property is array
oscHandleList = [4, eval(datapath), prop, item.idx, oscIndex, item.node_type, '', '', filter_eval]
                        if oscHandleList is not None:
                            self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
                        else:
                            self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"'")
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
else:
oscIndex = item.osc_index
try:
oscHandleList = None
if item.data_path.find('script(') == 0:
if item.data_path.find(').'):
scriptName = item.data_path[7:item.data_path.find(').')]
functionName = item.data_path[item.data_path.find(').')+2:]
asModule = bpy.data.texts[scriptName].as_module()
asFunction = getattr(asModule, functionName)
oscHandleList = [11, scriptName + "." + functionName, asFunction, 0, item.osc_index, item.node_type, item.dp_format, '', filter_eval]
else:
if item.loop_enable:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, item.loop_range, filter_eval]
else:
oscHandleList = [10, item.data_path, '', 0, item.osc_index, item.node_type, item.dp_format, '', filter_eval]
                            if oscHandleList is not None:
                                self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
                            else:
                                self.report({'WARNING'}, "Unable to create listener for: object '"+item.data_path+"' with id '"+item.props+"'")
except Exception as err:
self.report({'WARNING'}, "Register custom handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# lets go and find all nodes in all nodetrees that are relevant for us
nodes_createCollections()
for item in bpy.context.scene.NodeOSC_nodes:
filter_eval = True
if item.osc_direction != "OUTPUT":
# make osc index into a tuple ..
oscIndex = make_tuple(item.osc_index)
# ... and don't forget the corner case
if isinstance(oscIndex, int):
oscIndex = (oscIndex,)
try:
if item.node_data_type == "SINGLE":
oscHandleList = [5, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
elif item.node_data_type == "LIST":
oscHandleList = [6, eval(item.data_path), item.props, item.idx, oscIndex, item.node_type, '', '', filter_eval]
self.addOscHandler(oscHandlerDict, item.osc_address.strip(), oscHandleList)
except Exception as err:
self.report({'WARNING'}, "Register node handle: object '"+item.data_path+"' with id '"+item.props+"' : {0}".format(err))
# register all oscHandles on the server
for address, oscHandles in oscHandlerDict.items():
self.addMethod(address, oscHandles)
# this provides the callback functions with the oscHandles
setOscHandlers(oscHandlerDict)
# register the default method for unregistered addresses
self.addDefaultMethod()
# startup the receiving server
self.startupInputServer(context, envars)
# register the execute queue method
bpy.app.timers.register(execute_queued_OSC_callbacks)
#inititate the modal timer thread
context.window_manager.modal_handler_add(self)
self._timer = context.window_manager.event_timer_add(envars.output_rate/1000, window = context.window)
except Exception as err:
self.report({'WARNING'}, "Server startup: {0}".format(err))
return {'CANCELLED'}
envars.isServerRunning = True
self.report({'INFO'}, "Server successfully started!")
return {'RUNNING_MODAL'}
else:
self.report({'INFO'}, "Server stopped!")
envars.isServerRunning = False
return{'FINISHED'}
def cancel(self, context):
envars = bpy.context.scene.nodeosc_envars
self.shutDownInputServer(context, envars)
context.window_manager.event_timer_remove(self._timer)
# hack to check who is calling the cancel method.
# see https://blender.stackexchange.com/questions/23126/is-there-a-way-to-execute-code-before-blender-is-closing
traceback_elements = traceback.format_stack()
# if the stack has 2 elements, it is because the server stop has been pushed.
# otherwise it might be loading a new project which would cause an exception
# and stop the proper shutdown of the server..
        if len(traceback_elements) == 2:
bpy.app.timers.unregister(execute_queued_OSC_callbacks)
return {'CANCELLED'}
# will take an address and a oscHandle data packet.
# if the address has already been used, the package will be added to the packagelist
def addOscHandler(self, handleDict, address, oscHandlePackage):
oldpackage = handleDict.get(address)
        if oldpackage is None:
oldpackage = [oscHandlePackage]
else:
oldpackage += [oscHandlePackage]
handleDict[address] = oldpackage
|
maybites/blender.NodeOSC
|
server/_base.py
|
_base.py
|
py
| 16,277 |
python
|
en
|
code
| 100 |
github-code
|
6
|
[
{
"api_name": "bpy.context",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Vector",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Quaternion",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Euler",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mathutils.Matrix",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "bpy.types",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "bpy.data",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 259,
"usage_type": "attribute"
},
{
"api_name": "ast.literal_eval",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "bpy.app.timers.register",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "bpy.app",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 315,
"usage_type": "attribute"
},
{
"api_name": "traceback.format_stack",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "bpy.app.timers.unregister",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "bpy.app",
"line_number": 326,
"usage_type": "attribute"
}
] |
72646625788
|
# some functions from discovery/scripts/cdisco/cdisco.py
import numpy as np
import torch
import torchvision
import PIL.Image as Image
from my_datasets import transform
from my_datasets import transform_normalize
def get_model_state(model, paths, y, dim_c, dim_w, dim_h, SAVEFOLD=''):
batch_size = 32
batch_start=0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
embeddings = np.zeros((len(y),2048))
gradients = np.zeros((len(y), 2048))
predictions = np.zeros((len(y), 1000))
    conv_embeddings = np.zeros((len(y), dim_c))
gradients_wrt_conv_layer = np.zeros((len(y), dim_c, dim_w, dim_h), dtype=np.float32)
conv_maps = np.zeros((len(y),dim_c,dim_w,dim_h))
print(f"embeddings shape: {embeddings.shape}")
print(f"gradients shape: {gradients.shape}")
print(f"predictions shape: {predictions.shape}")
    while batch_start < len(y):  # equivalent to the original batch_start+batch_size < len(y)+batch_size
# preprocessing the inputs
print(batch_start)
inputs = torch.stack([transform_normalize(transform(Image.open(paths[i]).convert("RGB"))) for i in range(batch_start, min(batch_start+batch_size, len(y)))])
inputs = inputs.clone().detach().requires_grad_(True)
batch_y=y[batch_start:min(batch_start+batch_size, len(y))]
# transfering to GPU
inputs=inputs.to(device)
model=model.to(device)
# inference pass
outs = model(inputs)
# extracting embeddings
# note: convolutional outputs should be avg pooled for this to actually make sense
pooled_embeddings=torch.nn.functional.adaptive_avg_pool2d(outs['conv'], (1, 1))
conv_embeddings[batch_start:min(batch_start+batch_size, len(y)),:]=pooled_embeddings[:,:,0,0].cpu().detach().numpy()
embeddings[batch_start:min(batch_start+batch_size, len(y)),:]=outs['avgpool'][:,:,0,0].cpu().detach().numpy()
# computing prediction loss
loss = torch.nn.CrossEntropyLoss()
pred = outs['fc']
len_=pred.shape[0]
target=np.zeros((len_, 1000))
for i in range(len(pred)):
target[i,int(batch_y[i])]=1.
target=torch.tensor(target, requires_grad=True).to(device)
outloss = loss(pred, target)
# Storing predictions
softmaxf = torch.nn.Softmax(dim=1)
predictions[batch_start:min(batch_start+batch_size, len(y)),:]=softmaxf(pred).detach().cpu()
# Computing the gradients and storing them
grads_wrt_conv = torch.autograd.grad(outloss, outs['conv'], retain_graph=True)[0]
gradients_wrt_conv_layer[batch_start:min(batch_start+batch_size, len(y)),:,:,:] = grads_wrt_conv[:,:,:,:].cpu()
conv_maps[batch_start:min(batch_start+batch_size, len(y)),:,:,:] = outs['conv'].cpu().detach()
grads = torch.autograd.grad(outloss, outs['avgpool'], retain_graph=True)[0]
gradients[batch_start:min(batch_start+batch_size, len(y)),:] = grads[:,:,0,0].cpu()
batch_start += batch_size
print(f"gradients shape {gradients.shape}, conv_embs shape {conv_embeddings.shape}, conv_maps.shape {conv_maps.shape}")
"""
SAVE INTERMEDIATE RESULTS
"""
np.save(f"{SAVEFOLD}/predictions.npy", predictions)
np.save(f"{SAVEFOLD}/gradients_wrt_conv_layer.npy", gradients_wrt_conv_layer)
np.save(f"{SAVEFOLD}/conv_maps.npy", conv_maps)
|
lomahony/sw-interpretability
|
scripts/get_embeddings.py
|
get_embeddings.py
|
py
| 3,411 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "torch.device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "my_datasets.transform_normalize",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "my_datasets.transform",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.adaptive_avg_pool2d",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.grad",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.grad",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "numpy.save",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 79,
"usage_type": "call"
}
] |
39359091601
|
import time
from openpyxl import Workbook
from selenium import webdriver
import openpyxl
# from selenium.webdriver.common import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
driver=webdriver.Chrome()
driver.get('https://www.homechoice.co.za/home')
driver.maximize_window()
driver.implicitly_wait(5)
searchbox=driver.find_element(By.ID,'CC-headerWidget-Search')
searchbox.send_keys("beds")
driver.find_element(By.ID,'searchSubmit').click()  # click() returns None, so its result is not assigned
filterbtn=driver.find_element(By.XPATH,'//span[contains(text(),"HomeChoice")]')
filterbtn.click()
bedProducts=driver.find_elements(By.XPATH,'//h3[contains(@itemprop,"name")]')
print("beds present in current page",len(bedProducts))
mybeds=[]
myprice=[]
for bed in bedProducts:
# print(bed.text)
mybeds.append(bed.text)
print("=*"*50)
time.sleep(2)
bedPrices=driver.find_elements(By.XPATH,'//div[@itemprop="cash-price"]')
my_element_id = '//span[contains(@id,"CC-product-price-max")]'
ignored_exceptions=(NoSuchElementException,StaleElementReferenceException)
print("prices present in a current page",len(bedPrices))
bedPrices = WebDriverWait(driver,10,ignored_exceptions=ignored_exceptions)\
.until(expected_conditions.presence_of_all_elements_located((By.XPATH, my_element_id)))
for price in bedPrices:
# print(price.text)
myprice.append(price.text)
finallist=zip(mybeds,myprice)
# for data in list(finallist):
# print(data)
print("part1 completed")
wb=Workbook()
wb["Sheet"].title="BEDS DATA"
sh1=wb.active
sh1.append(["name","price"])
for x in list(finallist):
sh1.append(x)
wb.save("beddetail.xlsx")
print("part2 is completed")
|
Paviterence/Selenium-Python-BasicCodes
|
webScrapping.py
|
webScrapping.py
|
py
| 1,886 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.NoSuchElementException",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.StaleElementReferenceException",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.wait.WebDriverWait",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_all_elements_located",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "openpyxl.Workbook",
"line_number": 49,
"usage_type": "call"
}
] |
29099740995
|
from torch.utils.data import Dataset
from typing import List
import torch
import pandas as pd
class InferenceDataset(Dataset):
def __init__(self, texts: List[list], tokenizer, max_length: int):
self.texts = texts
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
return len(self.texts)
def __getitem__(self, item_index):
inputs = self.tokenizer.encode_plus(
text=self.texts[item_index],
max_length=self.max_length,
padding="max_length",
return_tensors="pt",
add_special_tokens=True,
truncation=True
)
return {"inputs_ids": inputs["input_ids"].flatten(),
"attention_mask": inputs["attention_mask"].flatten()}
class PairSarcasmDataset(Dataset):
def __init__(self, texts: list, text_pairs: list, targets: list, tokenizer, max_len):
self.texts = texts
self.text_pairs = text_pairs
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.texts)
def __getitem__(self, item_index):
text = self.texts[item_index]
text_pair = self.text_pairs[item_index]
target = self.targets[item_index]
inputs_ids = self.tokenizer.encode_plus(text=text,
text_pair=text_pair,
add_special_tokens=True,
max_length=2 * self.max_len,
return_tensors="pt",
padding="max_length",
truncation=True,
return_token_type_ids=True).input_ids
inputs_ids = inputs_ids.flatten()
return {"inputs_ids": inputs_ids, "targets": torch.tensor(target)}
class MultiSarcasmDataset(Dataset):
def __init__(self, data: pd.DataFrame, label_columns, tokenizer, max_len):
self.data = data
self.tokenizer = tokenizer
self.max_len = max_len
self.label_columns = label_columns
def __len__(self):
return len(self.data)
def __getitem__(self, item_index):
data_row = self.data.iloc[item_index]
text = data_row.tweets
target = data_row[self.label_columns]
inputs_ids = self.tokenizer.encode_plus(text=text,
add_special_tokens=True,
max_length=self.max_len,
return_tensors="pt",
padding="max_length",
truncation=True,
return_token_type_ids=True).input_ids
inputs_ids = inputs_ids.flatten()
return {"inputs_ids": inputs_ids, "label_sarcasm": torch.tensor(target[0]),
"label_irony": torch.tensor(target[1]),
"label_satire": torch.tensor(target[2]),
"label_understatement": torch.tensor(target[3]),
"label_overstatement": torch.tensor(target[4]),
"label_rhetorical_question": torch.tensor(target[5])}
|
MaryNJ1995/Sarcasm_Detection
|
src/inference/dataset.py
|
dataset.py
|
py
| 3,398 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 91,
"usage_type": "call"
}
] |
825675496
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 10 04:27:29 2022
@author: ThinkPad
"""
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from PartialScan import PartialScans,unpickle,inferencePartialScans
from model import feature_transform_regularizer
from pointnetCls import PointNetCls
import torch.nn.functional as F
from tqdm import tqdm
from random import sample
# import open3d as o3d
from normalizeData import normalizePoints
def add_shape_arguments(parser):
parser.add_argument(
'--batchSize', type=int, default=3, help='input batch size')
parser.add_argument(
'--num_points', type=int, default=2500, help='input batch size')
parser.add_argument(
'--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument(
'--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--checkpoint', type=str,
default='/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/cls/cls_model_10.pth',
help="checkpoint dir")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
def inference(scanpoints, latentcode, classifier, opt, ref_paths):
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
points_r = normalizePoints(scanpoints)
points = np.random.rand(3, 1024, 3)
if points_r.shape[0] < 1024:
return False
points[0] = points_r[0:1024, :]
haveTarget = False
classifier = classifier.eval()
latent_dim = 512
for j in range(5):
ischair = 0
for i in range(10):
latents = np.zeros((1, latent_dim))
latents[0] = latentcode[j]
for k, path in enumerate(sample(ref_paths, 2), 1):
data = np.load(path)
scanpoints = data['points_r']
# points_r = normalizePoints(scanpoints)
points_r = scanpoints
points[k] = points_r[0:1024, :]
points_torch = torch.from_numpy(points[:, 0:1024, :]).to(torch.float32)
points_torch = points_torch.transpose(2, 1)
z = torch.from_numpy(latents).to(torch.float32)
points_cuda, z = points_torch.cuda(), z.cuda()
with torch.no_grad():
pred, trans, trans_feat = classifier(points_cuda, z)
pred = pred[0]
pred = torch.nn.functional.softmax(pred, dim=1)
ischair = int((pred.data.max(0)[1][1] == 0).cpu()) + ischair
print(ischair)
if ischair - 4 > 0:
haveTarget = True
break
return haveTarget
def get_text_model(opt):
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
if torch.cuda.is_available():
classifier.cuda()
return classifier
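# A hedged sketch (editor's addition) tying get_text_model and inference
# together; the scan file, latent codes and reference paths are hypothetical.
def run_inference_example(opt):
    classifier = get_text_model(opt)
    data = np.load("testpoints.npz")                   # hypothetical partial scan
    latents = np.load("shape_latents.npy")             # hypothetical (5, 512) latent codes
    ref_paths = ["ref_scan_0.npz", "ref_scan_1.npz"]   # hypothetical reference scans
    return inference(data['points_r'], latents, classifier, opt, ref_paths)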
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_shape_arguments(parser)
opt = parser.parse_args()
print(opt)
blue = lambda x: '\033[94m' + x + '\033[0m'
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
latent_code = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_train.pickle"
latent_code_test = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_test.pickle"
latent_code_val = "/gpfs/data/ssrinath/ychen485/hyperPointnet/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_val.pickle"
shape_folder = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627"
latent_dim = 512
dataset = PartialScans(latentcode_dir = latent_code, shapes_dir = shape_folder)
test_dataset = PartialScans(latentcode_dir = latent_code_test, shapes_dir = shape_folder)
val_dataset = PartialScans(latentcode_dir = latent_code_val, shapes_dir = shape_folder)
inference_loader = inferencePartialScans(shapes_dir = "")
inferdataloader = torch.utils.data.DataLoader(
inference_loader,
batch_size=opt.batchSize,
shuffle=False,
num_workers=int(opt.workers))
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
testdataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
valdataloader = torch.utils.data.DataLoader(
val_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
latent_dict = unpickle(latent_code)
keylist = list(latent_dict.keys())
latent_dict_test = unpickle(latent_code_test)
keylist_test = list(latent_dict_test.keys())
latent_dict_val = unpickle(latent_code_val)
keylist_val = list(latent_dict_val.keys())
print("train set lenth: "+ str(len(dataset)) +", test set length: "+ str(len(test_dataset)))
try:
os.makedirs(opt.outf)
except OSError:
pass
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
if opt.checkpoint != " ":
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
pass
classifier.cuda()
# # idx = random.randint(0, len(label) - 1)
# i = random.randint(0, 2)
# j = random.randint(0, 7)
# path = shape_folder + "/" + label[t_idx] + "/pointcloud" + str(j) + str(i) + "_partial.npz"
# data = np.load(path)
# scanpoints = data['points_r']
# # points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points[1] = points_r[0:1024, :]
#
# # idx = random.randint(0, len(label) - 1)
# i = random.randint(0, 2)
# j = random.randint(0, 7)
# path = shape_folder + "/" + label[t_idx] + "/pointcloud" + str(j) + str(i) + "_partial.npz"
# data = np.load(path)
# scanpoints = data['points_r']
# # points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points[2] = points_r[0:1024, :]
num_batch = len(dataset) / opt.batchSize
total_correct = 0
for epoch in range(1):
for i, data in enumerate(valdataloader, 0):
points_o, label = data
points = points_o[:,0:1024,:].to(torch.float32)
# print(points.shape)
            points = points.to(torch.float32)  # .to() is not in-place; assign the result
points = points.transpose(2, 1)
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict_val[label[t_idx]]
# for j in range(opt.batchSize):
# if target[j] == 1:
# latents[j] = latent_dict[label[j]]
# else:
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# while(name == label[j]):
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# latents[j] = latent_dict[name]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
# optimizer.zero_grad()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
# loss = F.nll_loss(pred, target)
# if opt.feature_transform:
# loss += feature_transform_regularizer(trans_feat) * 0.001
# loss.backward()
# optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
total_correct = total_correct + correct.item()
if i%100 == 0:
print('[%d: %d/%d] accuracy: %f' % (epoch, i, num_batch, total_correct / (100* opt.batchSize)))
total_correct = 0
print(pred,pred_choice)
# print(points)
# print("inferencing:" )
path = "testpoints.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud70_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
# points_r = scanpoints
# points_o[2] = points[0:1024,:]
points = np.random.rand(3,1024,3)
# points[0] = points_r[0:1024,:]
points[0] = points_r[0:1024,:]
idx = random.randint(0,len(label)-1)
i = random.randint(0,2)
j = random.randint(0,7)
path = shape_folder +"/" + label[t_idx] + "/pointcloud"+str(j)+str(i)+"_partial.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud41_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
points_r = scanpoints
# points_o[2] = points[0:1024,:]
# points = np.zeros((3,1024,3))
# points[0] = points_r[0:1024,:]
points[1] = points_r[0:1024,:]
idx = random.randint(0,len(label)-1)
i = random.randint(0,2)
j = random.randint(0,7)
path = shape_folder +"/" + label[t_idx] + "/pointcloud"+str(j)+str(i)+"_partial.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/589e717feb809e7c1c5a16cc04345597/pointcloud62_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
points_r = scanpoints
# points_o[2] = points[0:1024,:]
# points = np.zeros((3,1024,3))
# points[0] = points_r[0:1024,:]
points[2] = points_r[0:1024,:]
# from torch.autograd import Variable
# sim_data = Variable(torch.rand(32,3,1024))
# print(points)
# print(points_o)
points = torch.from_numpy(points[:,0:1024,:]).to(torch.float32)
    points = points.to(torch.float32)  # .to() is not in-place; assign the result
# print(points)
points = points.transpose(2, 1)
# print(points)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict['46323c7986200588492d9da2668ec34c']
z = torch.from_numpy(latents).to(torch.float32)
# print(z)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.eval()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_val['ba673ea75085e46cbfd72d7396bc040a']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_test['ff9915c51ece4848cfc689934e433906']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict_test['fc07472e4dd1b6698ae97f14e63e7e01']
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
print(torch.exp(pred),pred_choice)
latents[0] = latent_dict['3bd437d38068f4a61f285be552b78f9a']
latents[0] = (np.load('../language2shape/results/shape_0032.npy')[2])
z = torch.from_numpy(latents).to(torch.float32)
# print(z)
points, target, z = points.cuda(), target.cuda(), z.cuda()
classifier = classifier.eval()
pred, trans, trans_feat = classifier(points, z)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
path = "testpoints.npz"
# path = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627/ff9915c51ece4848cfc689934e433906/pointcloud70_partial.npz"
data = np.load(path)
# lst = data.files
scanpoints = data['points_r']
# pcd1 = o3d.io.read_point_cloud(path)
# scanpoints = np.asarray(pcd1.points)
# print(scanpoints.shape)
points_r = normalizePoints(scanpoints)
    inference(scanpoints, np.load('../language2shape/results/shape_0032.npy'), classifier)  # note: inference() also expects opt and ref_paths arguments; this call omits them
|
FreddieRao/TextCondRobotFetch
|
pointnet/inference.py
|
inference.py
|
py
| 13,605 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "normalizeData.normalizePoints",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "torch.from_numpy",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pointnetCls.PointNetCls",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "PartialScan.PartialScans",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "PartialScan.PartialScans",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "PartialScan.PartialScans",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "PartialScan.inferencePartialScans",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "PartialScan.unpickle",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "PartialScan.unpickle",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "PartialScan.unpickle",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "pointnetCls.PointNetCls",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.int64",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "normalizeData.normalizePoints",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "normalizeData.normalizePoints",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "normalizeData.normalizePoints",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 310,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 317,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 333,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "torch.exp",
"line_number": 351,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 355,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 364,
"usage_type": "call"
},
{
"api_name": "normalizeData.normalizePoints",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 373,
"usage_type": "call"
}
] |
12902368672
|
#!/usr/bin/python3
import sqlite3
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
dbfile = 'TimeTrack4237.db'
dbconn = sqlite3.connect(dbfile)
student_hours = None
with dbconn:
dbcursor = dbconn.cursor()
dbcursor.execute("SELECT name, SUM( ROUND( CAST( (JULIANDAY(checkout) - JULIANDAY(checkin)) * 24 AS REAL), 2)) \
FROM activity, students \
WHERE activity.id = students.id \
AND checkin IS NOT NULL \
AND checkout IS NOT NULL \
GROUP BY name \
ORDER BY name")
student_hours = dbcursor.fetchall()
if student_hours is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials/timetrack4237-12f97a6ef02f.json', scope)
gc = gspread.authorize(credentials)
workbook = gc.open("TimeTrack4237")
workbook.sheet1.clear()
workbook.values_update(
'Sheet1!A1',
params={'valueInputOption': 'RAW'},
body={'values': student_hours}
)
|
washide/TimeTrack4237
|
UploadTotalHours.py
|
UploadTotalHours.py
|
py
| 1,173 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "gspread.authorize",
"line_number": 30,
"usage_type": "call"
}
] |
70357157629
|
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
MapL = 15 # Chessboard size
WinN = 5 # "Five"-in-a-row
step = 0 # Steps taken
steps = [] # Coordinates of each step
end_flag = 0 # Game end flag
board = np.zeros((MapL,MapL),dtype=np.int64) # chessboard
mode = 4 # modes: 0:player-player, 1:PC-player, 2:player-PC, 3:PC-PC
# parameters 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
coeffs = [np.array([[-12, 0,-35,-15,-34,-25,-1000,-45,-1000,-30,-30,-1000,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-90000],
[ 10, 3, 30, 15, 29, 12, 190, 55, 180, 20, 20, 4000, 140, 135, 130, 130, 200, 135, 135, 90000]]),
np.array([[-15, 0,-35,-15,-34,-25,-1000,-40,-1000,-30,-30,-1000,-9500,-9500,-9500,-9500,-9500,-9500,-9500,-30000],
[ 10,10, 30, 15, 29, 12, 195, 50, 180, 20, 20, 4000, 140, 135, 130, 130, 200, 135, 135, 40000]])]
#numsall = np.zeros((2,len(coeffs[0][0])))
def judge(l,c,winn):
'''judge if a player wins by taking a move (l,c)'''
# line #
count = 0
i = 0
while i < MapL-1 and count < WinN-1:
if board[l][i] and board[l][i] == board[l][i+1]:
count += 1
else: count = 0
i += 1
if count == WinN-1: return 1
# column #
count = 0
i = 0
while i < MapL-1 and count < WinN-1:
if board[i][c] and board[i][c] == board[i+1][c]:
count += 1
else: count = 0
i += 1
if count == WinN-1: return 1
# Principal diagonal #
count = 0
i = 0
l_ = l - min(l,c); c_ = c - min(l,c)
while i+l_<MapL-1 and i+c_<MapL-1 and count<WinN-1:
if board[i+l_][i+c_] and board[i+l_+1][i+c_+1] == board[i+l_][i+c_]:
count += 1
else: count = 0
i += 1
if count == WinN-1: return 1
# Subdiagonal #
count = 0
i = 0
while c > 0 and l < MapL-1:
l += 1; c -= 1
while l-i>0 and i+c<MapL and count<WinN-1:
if board[-i+l][i+c] and board[-i+l-1][i+c+1] == board[-i+l][i+c]:
count += 1
else: count = 0
i += 1
if count == WinN-1: return 1
return 0
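# A tiny illustration of judge() (editor's addition): with five consecutive
# black stones on row 0, the move at (0, 4) is reported as a win.
#
#   for c in range(5):
#       board[0][c] = 1
#   assert judge(0, 4, WinN) == 1
#   board[board != 0] = 0   # reset before a real game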
def auto(player=2,coeff=0):
'''computer's move'''
max_score = -np.inf
ymax = 1; xmax = 1
# Calculate the scores at each point
for y in range(MapL):
for x in range(MapL):
if not board[y][x]:
cd = abs(y-MapL/2+0.5) + abs(x-MapL/2+0.5)
if (not step and cd>3) or (step and not np.any(board[max(y-2,0):min(y+3,MapL),max(x-2,0):min(x+3,MapL)])):
score = -np.inf
#print(" ",end='')
else:
board[y][x] = player
scores = score_calc(coeffs[coeff],player)
score = scores[0] - cd + np.random.randint(-6,5) # my score in this move
score_opp = scores[1] # the opponent's score in this move
board[y][x] = 3 - player
score2 = score_calc(coeffs[coeff],player)[0] # my score if the opponent take this move
# Treatment of 33, 34 and 44
if coeffs[coeff][0][12]*3<score_opp<coeffs[coeff][0][6]*0.5+coeffs[coeff][0][12]:
score -= coeffs[coeff][0][6]
if 1.5<score2/coeffs[coeff][0][6]<2.5:
score -= coeffs[coeff][0][6]*0.25
elif 1.9<score2/coeffs[coeff][0][12]<2.1 or 0.5<(score2-coeffs[coeff][0][12])/coeffs[coeff][0][6]<1.5:
score -= coeffs[coeff][0][6]*0.5
elif 0.5<score2/coeffs[coeff][0][19]<3.5:
score -= coeffs[coeff][0][12]
#print('%5d' % score,end='')
if max_score < score:
max_score = score; ymax = y+1; xmax = x+1
board[y][x] = 0
else: pass
#print(' ['+'%s'%chr(21*board[y][x]+45)+']',end='')
#print("")
#print("B:",end='')
#for j in range(len(numsall[0])): print('%2d'%int(numsall[0][j]),end=' ')
#print("\nW:",end='')
#for j in range(len(numsall[0])): print('%2d'%int(numsall[1][j]),end=' ')
#print("")
return ymax,xmax
def score_calc(coeff,player=2):
'''calculate total score'''
nums = np.zeros((2,len(coeffs[0][0])))
def one_calc(a):
'''calculate each list'''
l = len(a)
a = a.tolist()
for i in range(l-2):
if a[i:i+3]==[0,1,0]: nums[0][0]+=1
elif a[i:i+3]==[2,1,0] or a[i:i+3]==[0,1,2]: nums[0][1]+=1
elif a[i:i+3]==[0,2,0]: nums[1][0]+=1
elif a[i:i+3]==[1,2,0] or a[i:i+3]==[0,2,1]: nums[1][1]+=1
for i in range(l-3):
if a[i:i+4]==[0,1,1,0]: nums[0][2]+=1
elif a[i:i+4]==[2,1,1,0] or a[i:i+4]==[0,1,1,2]: nums[0][3]+=1
elif a[i:i+4]==[0,2,2,0]: nums[1][2]+=1
elif a[i:i+4]==[1,2,2,0] or a[i:i+4]==[0,2,2,1]: nums[1][3]+=1
for i in range(l-4):
if a[i:i+5]==[0,1,0,1,0]: nums[0][4]+=1
elif a[i:i+5]==[0,1,0,1,2] or a[i:i+5]==[2,1,0,1,0]: nums[0][5]+=1
elif a[i:i+5]==[0,1,1,1,0]: nums[0][6]+=1
elif a[i:i+5]==[0,1,1,1,2] or a[i:i+5]==[2,1,1,1,0]: nums[0][7]+=1
elif a[i:i+5]==[1,1,1,1,1]: nums[0][-1]+=1
elif a[i:i+5]==[0,2,0,2,0]: nums[1][4]+=1
elif a[i:i+5]==[0,2,0,2,1] or a[i:i+5]==[1,2,0,2,0]: nums[1][5]+=1
elif a[i:i+5]==[0,2,2,2,0]: nums[1][6]+=1
elif a[i:i+5]==[0,2,2,2,1] or a[i:i+5]==[1,2,2,2,0]: nums[1][7]+=1
elif a[i:i+5]==[2,2,2,2,2]: nums[1][-1]+=1
if l>=6:
for i in range(l-5):
if a[i:i+6]==[0,1,0,1,1,0] or a[i:i+6]==[0,1,1,0,1,0]: nums[0][8]+=1
elif a[i:i+6]==[2,1,0,1,1,0] or a[i:i+6]==[0,1,1,0,1,2]: nums[0][9]+=1
elif a[i:i+6]==[2,1,1,0,1,0] or a[i:i+6]==[0,1,0,1,1,2]: nums[0][10]+=1
elif a[i:i+6]==[0,1,1,1,1,0]: nums[0][11]+=1
elif a[i:i+6]==[2,1,1,1,1,0] or a[i:i+6]==[0,1,1,1,1,2]: nums[0][12]+=1
elif a[i:i+6]==[1,1,1,0,1,1] or a[i:i+6]==[1,1,0,1,1,1]: nums[0][13]+=1
elif a[i:i+6]==[0,2,0,2,2,0] or a[i:i+6]==[0,2,2,0,2,0]: nums[1][8]+=1
elif a[i:i+6]==[1,2,0,2,2,0] or a[i:i+6]==[0,2,2,0,2,1]: nums[1][9]+=1
elif a[i:i+6]==[0,2,2,0,2,1] or a[i:i+6]==[0,2,0,2,2,1]: nums[1][10]+=1
elif a[i:i+6]==[0,2,2,2,2,0]: nums[1][11]+=1
elif a[i:i+6]==[1,2,2,2,2,0] or a[i:i+6]==[0,2,2,2,2,1]: nums[1][12]+=1
elif a[i:i+6]==[2,2,2,0,2,2] or a[i:i+6]==[2,2,0,2,2,2]: nums[1][13]+=1
if l>=7:
for i in range(l-6):
if a[i:i+7]==[0,1,1,1,0,1,0] or a[i:i+7]==[0,1,0,1,1,1,0]: nums[0][16]+=1
elif a[i:i+7]==[2,1,1,0,1,1,2] or a[i:i+7]==[2,1,0,1,1,1,2] or a[i:i+7]==[2,1,1,1,0,1,2]: nums[0][13]+=1
elif a[i:i+7]==[2,1,1,0,1,1,0] or a[i:i+7]==[0,1,1,0,1,1,2]: nums[0][14]+=1
elif a[i:i+7]==[0,1,1,0,1,1,0] or a[i:i+7]==[0,1,1,1,0,1,2] or a[i:i+7]==[2,1,0,1,1,1,0]: nums[0][15]+=1
elif a[i:i+7]==[0,1,0,1,1,1,2] or a[i:i+7]==[2,1,1,1,0,1,0]: nums[0][17]+=1
elif a[i:i+7]==[0,2,2,2,0,2,0] or a[i:i+7]==[0,2,0,2,2,2,0]: nums[1][16]+=1
elif a[i:i+7]==[1,2,2,0,2,2,1] or a[i:i+7]==[1,2,0,2,2,2,1] or a[i:i+7]==[1,2,2,2,0,2,1]: nums[1][13]+=1
elif a[i:i+7]==[1,2,2,0,2,2,0] or a[i:i+7]==[0,2,2,0,2,2,1]: nums[1][14]+=1
elif a[i:i+7]==[0,2,2,0,2,2,0] or a[i:i+7]==[0,2,2,2,0,2,1] or a[i:i+7]==[1,2,0,2,2,2,0]: nums[1][15]+=1
elif a[i:i+7]==[0,2,0,2,2,2,1] or a[i:i+7]==[1,2,2,2,0,2,0]: nums[1][17]+=1
for i in range(MapL):
# Calculate row and column
one_calc(board[i])
one_calc(board[:,i])
for i in range(-MapL+5,MapL-4):
# Calculate the main and sub diagonals
one_calc(np.diag(board,i))
one_calc(np.diag(np.flip(board,axis=0),i))
nums[:,0] -= nums[:,4]*2 + nums[:,8]+nums[:,10]+nums[:,16]+nums[:,17]
nums[:,1] -= nums[:,5] + nums[:,9]
nums[:,2] -= nums[:,8] + nums[:,9]+nums[:,14]+nums[:,15]
nums[:,3] -= nums[:,10] + nums[:,14]
nums[:,6] -= nums[:,15] + nums[:,16]
nums[:,7] -= nums[:,17]
#global numsall
#numsall = nums
if player==2:
return np.sum(nums*coeff), np.sum(nums*np.flip(coeff,axis=0))
else:
return np.sum(nums*np.flip(coeff,axis=0)), np.sum(nums*coeff)
def button(event):
'''event handler & modes'''
if not end_flag:
try:
if mode == 0:
move(round(event.ydata),round(event.xdata))
elif mode == 1:
if not step % 2: y,x = auto(1,1); move(y,x) # auto(1-B 2-W, 0-Old 1-New)
else: move(round(event.ydata),round(event.xdata))
elif mode == 2:
if not step % 2: move(round(event.ydata),round(event.xdata))
else: y,x = auto(2); move(y,x)
elif mode == 3:
if not step % 2: y,x = auto(1); move(y,x)
else: y,x = auto(2,1); move(y,x)
except: pass
def move(i,j):
'''take a move'''
global step,board,end_flag
if step == MapL**2: end_flag = 2
try:
if not board[i-1][j-1]:
board[i-1][j-1] = step%2 + 1
step += 1
steps.append([i,j])
if judge(i-1,j-1,WinN): end_flag = 1
show()
except: pass
def show():
'''show the chessboard'''
global step,board
colors = ['w','k','w']
names = ['player','PC']
adsize = 0 if mode == 3 else step % 2
plt.clf()
fig = plt.figure(num=1)
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(0+adsize,30,701+adsize,701) # position and size of the window
fig.canvas.mpl_connect('button_press_event', button)
plt.xlim(0.5,MapL+0.5); plt.ylim(0.5,MapL+0.5)
for i in range(MapL):
for j in range(MapL):
if board[i][j]:
plt.scatter(j+1,i+1,
c=colors[board[i][j]],s=520*12/(MapL-1),
linewidths=1,edgecolors='k',zorder=128)
if step:
plt.scatter(steps[-1][1],steps[-1][0],s=100,c='r',lw=5,marker='+',zorder=256)
if MapL==15:
plt.scatter([4,4,8,12,12],[4,12,8,4,12], c='k',s=10,zorder=2)
else:
plt.scatter([4,4,MapL-3,MapL-3],[4,MapL-3,4,MapL-3], c='k',s=10,zorder=2)
plt.plot([1,1,MapL,MapL,1],[1,MapL,MapL,1,1],c='k',lw=1)
plt.fill([1,MapL,MapL,1],[1,1,MapL,MapL],c='tan',alpha=0.5,zorder=0)
plt.fill([-MapL,2*MapL,2*MapL,-MapL],[-MapL,-MapL,2*MapL,2*MapL],c='tan',alpha=0.4,zorder=1)
plt.grid(True,ls='--',c='k',zorder=1)
plt.text(MapL/2,MapL+1.5,
"Step:"+str(step)+" Black:"+names[mode & 1]+" "+str(result[0])+":"+str(result[1])+" White:"+names[(mode&2)//2],
fontsize=15,ha="center")
ax = plt.gca()
ax.set_xticks(range(1,MapL+1))
ax.set_yticks(range(1,MapL+1))
for edge in ['left','right','top','bottom']:
ax.spines[edge].set_visible(False)
if end_flag:
if end_flag == 2:
string = "Draw!"
else:
string = "Black Wins" if step%2 else "White Wins"
plt.text(MapL/2+0.5,MapL+0.5,string,fontsize=20,c='r',va="center",ha="center")
if mode & (step % 2 + 1):
if not step: plt.pause(0.01)
fig.canvas.draw_idle()
fig.canvas.start_event_loop(0.1)
if not end_flag: plt.clf()
button(1)
else:
plt.show()
def init():
'''Initialization interface'''
def choice(event):
global mode
mode = 4 - round(event.ydata)
if mode in [0,1,2,3] and 2.3 < event.xdata < 7.7: plt.close(0)
fig = plt.figure(num=0)
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(100,100,600,600)
fig.canvas.mpl_connect('button_press_event', choice)
plt.xlim(0,10); plt.ylim(0,10)
plt.xticks([]); plt.yticks([])
plt.text(5,8," Gobang ",fontsize=25,color="w",bbox=(dict(fc="k",alpha=0.5)), va="center",ha="center")
plt.text(5,5.7,"Click the chessboard to play.\n Close the chessboard to refresh\n or start a new game.",fontsize=13,va="center",ha="center")
plt.text(5,4,'● player vs ○ player',fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
plt.text(5,2,'● player vs ○ PC ', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
plt.text(5,3,'● PC vs ○ player', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
plt.text(5,1,'● PC vs ○ PC ', fontsize=15,bbox=dict(fc=(1, 0.85, 0.7)),va="center",ha="center")
img = plt.imread("go.jpg")
plt.imshow(img,extent=[0,10,5,10])
plt.show()
if mode == 4: exit()
if __name__ == "__main__":
result = [0,0]
init()
while 1:
show()
if end_flag:
if end_flag == 2: pass
elif step % 2: result[0] += 1
else: result[1] += 1
end_flag = 0
step = 0
steps.clear()
board[board != 0] = 0
print("\n----- SCORE -----\nBlack",result[0],'-',result[1],"White\n"+"-"*17)
|
BetaGem/Games
|
gobang.py
|
gobang.py
|
py
| 13,951 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "numpy.any",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.diag",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.flip",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 232,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 233,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_current_fig_manager",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 234,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 245,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill",
"line_number": 252,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 252,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.fill",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 253,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.grid",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 259,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 269,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.pause",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 285,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.get_current_fig_manager",
"line_number": 287,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 291,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 294,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 297,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.text",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imread",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 300,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 301,
"usage_type": "name"
}
] |
9752254935
|
import functools
from flask_login import current_user, LoginManager
from flask import session
from src.model import UserModel
login_manager = LoginManager()
def roles_allowed(func=None, roles=None):
"""
Check if the user has at least one required role
:param func: the function to decorate
:param roles: an array of allowed roles
"""
if not func:
return functools.partial(roles_allowed, roles=roles)
@functools.wraps(func)
def f(*args, **kwargs):
role = session.get("ROLE")
if not any(role in s for s in roles):
return login_manager.unauthorized()
return func(*args, **kwargs)
return f
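# A minimal usage sketch (editor's addition): the Flask app and route below
# are hypothetical; roles_allowed wraps the view and rejects the request
# unless the session role matches one of the allowed roles.
#
#   @app.route("/admin")
#   @roles_allowed(roles=["ADMIN", "HEALTH"])
#   def admin_dashboard():
#       return "only ADMIN or HEALTH roles reach this"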
@login_manager.user_loader
def load_user(user_id):
# user = User.query.get(user_id)
if "current_user" in session:
user = UserModel()
user.fill_from_json(session["current_user"])
user.set_authenticated(True)
return user
return None
|
GreyTeam2020/GoOutSafe_microservice
|
gateway/src/auth.py
|
auth.py
|
py
| 949 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "flask_login.LoginManager",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "src.model.UserModel",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 34,
"usage_type": "name"
}
] |
14374651405
|
"""Bridgy App Engine config.
"""
import logging
class StubsFilter(logging.Filter):
"""Suppress these INFO logs:
Sandbox prevented access to file "/usr/local/Caskroom/google-cloud-sdk"
If it is a static file, check that `application_readable: true` is set in your app.yaml
"""
def filter(self, record):
msg = record.getMessage()
if (msg.startswith('Sandbox prevented access to file')
or msg.startswith('If it is a static file, check that')):
return 0
return 1
logging.getLogger().addFilter(StubsFilter())
|
snarfed/bridgy-fed
|
appengine_config.py
|
appengine_config.py
|
py
| 580 |
python
|
en
|
code
| 219 |
github-code
|
6
|
[
{
"api_name": "logging.Filter",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
}
] |
20519423740
|
"""!
@brief Examples of usage and demonstration of abilities of K-Medoids algorithm in cluster analysis.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.kmedoids import kmedoids
from pyclustering.utils import read_sample, calculate_distance_matrix
from pyclustering.utils import timedcall, distance_metric, type_metric
def template_clustering(start_medoids, path, tolerance=0.25, show=True, **kwargs):
ccore = kwargs.get('ccore', True)
data_type = kwargs.get('data_type', 'points')
original_data = read_sample(path)
sample = original_data
if data_type == 'distance_matrix':
sample = calculate_distance_matrix(sample)
metric = distance_metric(type_metric.EUCLIDEAN_SQUARE, data=sample)
kmedoids_instance = kmedoids(sample, start_medoids, tolerance, metric=metric, ccore=ccore, data_type=data_type)
(ticks, result) = timedcall(kmedoids_instance.process)
clusters = kmedoids_instance.get_clusters()
print("Iterations:", kmedoids_instance.get_iterations())
print([len(cluster) for cluster in clusters])
print(clusters)
medoids = kmedoids_instance.get_medoids()
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n")
if show is True:
visualizer = cluster_visualizer(1)
visualizer.append_clusters(clusters, original_data, 0)
visualizer.append_cluster([original_data[index] for index in start_medoids], marker='*', markersize=15)
visualizer.append_cluster(medoids, data=original_data, marker='*', markersize=15)
visualizer.show()
return original_data, clusters
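# A hedged sketch (editor's addition) of the 'distance_matrix' code path:
# the same sample is clustered from a precomputed distance matrix instead
# of raw points. It is not invoked below; add it to the run list to try it.
def cluster_sample1_distance_matrix():
    template_clustering([2, 9], SIMPLE_SAMPLES.SAMPLE_SIMPLE1,
                        data_type='distance_matrix')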
def cluster_sample1():
template_clustering([2, 9], SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
def cluster_sample2():
template_clustering([3, 12, 20], SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
def cluster_sample3():
template_clustering([4, 12, 25, 37], SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
def cluster_sample4():
template_clustering([4, 15, 30, 40, 50], SIMPLE_SAMPLES.SAMPLE_SIMPLE4)
def cluster_sample5():
template_clustering([4, 18, 34, 55], SIMPLE_SAMPLES.SAMPLE_SIMPLE5)
def cluster_elongate():
template_clustering([8, 56], SIMPLE_SAMPLES.SAMPLE_ELONGATE)
def cluster_lsun():
template_clustering([10, 275, 385], FCPS_SAMPLES.SAMPLE_LSUN)
def cluster_target():
template_clustering([10, 160, 310, 460, 560, 700], FCPS_SAMPLES.SAMPLE_TARGET)
def cluster_two_diamonds():
template_clustering([10, 650], FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS)
def cluster_wing_nut():
template_clustering([19, 823], FCPS_SAMPLES.SAMPLE_WING_NUT)
def cluster_chainlink():
template_clustering([30, 900], FCPS_SAMPLES.SAMPLE_CHAINLINK)
def cluster_hepta():
template_clustering([0, 35, 86, 93, 125, 171, 194], FCPS_SAMPLES.SAMPLE_HEPTA)
def cluster_tetra():
template_clustering([0, 131, 214, 265], FCPS_SAMPLES.SAMPLE_TETRA)
def cluster_atom():
template_clustering([0, 650], FCPS_SAMPLES.SAMPLE_ATOM)
def cluster_engy_time():
template_clustering([10, 3000], FCPS_SAMPLES.SAMPLE_ENGY_TIME)
def display_fcps_clustering_results():
(lsun, lsun_clusters) = template_clustering([10, 275, 385], FCPS_SAMPLES.SAMPLE_LSUN, 0.1, False)
(target, target_clusters) = template_clustering([10, 160, 310, 460, 560, 700], FCPS_SAMPLES.SAMPLE_TARGET, 0.1, False)
(two_diamonds, two_diamonds_clusters) = template_clustering([10, 650], FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 0.1, False)
(wing_nut, wing_nut_clusters) = template_clustering([19, 823], FCPS_SAMPLES.SAMPLE_WING_NUT, 0.1, False)
(chainlink, chainlink_clusters) = template_clustering([30, 900], FCPS_SAMPLES.SAMPLE_CHAINLINK, 0.1, False)
(hepta, hepta_clusters) = template_clustering([0, 35, 86, 93, 125, 171, 194], FCPS_SAMPLES.SAMPLE_HEPTA, 0.1, False)
(tetra, tetra_clusters) = template_clustering([0, 131, 214, 265], FCPS_SAMPLES.SAMPLE_TETRA, 0.1, False)
(atom, atom_clusters) = template_clustering([0, 650], FCPS_SAMPLES.SAMPLE_ATOM, 0.1, False)
visualizer = cluster_visualizer(8, 4)
visualizer.append_clusters(lsun_clusters, lsun, 0)
visualizer.append_clusters(target_clusters, target, 1)
visualizer.append_clusters(two_diamonds_clusters, two_diamonds, 2)
visualizer.append_clusters(wing_nut_clusters, wing_nut, 3)
visualizer.append_clusters(chainlink_clusters, chainlink, 4)
visualizer.append_clusters(hepta_clusters, hepta, 5)
visualizer.append_clusters(tetra_clusters, tetra, 6)
visualizer.append_clusters(atom_clusters, atom, 7)
visualizer.show()
cluster_sample1()
cluster_sample2()
cluster_sample3()
cluster_sample4()
cluster_sample5()
cluster_elongate()
cluster_lsun()
cluster_target()
cluster_two_diamonds()
cluster_wing_nut()
cluster_chainlink()
cluster_hepta()
cluster_tetra()
cluster_atom()
cluster_engy_time()
display_fcps_clustering_results()
|
annoviko/pyclustering
|
pyclustering/cluster/examples/kmedoids_examples.py
|
kmedoids_examples.py
|
py
| 5,155 |
python
|
en
|
code
| 1,113 |
github-code
|
6
|
[
{
"api_name": "pyclustering.utils.read_sample",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.calculate_distance_matrix",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.distance_metric",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.type_metric.EUCLIDEAN_SQUARE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.utils.type_metric",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pyclustering.cluster.kmedoids.kmedoids",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyclustering.utils.timedcall",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pyclustering.cluster.cluster_visualizer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_SIMPLE1",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_SIMPLE2",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_SIMPLE3",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_SIMPLE4",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_SIMPLE5",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES.SAMPLE_ELONGATE",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.SIMPLE_SAMPLES",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_LSUN",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TARGET",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_WING_NUT",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_CHAINLINK",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_HEPTA",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TETRA",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_ATOM",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_ENGY_TIME",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_LSUN",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TARGET",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_WING_NUT",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_CHAINLINK",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_HEPTA",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_TETRA",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES.SAMPLE_ATOM",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "pyclustering.samples.definitions.FCPS_SAMPLES",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "pyclustering.cluster.cluster_visualizer",
"line_number": 107,
"usage_type": "call"
}
] |
73928041148
|
from pyvi import window
from pyvi.modes import normal
class Editor(object):
_command = None
active_tab = None
def __init__(self, tabs=None, config=None, normal=normal):
self.config = config
self.mode = self.normal = normal
self.count = None
if tabs is None:
tabs = self.tabs = [window.Tab(self)]
else:
tabs = self.tabs = list(tabs)
if tabs:
self.active_tab = tabs[0]
@property
def active_window(self):
return self.active_tab.active_window
def keypress(self, keys):
return self.mode.keypress(self, keys)
|
Julian/PyVi
|
pyvi/editor.py
|
editor.py
|
py
| 635 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "pyvi.modes.normal",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyvi.modes.normal",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyvi.window.Tab",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyvi.window",
"line_number": 16,
"usage_type": "name"
}
] |
21594560177
|
from django.shortcuts import render, redirect
import csv
from django.http import HttpResponse
from django.template.loader import render_to_string
# from weasyprint import HTML
# Create your views here.
from .models import Members, Loans, Deposits
from django.db.models import Avg, Sum
from .forms import MemberForm
def home(request):
total_dep = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
total_loan = Loans.objects.aggregate(myloan=Sum('Amount_loan'))
maximum_dep = Deposits.objects.aggregate(mymax=Avg('Amount_deposit'))
member_list=Members.objects.all()
total_members=member_list.count()
context={'member_list':member_list,'total_members':total_members,'total_dep':total_dep,'maximum_dep':maximum_dep,'total_loan':total_loan}
return render(request,'information_system/home.html',context)
def members(request,pk):
members=Members.objects.get(id=pk)
deposits=Deposits.objects.get(id=pk)
deposit = members.deposits_set.all()
deptotal=deposits.Amount_deposit
# myFilter=MemberFilter(request., qs=deposit)
# deposit=myFilter.qs
dep_count=deposit.count()
fname_by=members.FirstName
lname_by=members.LastName
dep_by=fname_by+lname_by
dep_amount=deposits.Amount_deposit
total_depos = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
context={'members':members,'dep_amount':dep_amount,'dep_by':dep_by,'deposit':deposit,'total_depos':total_depos,'deptotal':deptotal}
return render(request,'information_system/Members.html',context)
def export(request):
response=HttpResponse(content_type='text/csv')
writer=csv.writer(response)
writer.writerow(['Account Number','First Name','Last Name','Date_start'])
for member in Members.objects.all().values_list('AccountNumber','FirstName','LastName','Date_start'):
writer.writerow(member)
response['Content-Disposition'] = 'attachment; filename="Member_List.csv"'
return response
def update(request, pk):
member=Members.objects.get(id=pk)
form=MemberForm(instance=member)
if request.method == 'POST':
        form=MemberForm(request.POST, instance=member)
if form.is_valid():
form.save()
return redirect('/')
context = {'form':form}
return render(request,'information_system/memberform.html',context)
def loan(request):
loan_list=Loans.objects.all()
total_loans=loan_list.count()
context={'total_loans':total_loans,'loan_list':loan_list}
return render(request,'information_system/loan.html',context)
def deposit(request):
deposit_list=Deposits.objects.all()
##To get total deposit
total_dep = Deposits.objects.aggregate(mytotal=Sum('Amount_deposit'))
maximum_dep=Deposits.objects.aggregate(mymax=Avg('Amount_deposit'))
context={'total_dep':total_dep,'maximum_dep':maximum_dep,'deposit_list':deposit_list}
return render(request,'information_system/Deposit.html',context)
|
laloluka/sol
|
information_system/views.py
|
views.py
|
py
| 2,960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Deposits.objects.aggregate",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Loans.objects.aggregate",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Loans.objects",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "models.Loans",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects.aggregate",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.Avg",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Members.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Members.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Members",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.Members.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Members.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Members",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "models.Deposits.objects.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "models.Deposits.objects.aggregate",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "models.Members.objects.all",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.Members.objects",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "models.Members",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "models.Members.objects.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.Members.objects",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "models.Members",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "forms.MemberForm",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "forms.MemberForm",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "models.Loans.objects.all",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "models.Loans.objects",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "models.Loans",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects.all",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "models.Deposits.objects.aggregate",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "django.db.models.Sum",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects.aggregate",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.Deposits.objects",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "models.Deposits",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "django.db.models.Avg",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 88,
"usage_type": "call"
}
] |
37512481914
|
import os
import pytest
from contextlib import contextmanager
from tempfile import TemporaryDirectory, NamedTemporaryFile
from unittest.mock import patch
from zipfile import ZipFile
from repo2docker.contentproviders import Hydroshare
from repo2docker.contentproviders.base import ContentProviderException
def test_content_id():
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
fake_urlopen.return_value.url = (
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
hydro = Hydroshare()
hydro.detect("10.4211/hs.b8f6eae9d89241cf8b5904033460af61")
assert hydro.content_id == "b8f6eae9d89241cf8b5904033460af61.v1569427757"
def test_detect_hydroshare():
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
fake_urlopen.return_value.url = (
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
# valid Hydroshare DOIs trigger this content provider
expected = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
"version": "https://www.hydroshare.org/hsapi/resource/{}/scimeta/elements",
},
"resource": "b8f6eae9d89241cf8b5904033460af61",
"version": "1569427757",
}
assert (
Hydroshare().detect(
"https://www.hydroshare.org/resource/b8f6eae9d89241cf8b5904033460af61"
)
== expected
)
# assert a call to urlopen was called to fetch version
assert fake_urlopen.call_count == 1
assert (
Hydroshare().detect("10.4211/hs.b8f6eae9d89241cf8b5904033460af61")
== expected
)
# assert 2 more calls were made, one to resolve the DOI and another to fetch the version
assert fake_urlopen.call_count == 3
assert (
Hydroshare().detect(
"https://doi.org/10.4211/hs.b8f6eae9d89241cf8b5904033460af61"
)
== expected
)
# assert 2 more calls were made, one to resolve the DOI and another to fetch the version
assert fake_urlopen.call_count == 5
with patch.object(Hydroshare, "urlopen") as fake_urlopen:
# Don't trigger the Hydroshare content provider
assert Hydroshare().detect("/some/path/here") is None
assert Hydroshare().detect("https://example.com/path/here") is None
# don't handle DOIs that aren't from Hydroshare
fake_urlopen.return_value.url = (
"http://joss.theoj.org/papers/10.21105/joss.01277"
)
def read():
return '{"dates": [{"type": "modified", "start_date": "2019-09-25T16:09:17.006152Z"}]}'
fake_urlopen.return_value.read = read
assert Hydroshare().detect("https://doi.org/10.21105/joss.01277") is None
@contextmanager
def hydroshare_archive(prefix="b8f6eae9d89241cf8b5904033460af61/data/contents"):
with NamedTemporaryFile(suffix=".zip") as zfile:
with ZipFile(zfile.name, mode="w") as zip:
zip.writestr("{}/some-file.txt".format(prefix), "some content")
zip.writestr("{}/some-other-file.txt".format(prefix), "some more content")
yield zfile
class MockInfo:
def __init__(self, content_type):
self.content_type = content_type
def get_content_type(self):
return self.content_type
class MockResponse:
def __init__(self, content_type, status_code):
self.content_type = content_type
self.status_code = status_code
self.mock_info = MockInfo(self.content_type)
def getcode(self):
return self.status_code
def info(self):
return self.mock_info
def test_fetch_bag():
# we "fetch" a local ZIP file to simulate a Hydroshare resource
with hydroshare_archive() as hydro_path:
with patch.object(
Hydroshare,
"urlopen",
side_effect=[
MockResponse("application/html", 200),
MockResponse("application/zip", 200),
],
):
with patch.object(
Hydroshare, "_urlretrieve", side_effect=[(hydro_path, None)]
):
hydro = Hydroshare()
hydro.resource_id = "b8f6eae9d89241cf8b5904033460af61"
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
output = []
for l in hydro.fetch(spec, d):
output.append(l)
unpacked_files = set(os.listdir(d))
expected = set(["some-other-file.txt", "some-file.txt"])
assert expected == unpacked_files
def test_fetch_bag_failure():
with hydroshare_archive():
with patch.object(
Hydroshare, "urlopen", side_effect=[MockResponse("application/html", 500)]
):
hydro = Hydroshare()
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
with pytest.raises(
ContentProviderException,
match=r"Failed to download bag\. status code 500\.",
):
# loop for yield statements
for l in hydro.fetch(spec, d):
pass
def test_fetch_bag_timeout():
with hydroshare_archive():
with patch.object(
Hydroshare, "urlopen", side_effect=[MockResponse("application/html", 200)]
):
hydro = Hydroshare()
spec = {
"host": {
"hostname": [
"https://www.hydroshare.org/resource/",
"http://www.hydroshare.org/resource/",
],
"django_irods": "https://www.hydroshare.org/django_irods/download/bags/",
},
"resource": "123456789",
}
with TemporaryDirectory() as d:
with pytest.raises(
ContentProviderException,
match=r"Bag taking too long to prepare, exiting now, try again later\.",
):
# loop for yield statements
for l in hydro.fetch(spec, d, timeout=0):
pass
|
igorkatinas/jupyter
|
tests/unit/contentproviders/test_hydroshare.py
|
test_hydroshare.py
|
py
| 7,638 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "unittest.mock.patch.object",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 126,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 134,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 162,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.base.ContentProviderException",
"line_number": 177,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch.object",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 188,
"usage_type": "argument"
},
{
"api_name": "unittest.mock.patch",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "repo2docker.contentproviders.Hydroshare",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "repo2docker.contentproviders.base.ContentProviderException",
"line_number": 203,
"usage_type": "argument"
}
] |
19400189989
|
from typing import List
import random
# 398. Random Pick Index
# https://leetcode-cn.com/problems/random-pick-index/
# Reservoir sampling
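# Note (added): reservoir sampling keeps the answer uniform without storing
# all matching indices. When the k-th match is seen, it replaces the current
# answer with probability 1/k, so each of the k matches is ultimately chosen
# with probability exactly 1/k.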
class Solution:
def __init__(self, nums: List[int]):
self.nums = nums
def pick(self, target: int) -> int:
ans = -1
k = 1
for i, each in enumerate(self.nums):
if each == target:
rand = random.randint(1, k)
if rand == 1:
# print('hit')
ans = i
k += 1
return ans
nums = [1, 2, 3, 3, 3]
# Your Solution object will be instantiated and called as such:
obj = Solution(nums)
param_1 = obj.pick(3)
print(param_1)
|
Yigang0622/LeetCode
|
randomNumIndexing.py
|
randomNumIndexing.py
|
py
| 693 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 19,
"usage_type": "call"
}
] |
730586622
|
from selenium import webdriver
from selenium.webdriver.common.by import By
chrome_driver_path = r"C:\Users\Tobiloba\development\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
#driver.get('https://www.amazon.com/dp/B0963P9QTM/ref=sbl_dpx_kitchen-electric-cookware_B08GC6PL3D_0')
#price = driver.find_element(By.CLASS_NAME, "a-price")
#print(price.text)
driver.get('https://www.python.org/')
#
# search = driver.find_element(By.NAME, 'q')
# bug_link = driver.find_element(By.XPATH, '//*[@id="site-map"]/div[2]/div/ul/li[3]/a')
# print(bug_link.text, bug_link.get_attribute('a'))
# print(search.tag_name)
#
# driver.find_elements(By.XPATH, '')
event_times = driver.find_elements(By.CSS_SELECTOR, '.event-widget time')
events = driver.find_elements(By.CSS_SELECTOR, '.event-widget a')
event_dict = {}
# for i in range(len(event_times)):
# for time in event_times:
# for event in events:
# event_dict[i] = f'{time.text}, {event.text}'
for n in range(len(event_times)):
event_dict[n] = {
'name': events[n].text,
'time': event_times[n].text
}
print(event_dict)
#driver.close()
driver.quit()
|
adecool/python100days
|
day-48/main.py
|
main.py
|
py
| 1,180 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 24,
"usage_type": "name"
}
] |
42197867431
|
from django.test import TestCase
from feedback.forms import FeedbackForm
class TestForms(TestCase):
def test_feedback_form_valid_data(self):
form = FeedbackForm(data={
'titolo': 'Recensione',
'descrizione': 'Una descrizione',
'voto': 5
})
self.assertTrue(form.is_valid())
def test_user_form_no_data(self):
form = FeedbackForm(data={})
self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 3)
|
lucacasarotti/CineDate
|
feedback/tests/test_forms.py
|
test_forms.py
|
py
| 506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.test.TestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "feedback.forms.FeedbackForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "feedback.forms.FeedbackForm",
"line_number": 17,
"usage_type": "call"
}
] |
650276737
|
#! /bin/python
# IMPORTANT do threadpoolctl import first (before numpy imports)
from threadpoolctl import threadpool_limits
import os
import sys
import json
import luigi
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from elf.io.label_multiset_wrapper import LabelMultisetWrapper
from elf.label_multiset import create_multiset_from_labels, serialize_multiset
class CreateMultisetBase(luigi.Task):
""" CreateMultiset base class
"""
task_name = 'create_multiset'
src_file = os.path.abspath(__file__)
allow_retry = False
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
# dependency
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
config = LocalTask.default_task_config()
config.update({'compression': 'gzip'})
return config
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# get shape and make block config
shape = vu.get_shape(self.input_path, self.input_key)
# load the create_multiset config
config = self.get_task_config()
compression = config.get('compression', 'gzip')
# require output dataset
with vu.file_reader(self.output_path) as f:
f.require_dataset(self.output_key, shape=shape, chunks=tuple(block_shape),
compression=compression, dtype='uint8')
# update the config with input and output paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'output_path': self.output_path, 'output_key': self.output_key,
'block_shape': block_shape})
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
self._write_log('scheduling %i blocks to be processed' % len(block_list))
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class CreateMultisetLocal(CreateMultisetBase, LocalTask):
"""
CreateMultiset on local machine
"""
pass
class CreateMultisetSlurm(CreateMultisetBase, SlurmTask):
"""
CreateMultiset on slurm cluster
"""
pass
class CreateMultisetLSF(CreateMultisetBase, LSFTask):
"""
CreateMultiset on lsf cluster
"""
pass
#
# Implementation
#
@threadpool_limits.wrap(limits=1) # restrict the numpy threadpool to 1 to avoid oversubscription
def _create_multiset_block(blocking, block_id, ds_in, ds_out):
fu.log("start processing block %i" % block_id)
block = blocking.getBlock(block_id)
bb = vu.block_to_bb(block)
labels = ds_in[bb]
    # we can't encode the paintera ignore label
paintera_ignore_label = 18446744073709551615
pignore_mask = labels == paintera_ignore_label
if pignore_mask.sum() > 0:
labels[pignore_mask] = 0
if labels.sum() == 0:
fu.log("block %i is empty" % block_id)
fu.log_block_success(block_id)
return
# compute multiset from input labels
multiset = create_multiset_from_labels(labels)
ser = serialize_multiset(multiset)
chunk_id = tuple(bs // ch for bs, ch in zip(block.begin, ds_out.chunks))
ds_out.write_chunk(chunk_id, ser, True)
fu.log_block_success(block_id)
def write_metadata(ds_out, max_id):
attrs = ds_out.attrs
attrs['maxId'] = max_id
attrs['isLabelMultiset'] = True
@threadpool_limits.wrap(limits=1) # restrict the numpy threadpool to 1 to avoid oversubscription
def create_multiset(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
    # read the input config
input_path = config['input_path']
input_key = config['input_key']
block_shape = list(config['block_shape'])
block_list = config['block_list']
# read the output config
output_path = config['output_path']
output_key = config['output_key']
shape = list(vu.get_shape(output_path, output_key))
# get the blocking
blocking = nt.blocking([0, 0, 0], shape, block_shape)
# submit blocks
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
if ds_in.attrs.get('isLabelMultiset', False):
ds_in = LabelMultisetWrapper(ds_in)
ds_out = f_out[output_key]
for block_id in block_list:
_create_multiset_block(blocking, block_id, ds_in, ds_out)
if job_id == 0:
max_id = ds_in.attrs['maxId']
write_metadata(ds_out, max_id)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
create_multiset(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/label_multisets/create_multiset.py
|
create_multiset.py
|
py
| 5,506 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "luigi.Task",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "luigi.TaskParameter",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask.default_task_config",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.get_shape",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.blocks_in_volume",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.SlurmTask",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LSFTask",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.block_to_bb",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "elf.label_multiset.create_multiset_from_labels",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "elf.label_multiset.serialize_multiset",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log_block_success",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "threadpoolctl.threadpool_limits.wrap",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "threadpoolctl.threadpool_limits",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils.get_shape",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "nifty.tools.blocking",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "elf.io.label_multiset_wrapper.LabelMultisetWrapper",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log_job_success",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "threadpoolctl.threadpool_limits.wrap",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "threadpoolctl.threadpool_limits",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 182,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 183,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 184,
"usage_type": "attribute"
}
] |
11110715644
|
# coding:utf-8
import pygame
class Main(object):
def __init__(self, title, height, width, Fps=60):
self.height = height
self.width = width
self.title = title
self.Fps = Fps
self.main()
self.vars()
self.events()
def main(self):
        pygame.init()  # initialize pygame
        pygame.mixer.init()  # initialize the mixer for background music
        pygame.display.set_caption(self.title)  # set the window title
        self.screen = pygame.display.set_mode([self.height, self.width])  # keep the screen as an attribute for easy access
def events(self):
        pygame.mixer.music.play(-1, 0)  # play background music (-1 loops forever, 0 starts playback at 0 seconds)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
            # blit the background image at position (0, 0) of the window
            self.screen.blit(self.New_Default_Background_Pic, (0, 0))
            # refresh the display (nothing updates on screen without this)
pygame.display.update()
def vars(self):
        # load the image;
        self.Old_Default_Background_Pic = pygame.image.load("bg_page.jpg")
        # scale the image to the size of the window;
self.New_Default_Background_Pic = pygame.transform.scale(self.Old_Default_Background_Pic, (self.height, self.width))
pygame.image.load("bg_page.jpg")
self.Old_Default_Background_Music = pygame.mixer.music.load("rainy-season.mp3")
if __name__ == "__main__":
Main("Pixel World", 1280, 768)
|
PatrickShun/pygameDemo
|
pygamedemo_run.py
|
pygamedemo_run.py
|
py
| 1,650 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame.quit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display.update",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 40,
"usage_type": "attribute"
}
] |
37635242690
|
from videos_freeze_analyzer import VideosFreezeAnalyzer
from video_valid_points_list_generator import dowload_url
from video_valid_points_list_generator import VideoValidPointsListGeneratorFfmpeg
from video_freeze_analyzer import VideoFreezeAnalyzer
import json
def main(urls):
files = []
for url in urls:
files.append(dowload_url(url))
videos_list =[]
for file_name in files:
video_valid_list = VideoValidPointsListGeneratorFfmpeg(file_name).generate_valid_points_list()
videos_list.append(VideoFreezeAnalyzer().analyze(video_valid_list))
videos_output = VideosFreezeAnalyzer(videos_list).analyze()
results = json.dumps(videos_output, indent=4)
print(results)
if __name__ == '__main__':
urls = ["https://storage.googleapis.com/hiring_process_data/freeze_frame_input_a.mp4",
"https://storage.googleapis.com/hiring_process_data/freeze_frame_input_b.mp4",
"https://storage.googleapis.com/hiring_process_data/freeze_frame_input_c.mp4"]
main(urls)
|
EderRobins/video_freeze_analyzer
|
main.py
|
main.py
|
py
| 1,064 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "video_valid_points_list_generator.dowload_url",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "video_valid_points_list_generator.VideoValidPointsListGeneratorFfmpeg",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "video_freeze_analyzer.VideoFreezeAnalyzer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "videos_freeze_analyzer.VideosFreezeAnalyzer",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 19,
"usage_type": "call"
}
] |
8412088860
|
from rest_framework import serializers
from .models import (
Product,
ProductImage,
Size,
Category
)
class CategoryListSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='products:category-detail-view',
lookup_field='slug'
)
class Meta:
model = Category
fields = (
'id',
'title',
'url',
)
class CategoryDetailSerializer(CategoryListSerializer):
products = serializers.SerializerMethodField()
class Meta:
model = Category
fields = (
'id',
'title',
'products',
)
def get_products(self, obj):
# The source of the SSL context override
return ProductListSerializer(obj.product_set.all(), many=True, context=self.context).data
class ProductListSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='products:product-detail-view', lookup_field='slug')
class Meta:
model = Product
fields = (
'id',
'slug',
'title',
'price',
'image',
'url',
)
class ProductDetailSerializer(ProductListSerializer):
sizes = serializers.SerializerMethodField()
productImages = serializers.SerializerMethodField()
categories = CategoryListSerializer(many=True)
class Meta:
model = Product
fields = (
'id',
'title',
'price',
'image',
'slug',
'categories',
'sizes',
'description',
'productImages',
)
def get_sizes(self, obj):
return SizeSerializer(obj.size_set.all(), many=True).data
def get_productImages(self, obj):
return ProductImageSerializer(
obj.productimage_set.all(),
many=True
).data
class SizeSerializer(serializers.ModelSerializer):
class Meta:
model = Size
fields = (
'id',
'size',
'slug',
'stock',
)
class ProductImageSerializer(serializers.ModelSerializer):
class Meta:
model = ProductImage
fields = (
'id',
'image',
)
|
fanimashaun-r7/Nf_Kicks_Api
|
app/products/serializers.py
|
serializers.py
|
py
| 2,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.HyperlinkedIdentityField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.HyperlinkedIdentityField",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models.Product",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SerializerMethodField",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "models.Product",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "models.Size",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "models.ProductImage",
"line_number": 100,
"usage_type": "name"
}
] |
72638922747
|
import pandas as pd
from dotenv import load_dotenv
import os
# load env
load_dotenv()
# load dataset
url = "https://raw.githubusercontent.com/erijmo/3690/main/healthcare_dataset.csv"
df = pd.read_csv(url)
# set api key
api_key = os.getenv("OPENAI_API_KEY")
def get_healthcare_response(user_input, user_name, df):
# search for keyword in user input
for column in df.columns:
if column.lower() in user_input:
response = f"{user_name}, your {column.lower()} is {df[column].iloc[0]}"
return response
# if no keyword located, ask for clarification
return "I'm sorry, I couldn't understand your request. Can you please provide more details?"
# prompt response
print("HealthcareBot: Hello! I'm your HealthcareBot. May I know your name, please?")
while True:
user_name = input("User: ")
# check if the user's name is in the system
if user_name.lower() in df["Name"].str.lower().values:
print(f"HealthcareBot: Thank you, {user_name}! How can I assist you today?")
break
else:
print("HealthcareBot: I'm sorry, but I couldn't find your name in the system. Please try again.")
# user interaction loop
while True:
user_input = input("User: ")
# check if any exit-related keywords are present in the user input
if any(keyword in user_input.lower() for keyword in ['exit', 'bye', 'quit']):
print("HealthcareBot: Goodbye! If you have more questions, feel free to ask.")
break
response = get_healthcare_response(user_input, user_name, df)
if response:
print("HealthcareBot:", response)
|
erijmo/3690
|
chatbot.py
|
chatbot.py
|
py
| 1,661 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
}
] |
39803355853
|
import pickle
from pathlib import Path
script_location = Path(__file__).absolute().parent
data_loc = script_location / "name_gen_model"
from bangla_linga.BN_countvectorizer import CountVectorizer
import bangla_linga.BN_ngram as ng
class BN_gen_pred(object):
def __init__(self,model_name=data_loc):
self.model_name = model_name
with open(model_name, 'rb') as p:
self.ob = pickle.load(p)
def get_name_ara(self, name=None):
gram_2 = ng.n_gram(name, 2)
g2 = ' '.join(gram_2)
gram_3 = ng.n_gram(name, 3)
g3 = ' '.join(gram_3)
name = [name + " " + g2 + " " + g3]
ct = CountVectorizer()
test = ct.transform(name)
return test
def predict_gender(self, name="None"):
pred_gen = self.ob.predict(self.get_name_ara(name))
if pred_gen == 0:
return 'male'
        else:
            return 'female'
|
Kowsher/Bangla-NLP
|
Bangla Linga/bangla_linga/gender_prediction.py
|
gender_prediction.py
|
py
| 846 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bangla_linga.BN_ngram.n_gram",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bangla_linga.BN_ngram",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "bangla_linga.BN_ngram.n_gram",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bangla_linga.BN_ngram",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "bangla_linga.BN_countvectorizer.CountVectorizer",
"line_number": 27,
"usage_type": "call"
}
] |
27009678128
|
import numpy as np
import run as r
from sklearn.gaussian_process.kernels import ABCMeta, Matern, ConstantKernel, Exponentiation, ExpSineSquared, Hyperparameter, KernelOperator, \
NormalizedKernelMixin, PairwiseKernel, RationalQuadratic, StationaryKernelMixin, RBF, CompoundKernel, DotProduct, Product, GenericKernelMixin, WhiteKernel, \
Kernel, Sum
'''
[id]
112
[name]
GaussianProcessRegressor
[input]
x_train training set; the training data set; 2D array; required; fixed
y_train training labels; the training label set; 1D array; required; fixed
x_test test set; the test data set; 2D array; required; fixed
y_test test labels; the test label set; 1D array; required; fixed
kernel kernel; default None. The kernel specifying the covariance function of the GP. If None is passed, the kernel '1.0 * RBF(1.0)' is used as default. Note that the kernel's hyperparameters are optimized during fitting; optional string; not required; fixed
alpha alpha; default 1e-10. Value added to the diagonal of the kernel matrix during fitting. Larger values correspond to an increased noise level in the observations. This can also prevent potential numerical issues during fitting by ensuring that the computed values form a positive-definite matrix. If an array is passed, it must have the same number of entries as the data used for fitting and is used as a datapoint-dependent noise level. Note that this is equivalent to adding a WhiteKernel with c=alpha. Allowing the noise level to be specified directly as a parameter is mainly for convenience and for consistency with Ridge; optional array or float; not required; fixed
optimizer optimizer; default 'fmin_l_bfgs_b'. Either one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize is used. If None is passed, the kernel's parameters are kept fixed. The available internal optimizer is 'fmin_l_bfgs_b'; optional; not required; fixed
n_restarts_optimizer number of restarts; default 0. The number of restarts of the optimizer for finding the kernel's parameters. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniformly at random from the space of allowed theta values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer == 0 means that one run is performed; optional integer; not required; fixed
normalize_y normalize_y; default False. Whether the target values y are normalized, i.e. whether the mean and variance of the target values are set to 0 and 1 respectively. This is recommended for cases where zero-mean, unit-variance priors are used. Note that in this implementation the normalization is reversed before the GP predictions are reported; optional boolean; not required; fixed
copy_X_train copy_X_train; default True. If True, a persistent copy of the training data is stored in the object. Otherwise, only a reference to the training data is stored, which may cause predictions to change if the data is modified externally; optional boolean; not required; fixed
random_state random seed; default None. Determines the random number generation used to initialize the centers. Pass an int for reproducible results across multiple function calls; optional integer; not required; fixed
[output]
train_predict predictions; predictions on the training set; 1D array (numeric)
test_predict predictions; predictions on the test set; 1D array (numeric)
train_score score; accuracy of the predictions on the training set; number
test_score score; accuracy of the predictions on the test set; number
X_train_ X_train_; feature vectors or other representations of the training data (also required for prediction); 2D array
y_train_ y_train_; target values in the training data (also required for prediction); 1D array
L_ L_; lower-triangular Cholesky decomposition of the kernel in 'X_train_'; 2D array
kernel_ kernel_; the kernel used for prediction. The structure of the kernel is the same as the one passed as a parameter, but with optimized hyperparameters; string
alpha_ alpha; dual coefficients of the training data points in kernel space; 1D array
log_marginal_likelihood_value_ log-marginal likelihood; the log-marginal likelihood of 'self.kernel_.theta'; float
[outline]
[describe]
Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams.
In addition to the standard scikit-learn estimator API, GaussianProcessRegressor: * allows prediction without prior fitting (based on the GP prior); * provides an additional method sample_y(X), which evaluates samples drawn from the GPR (prior or posterior) at given inputs; * exposes a method log_marginal_likelihood(theta), which can be used externally for other ways of selecting hyperparameters, e.g. via Markov chain Monte Carlo.
'''
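# Added illustration: a minimal, self-contained sketch of calling sklearn's
# GaussianProcessRegressor directly, mirroring the defaults documented above.
# The helper name _gpr_sketch and the synthetic data are assumptions made for
# this example only; in this module the actual work is delegated to r.run below.
def _gpr_sketch():
    from sklearn.gaussian_process import GaussianProcessRegressor
    x = np.array([[1.0], [3.0], [5.0], [6.0], [7.0], [8.0]])
    y = (x * np.sin(x)).ravel()
    # ConstantKernel(1.0) * RBF(1.0) is the documented default kernel
    gpr = GaussianProcessRegressor(kernel=ConstantKernel(1.0) * RBF(1.0),
                                   alpha=1e-10, n_restarts_optimizer=2,
                                   normalize_y=False, random_state=0)
    gpr.fit(x, y)
    mean, std = gpr.predict(x, return_std=True)  # posterior mean and stddev
    # kernel_ holds the hyperparameters optimized during fitting
    return gpr.kernel_, gpr.log_marginal_likelihood_value_, mean, std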
def main(x_train, y_train, x_test, y_test,
kernel=None, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None
):
if type(x_train) is str:
x_train = eval(x_train)
if type(y_train) is str:
y_train = eval(y_train)
if type(x_test) is str:
x_test = eval(x_test)
if type(y_test) is str:
y_test = eval(y_test)
if type(kernel) is str:
kernel = eval(kernel)
if type(alpha) is str:
alpha = eval(alpha)
if type(n_restarts_optimizer) is str:
n_restarts_optimizer = eval(n_restarts_optimizer)
if type(normalize_y) is str:
normalize_y = eval(normalize_y)
if type(copy_X_train) is str:
copy_X_train = eval(copy_X_train)
if type(random_state) is str:
random_state = eval(random_state)
return r.run(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, kernel=kernel,
alpha=alpha,
optimizer=optimizer,
n_restarts_optimizer=n_restarts_optimizer,
normalize_y=normalize_y,
copy_X_train=copy_X_train,
random_state=random_state)
if __name__ == '__main__':
import numpy as np
import json
array = np.loadtxt('D:\\123_2.csv', delimiter=',')
array = array[0:20, :]
y = array[:, -1].tolist()
x = np.delete(array, -1, axis=1).tolist()
array = array.tolist()
back = main(x, y, x, y)
print(back)
for i in back:
print(i + ":" + str(back[i]))
json.dumps(back)
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/gaussian_processes/GaussianProcessRegressor/main.py
|
main.py
|
py
| 6,034 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "run.run",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.delete",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 98,
"usage_type": "call"
}
] |
17522204148
|
import json
import sqlite3
from urllib import response
from fastapi.testclient import TestClient
import time
import pytest
from main import app, conn, c
from models import AtualizarFilme, AtualizarPlaneta, Filme, Planeta, Excluido, InserirPlaneta
client = TestClient(app)
# def test_create_schema():
# c.executescript("""
# BEGIN TRANSACTION;
# DROP TABLE IF EXISTS "Filme";
# CREATE TABLE IF NOT EXISTS "Filme" (
# "id" INTEGER NOT NULL,
# "Nome" TEXT NOT NULL,
# "Data_de_lancamento" TEXT NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id")
# );
# DROP TABLE IF EXISTS "Planeta";
# CREATE TABLE IF NOT EXISTS "Planeta" (
# "id" INTEGER NOT NULL,
# "Nome" TEXT NOT NULL,
# "Clima" TEXT NOT NULL,
# "Diametro" INTEGER NOT NULL,
# "Populacao" INTEGER NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id")
# );
# DROP TABLE IF EXISTS "Planeta_Apareceu_Filme";
# CREATE TABLE IF NOT EXISTS "Planeta_Apareceu_Filme" (
# "id" INTEGER NOT NULL UNIQUE,
# "PlanetaID" INTEGER NOT NULL,
# "FilmeID" INTEGER NOT NULL,
# "Excluido" INTEGER NOT NULL,
# PRIMARY KEY("id" AUTOINCREMENT)
# );
# INSERT INTO "Filme" VALUES (1,'A morte do jedi','2020-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Filme" VALUES (2,'O jedi não morreu','2021-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Filme" VALUES (3,'O jedi nunca morreu','1970-01-01 00:33:41+00:00',0);
# INSERT INTO "Filme" VALUES (4,'Ou será que morreu?','1970-01-01 00:33:41+00:00',0);
# INSERT INTO "Filme" VALUES (5,'Não morreu, eu sabia!','2032-04-23 10:20:30.400000+02:30',0);
# INSERT INTO "Planeta" VALUES (1,'Marte','vento',55,66,0);
# INSERT INTO "Planeta" VALUES (2,'Marte 2','vento',10000,564612,0);
# INSERT INTO "Planeta" VALUES (3,'Planeta Voador','string',787878,152314856,0);
# INSERT INTO "Planeta" VALUES (5,'Nao lembro','murky',5489645,5164,0);
# INSERT INTO "Planeta" VALUES (6,'Planetoide','string',48654,1,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (1,1,1,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (2,1,2,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (3,2,2,0);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (4,6,1,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (5,6,2,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (10,6,3,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (11,6,4,1);
# INSERT INTO "Planeta_Apareceu_Filme" VALUES (12,6,5,1);
# COMMIT;
# """)
def test_read_root():
response = client.get('/')
assert response.status_code == 200
assert response.json() == {'Hello,': 'World!'}
def test_read_planets_error_without_bool():
response = client.get('/api/v1/planets')
# Without the required ?show_deleted=true query parameter
assert response.status_code == 422
def test_read_planets_deleted_true():
response = client.get('/api/v1/planets?show_deleted=true')
assert response.status_code == 200
print(type(response.json()))
# assert response.json() == [
# {
# "id": 1,
# "Nome": "Marte",
# "Clima": "vento",
# "Diametro": 55,
# "Populacao": 66,
# "Excluido": 0,
# "Filmes_em_que_apareceu": [
# 1,
# 2
# ]
# },
# {
# "id": 2,
# "Nome": "Marte",
# "Clima": "vento",
# "Diametro": 55,
# "Populacao": 66,
# "Excluido": 0,
# "Filmes_em_que_apareceu": [
# 2
# ]
# },
# {
# "id": 3,
# "Nome": "sexomaluco",
# "Clima": "string",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": []
# },
# {
# "id": 5,
# "Nome": "string",
# "Clima": "murky",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": []
# },
# {
# "id": 6,
# "Nome": "string",
# "Clima": "string",
# "Diametro": 0,
# "Populacao": 0,
# "Excluido": 1,
# "Filmes_em_que_apareceu": [
# 1,
# 2,
# 3,
# 4,
# 5
# ]
# }
# ]
def test_read_planets_deleted_false():
response = client.get('/api/v1/planets?show_deleted=false')
assert response.status_code == 200
def test_read_planet():
response = client.get('/api/v1/planets/1')
assert response.status_code == 200
assert response.json() == {
"id": 1,
"Nome": "Marte",
"Clima": "vento",
"Diametro": 55,
"Populacao": 66,
"Excluido": 0,
"Filmes_em_que_apareceu": [
1,
2
]
}
def test_create_planet_movie_doesnt_exist():
json={
"id": 61,
"Nome": "string",
"Diametro": 0,
"Populacao": 0,
"FilmesID": [
0
],
"Excluido": 1
}
response = client.post('/api/v1/planets',
json=json
)
assert response.status_code == 400
#assert response.json() == {"detail": 'Pelo menos um dos filmes inseridos não existe',}
# def test_create_planet():
# json={
# "id": 44,
# "Nome": "teste",
# "Diametro": 0,
# "Populacao": 0,
# "FilmesID": [
# 1
# ],
# "Excluido": 0
# }
# response = client.post('/api/v1/planets',
# json=json
# )
# assert response.status_code == 200
|
MarceloTerra0/FastAPI_TesteTuring
|
test_main.py
|
test_main.py
|
py
| 5,453 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "main.app",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "urllib.response",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "urllib.response.json",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "urllib.response",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "urllib.response",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "urllib.response",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "urllib.response.json",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "urllib.response",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "urllib.response",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "urllib.response",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "urllib.response.json",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "urllib.response",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "urllib.response",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "urllib.response.status_code",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "urllib.response",
"line_number": 173,
"usage_type": "name"
}
] |
21341173003
|
import torch
from torch.optim import SGD
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
from models_torch.FFM import FFM_Layer
from utils.load_data import load_criteo_data
if __name__ == '__main__':
(X_train, y_train), (X_test, y_test), feature_info = load_criteo_data('dataset/criteo_sample.csv',
sparse_return='category')
X_train = torch.tensor(X_train, dtype=torch.float32)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.float32)
# Hyperparameters
k = 8
n_epoch = 10
lr = 0.01
# Initialization
model = FFM_Layer(dense_features=feature_info['dense_feature'], sparse_features=feature_info['sparse_feature'],
sparse_feature_dim=feature_info['max_one_hot_dim'], k=k)
optim = SGD(lr=lr, params=model.parameters(), weight_decay=1e-4)
criterion = F.binary_cross_entropy
# Train the model
for epoch in range(n_epoch):
model.train()
logits = torch.reshape(model(X_train), (-1,))
loss = criterion(logits, y_train)
# Update the weights
optim.zero_grad() # clear accumulated gradients
loss.backward()
optim.step()
if epoch:  # 'epoch % 1 == 0' is always true, so this only skips logging at epoch 0
print('epoch: {}, loss: {}'.format(epoch, loss))
# Evaluate the model
model.eval()
with torch.no_grad():
pred = torch.reshape(model(X_test), (-1,))
loss = criterion(pred, y_test)
pred = [1 if x > 0.5 else 0 for x in pred]
print('acc: {}, loss: {}'.format(accuracy_score(y_test, pred), loss))
|
KrianJ/CtrEstimate
|
predict_ffm_torch.py
|
predict_ffm_torch.py
|
py
| 1,739 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.load_data.load_criteo_data",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models_torch.FFM.FFM_Layer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.optim.SGD",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.binary_cross_entropy",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.reshape",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.reshape",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 43,
"usage_type": "call"
}
] |
10695567948
|
import subprocess
from multiprocessing import Pool
import os
import numpy as np
import sys
def Thread(arg):
print(arg)
# Key the log file on the worker pid so parallel workers don't clobber
# one another's output, and close the file when the command finishes.
with open('output/' + str(os.getpid()) + '.log', 'w') as file:
subprocess.call(arg, shell=True, stdout=file)
def main():
seed = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
batch = np.array([10, 50, 100, 200])
# batch = batch.repeat(9)
batch = np.tile(batch, 9)
hidden = np.array([25, 50, 100])
hidden = hidden.repeat(4)
hidden = np.tile(hidden, 3)
optim = {0: 'adam', 1: 'adagrad', 2: 'adadelta', 3: 'sgd'}
op_idx = np.array([0, 1, 2, 3])
op_idx = op_idx.repeat(12)
lr = np.array([0.1, 0.01, 0.001])
ed_pass = np.array([4, 8, 10])
idx = [x for x in range(36)]
arglist = []
st = int(sys.argv[1])
print(st)
end = int(sys.argv[2])
print(end)
for i in range(st, end):
opt_st = optim[op_idx[i]]
pcmd = "python dt_pl_parser.py --train data/wsj10_tr --tag_num 1 --hidden " + str(
hidden[i]) + " " + "--batch " + str(
batch[i]) + " " + "--optim " + opt_st + " " + "--do_eval --use_trigram " + "--sample_idx " + str(idx[i])
arglist.append(pcmd)
print(pcmd)
p = Pool(4)
p.map(Thread, arglist, chunksize=1)
p.close()
p.join()
if __name__ == '__main__':
main()
|
mikufan/NCRFAE_DepParsing
|
noderun_pl_model.py
|
noderun_pl_model.py
|
py
| 1,323 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "subprocess.call",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 44,
"usage_type": "call"
}
] |
25316393069
|
from typing import List, Set, Callable, Optional, Iterator
import math
class Tile:
def __init__(self, tile: List[str], tile_id: int = 0):
self.tile = tile
self.id = tile_id
self.edge_len = len(tile)
def right_edge(self) -> str:
return "".join(t[-1] for t in self.tile)
def left_edge(self) -> str:
return "".join(t[0] for t in self.tile)
def top_edge(self) -> str:
return self.tile[0]
def bottom_edge(self) -> str:
return self.tile[-1]
def rotate_right(self) -> None:
rotated = []
for ix in range(self.edge_len):
rotated.append(
"".join(
[
self.tile[self.edge_len - jx - 1][ix]
for jx in range(self.edge_len)
]
)
)
self.tile = rotated
def flip(self) -> None:
flipped = []
for t in self.tile[::-1]:
flipped.append(t)
self.tile = flipped
def check(order: List[Tile], tile: Tile, edge_size: int) -> bool:
# A tile fits if its left edge matches the right edge of its left
# neighbour (unless it starts a new row) and its top edge matches the
# bottom edge of the tile one row above.
return not (
(
(len(order) + 1) % edge_size != 1
and tile.left_edge() != order[len(order) - 1].right_edge()
)
or (
len(order) >= edge_size
and tile.top_edge() != order[len(order) - edge_size].bottom_edge()
)
)
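# Applying these transforms cumulatively enumerates all 8 orientations of a
# tile: identity, three successive 90-degree right rotations, a flip, then
# three more rotations of the flipped tile. Each lambda mutates the tile in place.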
reassemble: List[Callable[[Tile], Optional[Tile]]] = [
lambda tile: tile,
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.flip(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
lambda tile: tile.rotate_right(),
]
def recursion(
order: List[Tile], visited: Set[Tile], tiles: List[Tile], edge_size: int
) -> List[Tile]:
if len(order) == len(tiles):
return order
result = []
for tile in tiles:
if tile not in visited:
for r in reassemble:
r(tile)
if check(order, tile, edge_size):
result = recursion(
order + [tile], visited.union({tile}), tiles, edge_size
)
if result:
return result
return result
def part1(tiles: List[Tile]) -> int:
size = len(tiles)
edge_size = int(math.sqrt(size))
order = recursion([], set(), tiles, edge_size)
upper_left = 0
upper_right = edge_size - 1
bottom_left = size - edge_size
bottom_right = size - 1
return (
order[upper_left].id
* order[upper_right].id
* order[bottom_left].id
* order[bottom_right].id
)
def extract_data(lines: List[str]) -> Iterator[Tile]:
tile: List[str] = []
for line in lines + [""]:
if "Tile" in line:
tile_id = int(line.split()[1].strip(":"))
elif line:
tile += [line]
elif tile:
yield Tile(tile, tile_id)
tile = []
with open("input") as input_file:
lines = [line for line in input_file.read().splitlines()]
print(part1(list(extract_data(lines))))
|
stx73/aoc2020
|
day20/p1.py
|
p1.py
|
py
| 3,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 108,
"usage_type": "name"
}
] |
24905708193
|
import cadquery as cq
import cadquery.selectors as cqs
import logging
import importlib
import utilities # TODO: Change to a relative import ".utilities" to preempt name clashes.
from types import SimpleNamespace as Measures
from math import sin, cos, radians
# A parametric cover that can be hooked to the top edge of an eyeglasses lens.
#
# You might want to reduce the amount of light reaching the eye even more for practical use. For
# that, you can drill chains of small holes through the top face and the inclined face at the
# bottom and then sew flexible black material there. Cut the material so that it seals well against
# the user's face.
#
# To use this design, you need Python, CadQuery (https://cadquery.readthedocs.io/en/latest/) and
# ideally also CQ-Editor, the CadQuery IDE (https://github.com/CadQuery/CQ-editor).
#
# License: Unlicence and Creative Commons Public Domain Dedication (CC-0).
#
# Tasks for now
# TODO: Rework the design so that it consists of a sweep operation along a single path, with wires
# automatically swept orthogonal to the path. Wires are then defined as a parametrized hook
# profile together with a position along that path in percent or millimeters from both ends.
# In addition, the type of transition (ruled or round) between wires should be defined. The
# problem is of course that sweeps always interpolate between wires with splines. So probably
# lofting should be used, and the rounded path is only there to place the wires exactly, not to
# sweep them along. The rounding is achieved by choosing round transition for lofting.
# TODO: Replace the upper hook bar over the lens with two narrow hooks, also a bit shorter than now.
# TODO: Remove the hook along the current stem cover part, keeping the hook infill though. This
# should help attaching and detaching the lens cover.
# TODO: Round the lower back corner, so it cannot poke into the head. This can be done by using
# three profiles with "round" transition for lofting.
# TODO: Cut off the lower right corner according to the manually cut prototype. This can be done
# by using three profiles with "round" transition for lofting.
# TODO: Add a top surface light blocker, carved according to the face contour. The shape should be
# an arc, with the distance between arc and outer corner being 25 mm. Can be created from the
# lens cover path and this arc, then extruding that face by 1.6 mm. Since this can easily be
# 3D printed, there is no reason to use other material.
#
# Tasks for later
# TODO: Add documentation for all methods.
# TODO: Reduce the size of this script by a lot by replacing all the *_start_wire() and *_end_wire()
# methods with calls to a more general method. This requires proper specification of position
# and rotation at once, possibly also by combining multiple other such positions and rotations.
# In CadQuery, the Location type is for that (or maybe it has a different name, but there is one).
# TODO: Add the general edge chamfering. That's very impractical though for a design like this,
# as edge selection is very difficult here.
# TODO: Add sewing holes along the upper horizontal surface and on the lower 45° angled surface,
# for sewing on flexible parts that block out light entering from below and above the glasses.
# TODO: Implement vertical arcing for the outside of the cover. But not really needed in practice.
# TODO: Add the chamfers to the lower corner of the lens.
# TODO: Improve how space is made for the stem-to-lens frame element of 1.2 mm additional thickness.
# TODO: Add a clip that connects to the bottom of the glasses lens. Can be done by
# adjusting the shape of the "hook" profile used for the sweep.
# TODO: Replace the bent section between lens and stem cover with a spline that smoothly
# continues to both the lens and stem cover sections.
measures = Measures(
part_name = "lens_cover",
color = "steelblue",
alpha = 0.0,
debug = True,
side = "left", # "left" or "right". TODO: Implement "right" using mirroring.
thickness = 1.6, # For FDM, that's 4 walls with a 0.4 mm nozzle. Corrected from 0.8.
edge_smoothing = 0.4, # For all edges, to make them nicer to the touch.
lens_cover = Measures(
# Lens width 58.3 mm + stem corner width 5.6 mm - stem cover width 6.1 mm.
# Corrected from 55.5 mm
width = 57.8,
height = 35.3, # Corrected from 33.5
vertical_arc_height = 1.7, # TODO: Implement that this is utilized, then reduce hook_depth.
horizontal_arc_height = 2.3,
# Only small radii are possible due to a bug. Corner-case radii may result in non-manifoldness.
lower_corner_radius = 2.0,
hook_depth = 4.6, # Lens thickness 2.9 mm, vertical arc height 1.7 mm.
hook_height = 8.0,
frame_attachment_depth = 1.4, # Provides additional hook depth at outer side of lens, for frame.
overhang_angle_start = 45,
# Visually adapted to achieve the same lower endpoint position compared to a shape with
# frame_attachment_depth = 0.
overhang_angle_end = 48,
overhang_size_start = 7.0,
# Visually adapted to achieve the same lower endpoint position compared to a shape with
# frame_attachment_depth = 0.
overhang_size_end = 7.5
),
corner_cover = Measures(
height = 35.3,
hook_depth = 5.0, # Adapted visually to create a corner. Corrected from 7.0.
# TODO: Calculate hook_height always automatically as the midpoint between lens cover and
# hinge cover height, as when forgetting to do this manually, the interpolation can create
# shapes that let the lofts partially fail.
hook_height = 8.0, # Midpoint between lens cover and hinge cover hook heights.
hook_height_infill = 2.7, # Midpoint between lens cover and stem cover hook heights. Avoids interpolation issues.
overhang_angle = 45,
overhang_size = 7.0
),
hinge_cover = Measures(
depth = 18.0, # Measured from the lens cover back plane.
height = 35.3,
path_angle = 100,
lower_corner_radius = 12.0,
hook_depth = 4.5, # Measured glasses stem width is 3.8 mm.
hook_height = 8.0,
hook_height_infill = 5.4,
overhang_angle = 45,
overhang_size = 7.0
),
stem_cover = Measures(
depth = 22.0, # Measured from the lens cover back plane.
height = 35.3,
path_angle = 100,
lower_corner_radius = 12.0,
hook_depth = 4.5, # Measured glasses stem width is 3.8 mm.
hook_height = 14.0,
hook_height_infill = 5.4,
overhang_angle = 45,
overhang_size = 7.0
),
)
# Selective reloading to pick up changes made between script executions.
# See: https://github.com/CadQuery/CQ-editor/issues/99#issue-525367146
importlib.reload(utilities)
class LensCover:
def __init__(self, workplane, measures):
"""
A parametric eye cover that can be hooked to the top edge of eyeglasses.
:param workplane: The CadQuery workplane to create the eye cover on. This workplane is
assumed to be coplanar with the face of the eyeglass user, with the plane's normal
pointing into the "front" direction of the model.
:param measures: The measures to use for the parameters of this design. Expects a nested
[SimpleNamespace](https://docs.python.org/3/library/types.html#types.SimpleNamespace)
object. See example above for the possible attributes.
"""
self.model = workplane
self.measures = measures
self.log = logging.getLogger(__name__)
m = self.measures
# Points on the sweep path that we'll need repeatedly.
m.lens_startpoint = (0, 0)
# We create a space for the rounded edge that is 60-70% of the wrap radius, to achieve a
# smooth shape transition for angles slightly larger than 90°.
m.lens_endpoint = (-m.lens_cover.width, 0)
m.hinge_startpoint = (-m.lens_cover.width, -m.lens_cover.hook_depth - 2 * m.thickness)
# toTuple() yields a (x,y,z) coordinate, but we only want (x,y) here.
# When slicing in Python "[0:2]", the specified end element (index 2) will not be in the result.
m.stem_startpoint = self.hinge_path().val().positionAt(1).toTuple()[0:2]
self.build()
def profile_wire(self, height, hook_depth, hook_height, hook_height_infill = 0.1,
overhang_angle = 90, overhang_size = 0.1, debug_name = None
):
"""
Object of class Wire, representing the base shape of the hook.
A multi-section sweep requires wires placed along the path to use for the shape-adapted
sweeping. These wires should be orthogonal to the path to get the desired shape.
"""
# hook_height_infill is by default 0.1 just because the CAD kernel cannot handle 0 here.
# TODO: Create a profile with a curved section. Proposal: Use swipe() and
# convert a face of the resulting 3D shape back into a wire.
m = self.measures
# Remember that translate() uses global (!) coordinates.
wire = (
cq.Workplane("YZ")
# Covering outer element of the profile.
.rect(m.thickness, height, forConstruction = True)
.translate((0, -0.5 * m.thickness, -0.5 * height))
.toPending()
# Horizontal element of the hook, including hook infill if any.
.copyWorkplane(cq.Workplane("YZ"))
.rect(hook_depth + 2 * m.thickness, m.thickness + hook_height_infill, forConstruction = True)
.translate((0, -0.5 * (hook_depth + 2 * m.thickness), -0.5 * (m.thickness + hook_height_infill)))
.toPending()
# Vertical element of the hook with the tip.
.copyWorkplane(cq.Workplane("YZ"))
.rect(m.thickness, hook_height + m.thickness, forConstruction = True)
# -0.499 instead of -0.5 due to a malfunction of union_pending() when having a complete
# intersection in this corner. Strangely, only for this corner.
.translate((0, -hook_depth - 1.5 * m.thickness, -0.499 * (hook_height + m.thickness)))
.toPending()
# Overhang at the bottom of the profile shape.
.copyWorkplane(cq.Workplane("YZ"))
.rect(m.thickness, overhang_size, forConstruction = True)
# 0.499 because otherwise union_pending() cannot create a correct result. This happens due to
# the CAD kernel limitations of unioning shapes that share one corner exactly.
.translate((0, -0.5 * m.thickness, -height - 0.499 * overhang_size))
.rotate((1, 0, -height), (-1, 0, -height), overhang_angle)
.toPending()
.union_pending()
.ctx.pendingWires[0]
)
if m.debug and debug_name is not None:
showable_wire = cq.Workplane().newObject([wire]).wires().val()
show_object(showable_wire, name = debug_name)
return wire
# Wire at the start of the sweep, defining the lens cover cross-section next to the nose.
def lens_start_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([
self.profile_wire(
height = m.lens_cover.height,
hook_depth = m.lens_cover.hook_depth,
hook_height = m.lens_cover.hook_height,
overhang_angle = m.lens_cover.overhang_angle_start,
overhang_size = m.lens_cover.overhang_size_start
)
])
.wires()
.val()
)
if m.debug: show_object(wire, name = "lens_start_wire")
return wire
# Wire at the end of the lens / start of the bent section.
# Position is slightly approximate as it treats the path as made from straight lines.
def lens_end_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.lens_cover.height,
hook_depth = m.lens_cover.hook_depth + m.lens_cover.frame_attachment_depth,
hook_height = m.lens_cover.hook_height,
overhang_angle = m.lens_cover.overhang_angle_end,
overhang_size = m.lens_cover.overhang_size_end
)])
.translate((*m.lens_endpoint, 0))
.translate((0, 1.4, 0)) # TODO: Make this parametric.
.val()
)
if m.debug: show_object(wire, name = "lens_end_wire")
return wire
# Wire at the end of the lens / start of the bent section.
# Position is slightly approximate as it treats the path as made from straight lines.
def corner_center_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.corner_cover.height,
hook_depth = m.corner_cover.hook_depth,
hook_height = m.corner_cover.hook_height,
hook_height_infill = m.corner_cover.hook_height_infill,
overhang_angle = m.corner_cover.overhang_angle,
overhang_size = m.corner_cover.overhang_size
)])
# Move the wire to the +y part so we can rotate around origin to rotate around the
# back edge.
.translate((0, m.corner_cover.hook_depth + 2 * m.thickness, 0))
# Rotate around the back edge of the initial wire, now at origin.
# Rotate by half the angle that the hinge start wire will have.
.rotate((0, 0, 1), (0, 0, -1), 0.5 * (-90 + (m.hinge_cover.path_angle - 90)))
# Bring the wire into its final position.
.translate((*m.lens_endpoint, 0))
.translate((0, -m.lens_cover.hook_depth - 2 * m.thickness, 0))
.val()
)
if m.debug: show_object(wire, name = "corner_center_wire")
return wire
# Wire at the start of the stem cover / end of the bent section.
# Position is slightly approximate as it treats the path as made from straight lines.
def hinge_start_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.hinge_cover.height,
hook_depth = m.hinge_cover.hook_depth,
hook_height = m.hinge_cover.hook_height,
hook_height_infill = m.hinge_cover.hook_height_infill,
overhang_angle = m.hinge_cover.overhang_angle,
overhang_size = m.hinge_cover.overhang_size
)])
.wires()
# Rotate around the back (-y) edge of the initial wire.
.rotate(
(0, -m.hinge_cover.hook_depth - 2 * m.thickness, 1),
(0, -m.hinge_cover.hook_depth - 2 * m.thickness, -1),
-90 + (m.hinge_cover.path_angle - 90)
)
# Move so that the original back edge is at the origin, to prepare the move along the path.
.translate((0, m.hinge_cover.hook_depth + 2 * m.thickness, 0))
# Easiest to find the point at the very start of the path is via positionAt(0)
.translate(self.hinge_path().val().positionAt(0).toTuple())
.val()
)
if m.debug: show_object(wire, name = "hinge_start_wire")
return wire
def hinge_end_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.hinge_cover.height,
hook_depth = m.hinge_cover.hook_depth,
hook_height = m.hinge_cover.hook_height,
hook_height_infill = m.hinge_cover.hook_height_infill,
overhang_angle = m.hinge_cover.overhang_angle,
overhang_size = m.hinge_cover.overhang_size
)])
.wires()
# Rotate around the back (-y) edge of the initial wire.
.rotate(
(0, -m.hinge_cover.hook_depth - 2 * m.thickness, 1),
(0, -m.hinge_cover.hook_depth - 2 * m.thickness, -1),
-90 + (m.hinge_cover.path_angle - 90)
)
# Move so that the original back edge is at the origin, to prepare the move along the path.
.translate((0, m.hinge_cover.hook_depth + 2 * m.thickness, 0))
# Easiest to find the point at the very end of the path is via positionAt(1)
.translate(self.hinge_path().val().positionAt(1).toTuple())
.val()
)
if m.debug: show_object(wire, name = "hinge_end_wire")
return wire
def stem_start_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.stem_cover.height,
hook_depth = m.stem_cover.hook_depth,
hook_height = m.stem_cover.hook_height,
hook_height_infill = m.stem_cover.hook_height_infill,
overhang_angle = m.stem_cover.overhang_angle,
overhang_size = m.stem_cover.overhang_size
)])
.wires()
# Rotate around the back (-y) edge of the initial wire.
.rotate(
(0, -m.stem_cover.hook_depth - 2 * m.thickness, 1),
(0, -m.stem_cover.hook_depth - 2 * m.thickness, -1),
-90 + (m.stem_cover.path_angle - 90)
)
# Move so that the original back edge is at the origin, to prepare the move along the path.
.translate((0, m.stem_cover.hook_depth + 2 * m.thickness, 0))
# Easiest to find the point at the very beginning of the path is via positionAt(0)
# But not exactly at the beginning as that would place the wire into the same position
# as the hinge end wire, and we can't loft wires in the same position.
.translate(self.stem_path().val().positionAt(0.01).toTuple())
.val()
)
if m.debug: show_object(wire, name = "stem_end_wire")
return wire
def stem_end_wire(self):
m = self.measures
wire = (
cq.Workplane().newObject([self.profile_wire(
height = m.stem_cover.height,
hook_depth = m.stem_cover.hook_depth,
hook_height = m.stem_cover.hook_height,
hook_height_infill = m.stem_cover.hook_height_infill,
overhang_angle = m.stem_cover.overhang_angle,
overhang_size = m.stem_cover.overhang_size
)])
.wires()
# Rotate around the back (-y) edge of the initial wire.
.rotate(
(0, -m.stem_cover.hook_depth - 2 * m.thickness, 1),
(0, -m.stem_cover.hook_depth - 2 * m.thickness, -1),
-90 + (m.stem_cover.path_angle - 90)
)
# Move so that the original back edge is at the origin, to prepare the move along the path.
.translate((0, m.stem_cover.hook_depth + 2 * m.thickness, 0))
# Easiest to find the point at the very end of the path is via positionAt(1)
.translate(self.stem_path().val().positionAt(1).toTuple())
.val()
)
if m.debug: show_object(wire, name = "stem_end_wire")
return wire
def lens_path(self):
"""
The sweeping path follows the planar upper edge of the eye cover shape.
Points are defined in the XY plane, drawing a cover for the left lens from origin to -x.
"""
m = self.measures
path = (
cq
.Workplane("XY")
.moveTo(*m.lens_startpoint)
.sagittaArc(m.lens_endpoint, -m.lens_cover.horizontal_arc_height)
.wire() # Since we don't want a closed wire, close() will not create the wire. We have to.
)
if m.debug: show_object(path, name = "lens_path")
return path
def hinge_path(self):
m = self.measures
path = (
cq
.Workplane("XY")
.moveTo(*m.hinge_startpoint)
.polarLine(m.hinge_cover.depth, 360 - m.hinge_cover.path_angle)
.wire() # Since we don't want a closed wire, close() will not create the wire. We have to.
)
if m.debug: show_object(path, name = "hinge_path")
return path
def stem_path(self):
m = self.measures
path = (
cq
.Workplane("XY")
.moveTo(*m.stem_startpoint)
.polarLine(m.stem_cover.depth, 360 - m.stem_cover.path_angle)
.wire() # Since we don't want a closed wire, close() will not create the wire. We have to.
)
if m.debug: show_object(path, name = "stem_path")
return path
def build(self):
cq.Workplane.union_pending = utilities.union_pending
m = self.measures
# Sweeping along the path sections.
# Due to CadQuery issue #808 (https://github.com/CadQuery/cadquery/issues/808), we cannot
# simply do one multi-section sweep along a single path with all six wires along it.
# And, the default transition = "right" would crash CadQuery-Editor due to a CAD kernel bug.
lens_cover = cq.Workplane("YZ")
lens_cover.ctx.pendingWires.extend([
self.lens_start_wire(),
self.lens_end_wire()
])
lens_cover = lens_cover.sweep(
self.lens_path(),
multisection = True,
transition = "round"
)
corner_cover = cq.Workplane("YZ")
corner_cover.ctx.pendingWires.extend([
self.lens_end_wire(),
self.corner_center_wire(),
self.hinge_start_wire()
])
corner_cover = corner_cover.loft()
hinge_and_stem_cover = cq.Workplane("YZ")
hinge_and_stem_cover.ctx.pendingWires.extend([
self.hinge_start_wire(),
self.hinge_end_wire(),
self.stem_start_wire(),
self.stem_end_wire()
])
hinge_and_stem_cover = hinge_and_stem_cover.loft(ruled = True)
# The internal combine function of loft() and sweep() is a bit fragile, so to
# obtain a single solid we create the individual parts first and then union()
# them together here.
self.model = (
cq.Workplane("YZ")
.union(lens_cover)
.union(corner_cover)
.union(hinge_and_stem_cover)
)
# Rounding the lower corners.
# TODO: Reimplement this, as it does not work when having the 45° overhang at the bottom.
# self.model = (
# self.model
#
# # Rounding the lower corner of the lens cover.
# .faces(">X")
# .edges("<Z")
# # TODO: Fix that only small radii are possible here. This is probably because the part
# # is curved.
# .fillet(m.lens_cover.lower_corner_radius)
#
# # Rounding the lower corner of the stem cover.
# .faces("<Y")
# .edges("<Z")
# .fillet(m.stem_cover.lower_corner_radius)
# )
# =============================================================================
# Part Creation
# =============================================================================
part = LensCover(cq.Workplane(), measures)
show_options = {"color": measures.color, "alpha": measures.alpha}
show_object(part.model, name = measures.part_name, options = show_options)
|
tanius/cadquery-models
|
lenscover/lens_cover.py
|
lens_cover.py
|
py
| 23,912 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "types.SimpleNamespace",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "types.SimpleNamespace",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "importlib.reload",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "utilities.union_pending",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "cadquery.Workplane",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 494,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "cadquery.Workplane",
"line_number": 535,
"usage_type": "call"
}
] |
39048517647
|
import re
import logging
from datetime import datetime, timezone
__all__ = ('datetime_to_ns',)
logger = logging.getLogger('aionationstates')
class DataClassWithId:
def __eq__(self, other):
# Ids in NS are pretty much always not globally unique.
if type(self) is not type(other):
return NotImplemented
return self.id == other.id
def __hash__(self):
return hash((self.id,))
def __repr__(self):
return f'<{self.__class__.__name__} id={self.id}>'
def normalize(identifier):
identifier = identifier.lower().replace(' ', '_')
if not re.match('^[a-z0-9_-]+$', identifier):
raise ValueError(f'provided identifier {identifier} contains invalid'
' characters.')
return identifier
def banner_url(id):
return f'https://www.nationstates.net/images/banners/{id}.jpg'
def timestamp(line):
return datetime.utcfromtimestamp(int(line))
def utc_seconds(datetime_):
return int(datetime_.replace(tzinfo=timezone.utc).timestamp())
def unscramble_encoding(text):
"""This is a workaround for a bug in the NS server-side code.
(This entire lib is, honestly.)
Specifically, somewhere in the process W-1252 encoded text is
wrongly interpreted to be ISO-8859-1, resulting in *some* characters
being deterministically unintentionally replaced with useless to the
user Unicode control chars.
This is a very common problem. Common enough, in fact, to be
accounted for in the HTML treatment of Character References as
defined by the specification. Well, it is technically a parse
error, but nobody really cares since the correct, expected character
is returned. For this reason, the bug is not present (or at least
not visible) on the NS web interface, and only shows itself when
dealing with the API.
Interestingly enough, these characters are not always serialized as
NCRs, in the dispatch CDATA they are represented literally, meaning
that even modifying the XML parser to include a bit of HTML leniency
would not be enough. Not that anyone would do that regardless.
This function reverses the process, substituting the unprintable mess
returned by NS for the Unicode characters it must have originated
from.
It's a bit ugly, but gets the job done.
"""
return text.translate(unscramble_table)
unscramble_table = str.maketrans({
'\u0080': '\N{EURO SIGN}',
'\u0082': '\N{SINGLE LOW-9 QUOTATION MARK}',
'\u0083': '\N{LATIN SMALL LETTER F WITH HOOK}',
'\u0084': '\N{DOUBLE LOW-9 QUOTATION MARK}',
'\u0085': '\N{HORIZONTAL ELLIPSIS}',
'\u0086': '\N{DAGGER}',
'\u0087': '\N{DOUBLE DAGGER}',
'\u0088': '\N{MODIFIER LETTER CIRCUMFLEX ACCENT}',
'\u0089': '\N{PER MILLE SIGN}',
'\u008A': '\N{LATIN CAPITAL LETTER S WITH CARON}',
'\u008B': '\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}',
'\u008C': '\N{LATIN CAPITAL LIGATURE OE}',
'\u008E': '\N{LATIN CAPITAL LETTER Z WITH CARON}',
'\u0091': '\N{LEFT SINGLE QUOTATION MARK}',
'\u0092': '\N{RIGHT SINGLE QUOTATION MARK}',
'\u0093': '\N{LEFT DOUBLE QUOTATION MARK}',
'\u0094': '\N{RIGHT DOUBLE QUOTATION MARK}',
'\u0095': '\N{BULLET}',
'\u0096': '\N{EN DASH}',
'\u0097': '\N{EM DASH}',
'\u0098': '\N{SMALL TILDE}',
'\u0099': '\N{TRADE MARK SIGN}',
'\u009A': '\N{LATIN SMALL LETTER S WITH CARON}',
'\u009B': '\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}',
'\u009C': '\N{LATIN SMALL LIGATURE OE}',
'\u009E': '\N{LATIN SMALL LETTER Z WITH CARON}',
'\u009F': '\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}',
})
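# Minimal sketch of the fix (hypothetical input): text arriving from the API
# with stray W-1252 control characters gets its intended punctuation back.
# >>> unscramble_encoding('\u0093Hello\u0094 \u0096 world\u0085')
# '“Hello” – world…'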
class aobject:
"""Inheriting this class allows you to define an async __init__.
Code shamelessly ripped from StackOverflow.
Before getting angry at me for abusing python features, remind
yourself that all async/await code is already an abuse of generators
and embrace the simple truth that practicality beats purity.
"""
async def __new__(cls, *a, **kw):
instance = super().__new__(cls)
await instance.__init__(*a, **kw)
return instance
async def __init__(self):
pass
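# Usage sketch (hypothetical subclass): awaiting the constructor runs the
# async __init__ before the instance is returned.
# >>> class Nation(aobject):
# ...     async def __init__(self, name):
# ...         self.name = name
# >>> nation = await Nation('testlandia')  # inside a coroutine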
def actually_synchronous(async_function):
def wrapper(*args, **kwargs):
coro_object = async_function(*args, **kwargs)
try:
coro_object.send(None)
except StopIteration as e:
return e.value
else:
raise TypeError("the function supplied isn't actually synchronous")
return wrapper
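# Usage sketch: a coroutine that never awaits anything finishes on the first
# send(), so the wrapper can return its value synchronously.
# >>> @actually_synchronous
# ... async def answer():
# ...     return 42
# >>> answer()
# 42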
async def alist(asyncgen):
return [item async for item in asyncgen]
def datetime_to_ns(then):
"""Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc.
"""
if then == datetime(1970, 1, 1, 0, 0):
return 'Antiquity'
now = datetime.utcnow()
delta = now - then
seconds = delta.total_seconds()
# There's gotta be a better way to do this...
years, seconds = divmod(seconds, 60*60*24*365)
days, seconds = divmod(seconds, 60*60*24)
hours, seconds = divmod(seconds, 60*60)
minutes, seconds = divmod(seconds, 60)
years = int(years)
days = int(days)
hours = int(hours)
minutes = int(minutes)
seconds = round(seconds)
if years > 1:
if days > 1:
return f'{years} years {days} days ago'
elif days == 1:
return f'{years} years 1 day ago'
return f'{years} years ago'
if years == 1:
if days > 1:
return f'1 year {days} days ago'
elif days == 1:
return '1 year 1 day ago'
return '1 year ago'
if days > 3:
return f'{days} days ago'
if days > 1:
if hours > 1:
return f'{days} days {hours} hours ago'
elif hours == 1:
return f'{days} days 1 hour ago'
return f'{days} days ago'
if days == 1:
if hours > 1:
return f'1 day {hours} hours ago'
elif hours == 1:
return '1 day 1 hour ago'
return '1 day ago'
if hours > 1:
return f'{hours} hours ago'
if hours == 1:
return f'{minutes + 60} minutes ago'
if minutes > 1:
return f'{minutes} minutes ago'
if minutes == 1:
return '1 minute ago'
return 'Seconds ago'
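# Doctest-style sketch (the second example assumes `from datetime import timedelta`):
# >>> datetime_to_ns(datetime(1970, 1, 1, 0, 0))
# 'Antiquity'
# >>> datetime_to_ns(datetime.utcnow() - timedelta(days=6))
# '6 days ago'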
|
micha030201/aionationstates
|
aionationstates/utils.py
|
utils.py
|
py
| 6,383 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 151,
"usage_type": "name"
}
] |
43724719541
|
from PyQt5.QtCore import QThread, QMutex, pyqtSignal
from binance.client import Client
import pyupbit
import pybithumb
import requests
from bs4 import BeautifulSoup
from debug import debuginfo
class binanceThread(QThread):
binance_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.binance = Client()
self.binanceList = list()
self.exchange_rate = float(1100)
self.isRun = True
def delSymbol(self, symbol):
if symbol+"BTC" in self.binanceList:
self.binanceList.remove(symbol+"BTC")
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
binanceList = list()
try:
for i in self.binance.get_all_tickers():
symbol = i['symbol']
if symbol[-3:] == 'BTC':
binanceList.append(symbol[:-3])
if symbol == 'BTCUSDT':
binanceList.append(symbol[:-4])
except Exception as e:
debuginfo(e)
pass
return binanceList
def save_list(self, list):
for i in list:
if i == 'BTC':
self.binanceList.append('BTCUSDT')
else:
self.binanceList.append(i+'BTC')
def get_dollar(self):
try:
res = requests.get('http://finance.naver.com/')
text = res.text
soup = BeautifulSoup(text, 'html.parser')
td = soup.select_one(
"#content > div.article2 > div.section1 > div.group1 > table > tbody > tr > td")
exchange_rate = ''
for i in td.text:
if i == ',':
pass
else:
exchange_rate += i
self.exchange_rate = float(exchange_rate)
except Exception as e:
debuginfo(e)
def get_prices(self):
prices = dict()
try:
for i in self.binance.get_all_tickers():
prices[i['symbol']] = i['price']
except Exception as e:
debuginfo(e)
pass
return prices
def get_orderbooks(self):
orderbooks = dict()
try:
for i in self.binance.get_orderbook_tickers():
orderbooks[i['symbol']] = dict()
orderbooks[i['symbol']]['bidPrice'] = i['bidPrice']
orderbooks[i['symbol']]['bidQty'] = i['bidQty']
orderbooks[i['symbol']]['askPrice'] = i['askPrice']
orderbooks[i['symbol']]['askQty'] = i['askQty']
except Exception as e:
debuginfo(e)
pass
return orderbooks
def calculate_krw(self, price, BTCUSDT, exchange_rate):
# Convert a BTC-quoted price into KRW via the BTC/USDT price and the USD/KRW rate.
return str(round(float(price) * BTCUSDT * exchange_rate, 2))
def run(self):
while self.isRun:
self.mutex.lock()
binanceDict = dict()
self.get_dollar()
prices = self.get_prices()
orderbooks = self.get_orderbooks()
try:
BTCUSDT = float(prices['BTCUSDT'])
binanceDict['BTC'] = dict()
binanceDict['BTC']['price'] = str(round(BTCUSDT * self.exchange_rate, 2))
binanceDict['BTC']['ask'] = str(
round(float(orderbooks['BTCUSDT']['askPrice']) * self.exchange_rate, 2)) + '/' + str(
round(float(orderbooks['BTCUSDT']['askQty']), 2))
binanceDict['BTC']['bid'] = str(
round(float(orderbooks['BTCUSDT']['bidPrice']) * self.exchange_rate, 2)) + '/' + str(
round(float(orderbooks['BTCUSDT']['bidQty']), 2))
except Exception as e:
debuginfo(e)
for i in self.binanceList:
if i == 'BTCUSDT':
continue
try:
symbol = i.replace('BTC', '')
binanceDict[symbol] = dict()
binanceDict[symbol]['price'] = self.calculate_krw(prices[i], BTCUSDT, self.exchange_rate)
binanceDict[symbol]['ask'] = self.calculate_krw(orderbooks[i]['askPrice'], BTCUSDT, self.exchange_rate) + '/' + str(round(float(orderbooks[i]['askQty']), 2))
binanceDict[symbol]['bid'] = self.calculate_krw(orderbooks[i]['bidPrice'], BTCUSDT, self.exchange_rate) + '/' + str(round(float(orderbooks[i]['bidQty']), 2))
except Exception as e:
debuginfo(e)
pass
self.binance_data.emit(binanceDict)
self.mutex.unlock()
class upbitThread(QThread):
upbit_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.upbit = pyupbit
self.upbitList = list()
self.isRun = True
def delSymbol(self, symbol):
if "KRW-"+symbol in self.upbitList:
self.upbitList.remove("KRW-"+symbol)
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
upbitList = list()
try:
for i in self.upbit.get_tickers(fiat="KRW"):
upbitList.append(i.split('KRW-')[1])
except Exception as e:
debuginfo(e)
pass
return upbitList
def save_list(self, list):
for i in list:
self.upbitList.append('KRW-'+i)
def run(self):
while self.isRun:
self.mutex.lock()
upbitDict = dict()
prices = self.upbit.get_current_price(self.upbitList)
orderbooks = self.upbit.get_orderbook(self.upbitList)
if orderbooks and prices:
for i in orderbooks:
try:
symbol = i['market'].split('-')[1]
orderbook = i['orderbook_units'][0]
ask = str(orderbook['ask_price']) + '/' + str(round(orderbook['ask_size'], 2))
bid = str(orderbook['bid_price']) + '/' + str(round(orderbook['bid_size'], 2))
upbitDict[symbol] = dict()
upbitDict[symbol]['price'] = str(round(prices[i['market']], 2))
upbitDict[symbol]['ask'] = ask
upbitDict[symbol]['bid'] = bid
except Exception as e:
debuginfo(e)
self.upbit_data.emit(upbitDict)
self.mutex.unlock()
class bithumbThread(QThread):
bithumb_data = pyqtSignal(dict)
def __init__(self):
QThread.__init__(self)
self.mutex = QMutex()
self.bithumb = pybithumb.Bithumb
self.bithumbList = list()
self.isRun = True
def delSymbol(self, symbol):
if symbol in self.bithumbList:
self.bithumbList.remove(symbol)
def _start(self):
self.isRun = True
self.start()
def stop(self):
self.isRun = False
def get_symbol_list(self):
bithumbList = list()
try:
bithumbList = self.bithumb.get_tickers()
except Exception as e:
debuginfo(e)
pass
return bithumbList
def save_list(self, list):
self.bithumbList = list
def run(self):
while self.isRun:
self.mutex.lock()
bithumbDict = dict()
prices = self.bithumb.get_current_price('ALL')
orderbooks = self.bithumb.get_orderbook('ALL')
if orderbooks and prices:
orderbooks = orderbooks['data']
for i in self.bithumbList:
try:
price = prices[i]['closing_price']
orderbook = orderbooks[i]
ask = orderbook['asks'][0]['price'] + '/' + str(round(float(orderbook['asks'][0]['quantity']), 2))
bid = orderbook['bids'][0]['price'] + '/' + str(round(float(orderbook['bids'][0]['quantity']), 2))
bithumbDict[i] = dict()
bithumbDict[i]['price'] = price
bithumbDict[i]['ask'] = ask
bithumbDict[i]['bid'] = bid
except Exception as e:
debuginfo(e)
pass
self.bithumb_data.emit(bithumbDict)
self.mutex.unlock()
if __name__ == "__main__":
binance = binanceThread()
upbit = upbitThread()
bithumb = bithumbThread()
binanceList = binance.get_symbol_list()
upbitList = upbit.get_symbol_list()
bithumbList = bithumb.get_symbol_list()
binanceUpbitDuplicate = list()
binanceBithumbDuplicate = list()
upbitBithumbDuplicate = list()
for i in binanceList:
if i in upbitList:
binanceUpbitDuplicate.append(i)
if i in bithumbList:
binanceBithumbDuplicate.append(i)
for i in upbitList:
if i in bithumbList:
upbitBithumbDuplicate.append(i)
newBinanceList = list(set(binanceUpbitDuplicate+binanceBithumbDuplicate))
newUpbitList = list(set(binanceUpbitDuplicate+upbitBithumbDuplicate))
newBithumbList = list(set(binanceBithumbDuplicate+upbitBithumbDuplicate))
binance.save_list(newBinanceList)
upbit.save_list(newUpbitList)
bithumb.save_list(newBithumbList)
|
JunTae90/coin_viewer
|
thread.py
|
thread.py
|
py
| 9,535 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QMutex",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "binance.client.Client",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 137,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread.__init__",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QMutex",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread.__init__",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QMutex",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "pybithumb.Bithumb",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "debug.debuginfo",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "debug.debuginfo",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "binance.client",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "binance.client.get_symbol_list",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "binance.client",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "binance.client.save_list",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "binance.client",
"line_number": 277,
"usage_type": "name"
}
] |
6806255656
|
"""
Please attempt this task only after you have completed, and received review on, all the other
tasks in this repository. It is considerably harder.
There is a set of chat messages in the following format:
```
messages = [
{
"id": "efadb781-9b04-4aad-9afe-e79faef8cffb",
"sent_at": datetime.datetime(2022, 10, 11, 23, 11, 11, 721),
"sent_by": 46, # id of the user who sent the message
"reply_for": "7b22ae19-6c58-443e-b138-e22784878581", # id of the message this one replies to (may be None)
"seen_by": [26, 91, 71], # ids of the users who have seen this message
"text": "So when is the review coming?",
}
]
```
There is also a `generate_chat_history` function that returns a list of many such messages.
Install the lorem library so that it works.
You need to:
1. Print the id of the user who wrote the most messages.
2. Print the id of the user whose messages received the most replies.
3. Print the ids of the users whose messages were seen by the most unique users.
4. Determine when the chat has the most messages: in the morning (before 12), in the afternoon (12-18), or in the evening (after 18).
5. Print the ids of the messages that started the longest threads (chains of replies).
The code should be split into logical parts using functions.
"""
import random
import uuid
import datetime
from pprint import pprint
from collections import defaultdict
import lorem
def generate_chat_history():
messages_amount = random.randint(200, 1000)
users_ids = list(
{random.randint(1, 10000) for _ in range(random.randint(5, 20))}
)
sent_at = datetime.datetime.now() - datetime.timedelta(days=100)
messages = []
for _ in range(messages_amount):
sent_at += datetime.timedelta(minutes=random.randint(0, 240))
messages.append({
"id": uuid.uuid4(),
"sent_at": sent_at,
"sent_by": random.choice(users_ids),
"reply_for": random.choice(
[
None,
(
random.choice([m["id"] for m in messages])
if messages else None
),
],
),
"seen_by": random.sample(users_ids,
random.randint(1, len(users_ids))),
"text": lorem.sentence(),
})
return messages
def find_id_user_most_messages(messages: list) -> list:
messages_per_user = defaultdict(int)
for message in messages:
messages_per_user[message['sent_by']] += 1
max_messages = 0
user_ids_with_max_messages = []
for key, value in messages_per_user.items():
if value > max_messages:
max_messages = value
user_ids_with_max_messages = [key]
elif value == max_messages:
user_ids_with_max_messages.append(key)
return user_ids_with_max_messages
def find_id_user_most_messages_replies(messages: list) -> list:
replies_per_message = defaultdict(int)
for message in messages:
if message['reply_for'] is None:
continue
replies_per_message[message['reply_for']] += 1
most_replied_messages = set()
most_replied_count = 0
for key, value in replies_per_message.items():
if value > most_replied_count:
most_replied_count = value
most_replied_messages = {key}
elif value == most_replied_count:
most_replied_messages.add(key)
most_replied_users = []
for message in messages:
if message['id'] in most_replied_messages:
most_replied_users.append(message['sent_by'])
return most_replied_users
def find_id_user_most_see_messages(messages: list) -> list:
users = defaultdict(set)
for message in messages:
if users.get(message['sent_by']) is None:
users[message['sent_by']] = set(message['seen_by'])
else:
users[message['sent_by']] = users[message['sent_by']].union(message['seen_by'])
most_see_message_user = []
max_len_seen_by = 0
for key, value in users.items():
if len(value) > max_len_seen_by:
most_see_message_user = []
max_len_seen_by = len(value)
most_see_message_user.append(key)
elif len(value) == max_len_seen_by:
most_see_message_user.append(key)
return most_see_message_user
def when_most_messages(messages: list) -> str:
count_morning = 0
count_day = 0
count_evening = 0
for message in messages:
time = message['sent_at']
time = time.time()
if datetime.time(0, 0, 0) <= time < datetime.time(12, 0, 0):
count_morning += 1
elif datetime.time(12, 0, 0) <= time <= datetime.time(18, 0, 0):
count_day += 1
else:
count_evening += 1
if count_morning > count_day and count_morning > count_evening:
return 'Morning'
elif count_day > count_evening:
return 'Afternoon'
else:
return 'Evening'
# helper: given a message id, return the id of the message it replies to
def find_id_message(messages: list, id_message) -> str:
for message in messages:
if message['id'] == id_message:
return message['reply_for']
def find_id_messages_which_have_most_threads(messages: list) -> list:
dict_result = defaultdict(int)
for message in messages:
if message['reply_for'] is None:
continue
else:
id_message = message['reply_for']
count_threads = 0
while True:
count_threads += 1
id_message_find = find_id_message(messages, id_message)
if id_message_find is None:
break
else:
id_message = id_message_find
dict_result[id_message] = max(dict_result[id_message], count_threads)  # keep the longest chain per root
root_ids = []
max_value = 0
for key, value in dict_result.items():
if value > max_value:
max_value = value
root_ids = [key]
elif value == max_value:
root_ids.append(key)
return root_ids
if __name__ == "__main__":
    # generate one shared history so all five answers describe the same data
    history = generate_chat_history()
    # pprint(history)
    print(find_id_user_most_messages(history))
    print(find_id_user_most_messages_replies(history))
    print(find_id_user_most_see_messages(history))
    print(when_most_messages(history))
    print(find_id_messages_which_have_most_threads(history))
|
hodakoov/basic_exercises
|
for_dict_challenges_bonus.py
|
for_dict_challenges_bonus.py
|
py
| 7,598 |
python
|
ru
|
code
| null |
github-code
|
6
|
[
{
"api_name": "random.randint",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "lorem.sentence",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "datetime.time",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "datetime.time",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 165,
"usage_type": "call"
}
] |
27465756937
|
import keras.backend as K
import tensorflow as tf
import cv2
import imageio
import numpy as np
def square_sum(x):
return K.sum(K.square(x), axis=-1, keepdims=True)
def euclSq(x):
x, y = x
x = K.batch_flatten(x)
y = K.batch_flatten(y)
return square_sum(x - y)
def l2_normalize(x):
inv_sqrt = 1. / K.sqrt(K.maximum(square_sum(x), 1e-6))
return x * inv_sqrt
def gram_matrix(x):
filters = x.shape[3]
size = x.shape[1]
V = K.reshape(x, (-1, size * size, 1, filters))
V = K.permute_dimensions(V, (0, 3, 2, 1))
VT = K.permute_dimensions(V, (0, 2, 1, 3))
return K.sum(V * VT, axis=3)
def triplet_loss(x):
return K.maximum(x[0] - x[1] + 1, 0)
def gram(x):
m, n = map(int, x.shape[2:])
G = gram_matrix(x)
return G / (4 * m**2 * n**2)
def get_image(filepath):
with open(filepath, 'rb') as f:
img = imageio.imread(f)
img = crop_resize(img)
return np.clip(img / 255, 0, 1)
def crop_resize(img):
height, width = img.shape[:2]
if height > width:
center = height // 2
up = center - width // 2
down = center + width // 2
img = img[up:down, :, :]
elif height < width:
center = width // 2
left = center - height // 2
right = center + height // 2
img = img[:, left:right, :]
    img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LANCZOS4)  # third positional arg is dst, so pass the flag by keyword
return img
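# Hedged usage sketch: how these helpers might be combined. 'example.png' is an
# illustrative path, not a file from this repo.
# img = get_image('example.png')   # center-crop to square, resize to 256x256, scale to [0, 1]
# assert img.shape == (256, 256, 3)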
|
ebatuhankaynak/DeepPotato
|
src/util.py
|
util.py
|
py
| 1,405 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "keras.backend.sum",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "keras.backend.square",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keras.backend.batch_flatten",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "keras.backend.batch_flatten",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "keras.backend.sqrt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "keras.backend.maximum",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.backend.reshape",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "keras.backend.permute_dimensions",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "keras.backend.permute_dimensions",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "keras.backend.sum",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "keras.backend.maximum",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "imageio.imread",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LANCZOS4",
"line_number": 59,
"usage_type": "attribute"
}
] |
37407227644
|
from matplotlib import pyplot as plt
from findiff import FinDiff
import pandas as pd
import numpy as np
from tqdm import tqdm
id_col = 'ID'
date_col = 'DATE'
px_close = 'px_last'
px_high = 'px_high'
px_low = 'px_low'
px_open = 'px_open'
def find_derivative(series): #1 day interval
'''
Compute the first and second derivatives (1-step interval) of a given series.
Parameters
----------
series: np.array
series of values to find derivatives
Returns
-------
mom: np.array
first derivative
momacc: np.array
second derivative
Examples
--------
>>> series
array([6.85, 7.06, 7.31, 8. , 7.72, 7.27, 6.57, 7.66, 8.27, 8.31])
>>> mom, momacc = find_derivative(series)
>>> mom
array([ 0.19 , 0.23 , 0.47 , 0.205, -0.365, -0.575, 0.195, 0.85 , 0.325, -0.245])
>>> momacc
array([-0.36, 0.04, 0.44, -0.97, -0.17, -0.25, 1.79, -0.48, -0.57, -0.66])
'''
d_dx = FinDiff(0, 1, 1)
d2_dx2 = FinDiff(0, 1, 2)
clarr = np.asarray(series)
mom = d_dx(clarr)
momacc = d2_dx2(clarr)
return mom, momacc
def find_local_min_max(series, mom, momacc):
'''
Find local minimum and maximum points from a series
Parameters
----------
series: np.array
series of values to find derivatives
mom: np.array
first derivative
momacc: np.array
        second derivative
Returns
-------
local_mins: dict
dictionary of index and value of local minimum of the series
local_max: dict
dictionary of index and value of local maximum of the series
Examples
--------
>>> series
array([6.85, 7.06, 7.31, 8. , 7.72, 7.27, 6.57, 7.66, 8.27, 8.31])
>>> local_mins, local_maxs = find_local_min_max(series, mom, momacc)
>>> local_mins
{6: 6.57}
>>> local_maxs
{3: 8.0, 9: 8.31}
'''
local_mins = []
local_maxs = []
for i in range(len(mom)-1):
series_dict = {i: series[i], i+1: series[i+1]}
        if mom[i] < 0 and mom[i+1] > 0:
            local_mins.append(min(series_dict, key=series_dict.get))
        elif mom[i] > 0 and mom[i+1] < 0:
            local_maxs.append(max(series_dict, key=series_dict.get))
        elif mom[i] == 0 and momacc[i] > 0:
            local_mins.append(i)
        elif mom[i] == 0 and momacc[i] < 0:
            local_maxs.append(i)
local_mins = {i : series[i] for i in local_mins}
local_maxs = {j : series[j] for j in local_maxs}
return local_mins, local_maxs
def get_state_local_min_max(dff, col = 'px_high', ma1 = 5, ma2 = 22):
'''
Main function to get trendline. NOTE: shifted one day late to avoid look-ahead bias
Step 1:
Label period as up and down based on the spread between short ma and long ma
i) short ma > long ma: up trend
ii) long ma > short ma: down trend
Label state when there is a change in state up - down / down - up
state 1, 2, 3, ...
Aggregate max or min of state.
Step 2:
Find local min and max points of the col input
Step 3:
Filter rows where local_max == max_in_state or local_min == min_in_state
Transform the rows into wide form, calculate the m, c that connects the two points
Parameters
----------
dff: DataFrame
        stock df with DATE and OHLC prices; re-indexing to start from 0 is necessary
col: str
price high or price low. px_high to get resistance line (down trend), px_low to get support line (up trend)
ma1: int
short moving average period (in days)
ma2: int
long moving average period (in days)
Returns
-------
dff2: DataFrame
dataframe with ma_1st, ma_2nd, state and local_min/max
line_df: DataFrame
dataframe of the y equation, start and end period date of the support/resist line
'''
# dff['ma_1st'] = dff[col].rolling(ma1).mean()
# dff['ma_2nd'] = dff[col].rolling(ma2).mean()
dff['ma_1st'] = dff[col].ewm(span=ma1, min_periods = ma1, adjust=False).mean()
dff['ma_2nd'] = dff[col].ewm(span=ma2, min_periods = ma2, adjust=False).mean()
dff['spread'] = (dff['ma_1st'] - dff['ma_2nd']).shift()
dff.dropna(subset=['spread'], inplace=True)
dff.reset_index(drop=True, inplace=True)
dff['sign'] = dff['spread'].map(lambda x: 'up' if x>0 else 'down')
dff['state'] = (dff['sign']!=dff['sign'].shift()).astype(int).cumsum()
mom, momacc = find_derivative(dff[col].values)
local_mins, local_maxs = find_local_min_max(dff[col].values, mom, momacc)
return dff, local_mins, local_maxs
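# Hedged usage sketch (column names follow the module-level defaults above;
# some_stock is an illustrative value, not part of this module):
# dff = df[df[id_col] == some_stock].reset_index(drop=True)
# dff, local_mins, local_maxs = get_state_local_min_max(dff, col=px_high, ma1=5, ma2=22)
# local_maxs maps row index -> price of each local maximum found in the chosen column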
def refine_end_filter(end_filter_df, local_):
end_of_state=end_filter_df.groupby('state')[date_col].rank(ascending=False) ==1
end_filter_df.loc[end_of_state, local_] = None
end_filter_df[local_] = end_filter_df.groupby('state')[local_].ffill()
return end_filter_df.dropna()
def get_line(df, local_='local_maxs', start_='up', agg = 'max', m_increase = 1):
'''
local_ = 'local_maxs'
start_ = 'up'
agg = 'max'
m_increase = 1
'''
start_rule = df['sign'] == start_
start_filter = df[start_rule].copy()
start_filter = start_filter[start_filter[local_] == start_filter.groupby('state')[local_].transform(agg)]\
.reset_index()[[id_col,'index', date_col,'state',local_]]
start_filter = start_filter.assign(state=start_filter.state+1)
next_start_filter = start_filter.assign(next_start_dt=start_filter[date_col].shift(-1)).fillna(df[date_col].max())
cols = list(start_filter.columns)
start_filter.columns = ['start_'+i if i not in [id_col,'state'] else i for i in start_filter.columns]
end_rule = df['sign'] != start_
end_filter = df[end_rule].dropna(subset=[local_]).reset_index()
# end_filter = refine_end_filter(end_filter, local_)
start_end_filter = start_filter.merge(end_filter[cols], on=[id_col,'state'], how='left').dropna()\
.merge(next_start_filter[[id_col, 'state','next_start_dt']], on=[id_col, 'state'], how='left') #######
start_end_filter['m'] = (start_end_filter[local_] - start_end_filter['start_' + local_]) / \
(start_end_filter['index'] - start_end_filter['start_index'])
start_end_filter['c'] = start_end_filter[local_] - start_end_filter['m']*start_end_filter['index']
gradient_sign = (m_increase*start_end_filter['m'] < m_increase*start_end_filter.groupby('state')['m'].shift()).map(lambda x: 1 if not x else None)
start_end_filter['m'] = (start_end_filter['m'] * gradient_sign).ffill()
start_end_filter['c'] = (start_end_filter['c'] * gradient_sign).ffill()
start_end_filter['line_group'] = gradient_sign.cumsum().ffill()
start_end_filter = start_end_filter[m_increase*start_end_filter['m']<0].drop_duplicates(subset=[date_col], keep='last')
    dff2 = df.merge(start_end_filter.drop('index', axis=1),
                    on=[id_col,date_col,'state', local_], how='left').ffill()
fillins = (dff2[date_col]>dff2['next_start_dt']).map(lambda x: None if x else 1)
dff2['y'] = (dff2['m']*dff2.index + dff2['c'])*fillins
dff2['y2'] = dff2['m']*dff2.index + dff2['c']
dff2['cross'] = m_increase*dff2[px_close] > m_increase*dff2['y']
first_cross = dff2[dff2['cross']==True].reset_index().groupby('line_group')[date_col].first().reset_index().assign(first_cross=1)
    dff2 = dff2.merge(first_cross, on=['line_group',date_col], how='left').drop('cross', axis=1)
dff2['first_cross'] = dff2['first_cross'].fillna(0)
start_end_filter = start_end_filter.merge(first_cross.rename(columns={date_col:'cross_'+date_col}), on='line_group', how='left')
return dff2, start_end_filter
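# Note (hedged reading of get_line above): each trendline is the straight line through
# the start extreme (start_index, start_local) and end extreme (index, local), so
#   m = (y_end - y_start) / (x_end - x_start)   and   c = y_end - m * x_end,
# giving y = m * row_index + c; 'first_cross' marks the first close beyond that line.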
def _trendline_doc_string(original):
def wrapper(target):
target.__doc__ = original.__doc__
return target
return wrapper
@_trendline_doc_string(get_state_local_min_max)
def get_down_trendline(dff, col = 'px_high', ma1 = 5, ma2 = 22):
dff = dff.reset_index(drop=True)
dff, _, local_maxs = get_state_local_min_max(dff, col, ma1, ma2)
dff['local_maxs'] = dff.index.map(local_maxs)
dff2, line_df = get_line(dff, local_='local_maxs', start_='up', agg = 'max', m_increase = 1)
return dff2, line_df
@_trendline_doc_string(get_state_local_min_max)
def get_up_trendline(dff, col='px_low', ma1=5, ma2=22):
dff = dff.reset_index(drop=True)
dff, local_mins, _ = get_state_local_min_max(dff, col, ma1, ma2)
dff['local_mins'] = dff.index.map(local_mins)
dff2, line_df = get_line(dff, local_='local_mins', start_='down', agg = 'min', m_increase = -1)
return dff2, line_df
def cal_ret(price_df, col='px_last', ret_days=None, shift_days=0):
'''
Calculate the future return, i.e. forward return from today.
Will return NaN if the days in future not present yet
Parameters
----------
price_df: DataFrame
dataframe with stock prices
Returns
-------
price_df: DataFrame
dataframe with forward returns calculated
'''
    if ret_days is None:
ret_days = [10, 30]
for d in ret_days:
price_df['%dD_return'%d] = price_df[col].pct_change(d).shift(-1*(d+shift_days))*100
return price_df #[['DATE',col]+]
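# Worked example (hedged): with ret_days=[10] and shift_days=0, row t gets
# 10D_return = (px_last[t+10] / px_last[t] - 1) * 100; the last 10 rows are NaN
# because the future price does not exist yet.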
def add_features(df):
'''
Add feature to df (on the cross date)
Parameters
----------
df: DataFrame
df with required fields to generate features
Returns
-------
df: DataFrame
df with added features
'''
# cols = df.columns.tolist()
df['price_change_5D'] = df['px_last'].pct_change(5)*100
df['price_change_f0'] = df['px_last'].pct_change()*100
df['price_change_f1'] = df['px_last'].pct_change().shift(-1)*100
df['open-close_f0'] = (df['px_last']/df['px_open']-1)*100
df['open-close_f1'] = (df['px_last']/df['px_open']-1).shift(-1)*100
df['accel'] = df['px_high'].diff().diff()
df['avat'] = df['volume']/df['volume'].rolling(20).mean()
# feature_cols = list(set(df.columns).difference(set(cols)))
return df
def full_ma_line_run(df, col='px_high', ma1=5, ma2=22):
'''
Generate full trendline and crosses
get_down_trendline
Parameters
----------
df: DataFrame
full stock df with prices
col: str
px_high for downtrend, px_low for uptrend
ma1: int
short moving average (days)
ma2: int
long moving average (days)
Returns
-------
trend_line_df: DataFrame
line_df generated from trendline_func
stock_ma_line_df: DataFrame
full_stock_df with merged line_df and its repective crosses after the last_DATE
Examples
--------
>>> stock_ma_line_df, trend_line_df = full_ma_line_run(df, 'px_high', ma1=5, ma2=22, feature_func=add_features)
'''
if 'high' in col:
trendline_func = get_down_trendline
else:
trendline_func = get_up_trendline
stock_ma_line_df = pd.DataFrame()
trend_line_df = pd.DataFrame()
for stock in tqdm((sorted(df[id_col].unique()))):
dff = df[df[id_col]==stock].sort_values(date_col).copy()
try:
dff2, line_df = trendline_func(dff)
stock_ma_line_df = stock_ma_line_df.append(dff2)
trend_line_df = trend_line_df.append(line_df)
except Exception as e:
print(stock, e)
return stock_ma_line_df.reset_index(drop=True), trend_line_df
################################################ Channel Breakout ########################################################
from sklearn.linear_model import LinearRegression
def channel_lr(stock_df, start_date, end_date):
train_df = stock_df[(stock_df[date_col]>=start_date)&(stock_df[date_col]<=end_date)].copy()
y = train_df[px_close]
X = train_df.index.values
lr = LinearRegression()
lr.fit(X.reshape(-1,1), y)
a = lr.coef_[0]
b = lr.intercept_
y_pred = a*X + b
BU = max(train_df[px_high] - y_pred)
BL = min(train_df[px_low] - y_pred)
return dict(a=a, b=b, BU=BU, BL=BL)
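# Note (hedged): channel_lr fits px_last = a*index + b over the window, then BU/BL are
# the largest excursions of px_high/px_low above/below that fit, so the channel is the
# pair of parallels y = a*x + b + BU and y = a*x + b + BL enclosing the window's prices.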
def channel_project(stock_df, line_df, m_increase):
stock_df = stock_df.reset_index(drop=True)
line_df = line_df.drop_duplicates(subset=['line_group'])
channel_lr_df = []
for lrow in line_df.to_dict(orient='records'):
channel_lr_params = channel_lr(stock_df, lrow['start_' + date_col], lrow[date_col])
channel_lr_df.append({**lrow, **channel_lr_params})
channel_lr_df = pd.DataFrame(channel_lr_df)
stock_df = stock_df.merge(channel_lr_df[[id_col,date_col, 'a','b','BU','BL']], how='left').ffill()
fillins = (stock_df[date_col]>stock_df['next_start_dt']).map(lambda x: None if x else 1)
stock_df['project'] = (stock_df['a']*stock_df.index + stock_df['b'] + stock_df['a'] + m_increase*stock_df['BU'])*fillins
stock_df['cross'] = m_increase*stock_df[px_close] > m_increase*stock_df['project']
first_cross = stock_df[stock_df['cross']==True].reset_index().groupby('line_group')[date_col]\
.first().reset_index().assign(first_channel_cross=1)
    stock_df = stock_df.merge(first_cross, on=['line_group',date_col], how='left').drop('cross', axis=1)
    stock_df['first_channel_cross'] = stock_df['first_channel_cross'].fillna(0)
channel_lr_df = channel_lr_df.merge(first_cross.rename(columns={date_col:'channel_cross_'+date_col}), on='line_group', how='left')
return stock_df, channel_lr_df
def full_channel_run(stock_ma_line_df, trend_line_df, col='px_high'):
m_increase = 1 if 'high' in col else -1
stock_channel_df = pd.DataFrame()
full_channel_df = pd.DataFrame()
for stock in tqdm((sorted(stock_ma_line_df[id_col].unique()))):
stock_df = stock_ma_line_df[stock_ma_line_df[id_col]==stock]
line_df = trend_line_df[trend_line_df[id_col]==stock]
try:
dff2, channel_df = channel_project(stock_df, line_df, m_increase)
stock_channel_df = stock_channel_df.append(dff2)
full_channel_df = full_channel_df.append(channel_df)
except Exception as e:
print(stock, e)
cross_dates = ['cross_%s'%date_col,'channel_cross_%s'%date_col]
full_channel_df['later_cross_date'] = full_channel_df[cross_dates].max(axis=1)
full_channel_df['both'] = full_channel_df[cross_dates].isnull().sum(axis=1).map(lambda x: 1 if x==0 else 0)
return stock_channel_df, full_channel_df
################################################ Visualization ########################################################
import plotly.graph_objects as go
from ipywidgets import interact, interactive, Dropdown, HTML, VBox, HBox
from IPython.display import display  # display() is used by the interactive helpers below
def plt_trendline(df, line_df, stock, col='px_high'):
'''
Plot price with trendline
Parameters
----------
df: DataFrame
dataframe with dates and stock prices
line_df: DataFrame
dataframe which contains start end index and date of trendline
stock: str
stock name for plot title
col: str
px_high or px_low
'''
if 'high' in col:
local_ = 'local_maxs'
else:
local_ = 'local_mins'
plt.rcParams['figure.figsize'] = (20,8)
fig, ax = plt.subplots()
df = df.set_index(date_col)
df[col].plot(color='black')
    if 'ma_1st' in df.columns:
        df[['ma_1st','ma_2nd']].plot(alpha=0.5, ax=ax)
plt.scatter(df.query('first_cross==1').index, df.query('first_cross==1')['y'], marker='x', color='red', s=100)
for line_g in df['line_group'].dropna().unique():
df_plot = df[df['line_group']==line_g].dropna(subset=['y']).iloc[[0, -1]].copy()
df_plot['y'].plot(color='red', linewidth=1)
for row in line_df.to_dict(orient='records'):
plt.plot([row['start_' + date_col], row[date_col]],
[row['start_' + local_] , row['m']*row['index'] + row['c']], color='purple', linewidth=1)
plt.title(stock)
return plt
def interactive_plt_trendline(df, ma1=5, ma2=22, direction='down'):
if direction == 'down':
trendline_func = get_down_trendline
col = 'px_high'
else:
trendline_func = get_up_trendline
col = 'px_low'
stock_selec = Dropdown(options = sorted(df.ID.unique()))
@interact()
def plot(stock = stock_selec):
dff = df[df[id_col]==stock].reset_index(drop=True).copy()
dff2, line_df = trendline_func(dff, ma1=ma1, ma2=ma2)
plt_trendline(dff2, line_df, stock, col)
def plt_channel(channel_df, channel_line_df, stock):
fig, ax = plt.subplots()
channel_df = channel_df.set_index(date_col)
channel_df[px_close].plot(color='black')
    if 'ma_1st' in channel_df.columns:
        channel_df[['ma_1st','ma_2nd']].plot(alpha=0.5, ax=ax)
for crow in channel_line_df.to_dict(orient='records'):
line_g = channel_df[channel_df['line_group']==crow['line_group']]
dff2_plot = line_g.dropna(subset=['project']).iloc[[0,-1]].copy()
dff2_plot['project'].plot(color='red', linewidth=1)
cross = line_g.query('first_channel_cross==1')
if cross.shape[0] :
plt.scatter(cross.index, cross[px_close], marker='x', color='red', s=100)
date_X = [crow['start_'+date_col], crow[date_col]]
X = np.array([crow['start_index'], crow['index']])
plt.plot(date_X, crow['a']*X+crow['b'], color='brown')
plt.plot(date_X, crow['a']*X+crow['b']+crow['BU'], color='cyan')
plt.plot(date_X, crow['a']*X+crow['b']+crow['BL'], color='cyan')
plt.title(stock)
return plt
def interactive_plt_channel(df, ma1=5, ma2=22, direction='down'):
if direction == 'down':
trendline_func = get_down_trendline
col = px_high
m_increase = 1
else:
trendline_func = get_up_trendline
col = px_low
m_increase = -1
stock_selec = Dropdown(options = sorted(df.ID.unique()))
@interact()
def plot(stock = stock_selec):
dff = df[df[id_col]==stock].reset_index(drop=True).copy()
dff2, line_df = trendline_func(dff, ma1=ma1, ma2=ma2)
dff3, channel_df = channel_project(dff2, line_df, m_increase)
plt_channel(dff3, channel_df, stock)
def interactive_plt_channel2(stock_channel_df, channel_line_df):
def _plot_cross(cross):
stock = stock_selec.value
stock_df = stock_channel_df[stock_channel_df[id_col]==stock].reset_index(drop=True).copy()
channel_df = channel_line_df[channel_line_df[id_col]==stock]
if cross == 'All':
plt_channel(stock_df, channel_df, stock)
else:
plt_channel(stock_df, channel_df.iloc[cross:cross+1], stock)
def update_cross_selec(stock):
cross_selec.options = ['All'] + list(range(channel_line_df[channel_line_df[id_col]==stock].shape[0]))
stock_selec = Dropdown(options = sorted(stock_channel_df[id_col].unique()))
init = channel_line_df[channel_line_df['ID']==stock_selec.value].shape[0]
cross_selec = Dropdown(options = range(init))
j = interactive(update_cross_selec, stock=stock_selec)
i = interactive(_plot_cross, cross=cross_selec)
k = VBox()
display(j)
display(i)
display(k)
def plotly_trendline(df, line_df, stock, fig=None):
if not fig:
fig = go.Figure()
fig.add_trace(go.Candlestick(x=df[date_col],
open=df[px_open],
high=df[px_high],
low=df[px_low],
close=df[px_close], showlegend=False))
local_ = [i for i in line_df.columns if 'start_' in i and date_col not in i and 'index' not in i][0]
for row in line_df.to_dict(orient='records'):
line_g = df[df['line_group']==row['line_group']]
df_plot = line_g.dropna(subset=['y']).iloc[[0, -1]].copy()
fig.add_trace(go.Scatter(x=df_plot[date_col], y=df_plot['y'], mode='lines', showlegend=False,
hoverinfo='skip', line = dict(color = 'purple', width=1)))
cross = line_g.query('first_cross==1')
if cross.shape[0] :
fig.add_trace(go.Scatter(x=cross[date_col], y=cross[px_close], showlegend=False,
mode='markers', marker_symbol='x', marker_color='black'))
fig.add_trace(go.Scatter(x=[row['start_' + date_col], row[date_col]],
y=[row[local_] , row['m']*row['index'] + row['c']],
mode='lines', line_color='black', showlegend=False))
fig.update_layout(title=stock, template='ygridoff', xaxis_rangeslider_visible=False)
return fig
def plotly_channel(channel_df, channel_line_df, stock, fig=None):
if not fig:
fig = go.Figure()
# fig.add_trace(go.Scatter(x=channel_df[date_col], y=channel_df[px_close], line_color='black', showlegend=False))
fig.add_trace(go.Candlestick(x=channel_df[date_col],
open=channel_df[px_open],
high=channel_df[px_high],
low=channel_df[px_low],
close=channel_df[px_close], showlegend=False))
for line_g in channel_line_df['line_group'].dropna().unique():
dff2_plot = channel_df[channel_df['line_group']==line_g].iloc[[0,-1]].copy()
for crow in channel_line_df.to_dict(orient='records'):
date_X = [crow['start_'+date_col], crow[date_col]]
line_g = channel_df[channel_df['line_group']==crow['line_group']]
dff2_plot = line_g.dropna(subset=['project']).iloc[[0,-1]].copy()
fig.add_vline(dff2_plot[date_col].iloc[0] ,line_dash="dot", line=dict(color='black'))
fig.add_trace(go.Scatter(x=dff2_plot[date_col], y=dff2_plot['project'], mode='lines', showlegend=False,
hoverinfo='skip', line = dict(color = 'black', width=1)))
cross = line_g.query('first_channel_cross==1')
if cross.shape[0] :
fig.add_trace(go.Scatter(x=cross[date_col], y=cross[px_close], showlegend=False,
mode='markers', marker_symbol='x', marker_color='black'))
X = np.array([crow['start_index'], crow['index']])
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b'], mode='lines', line_color='black', showlegend=False))
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b']+crow['BU'], mode='lines',
hoverinfo='skip', showlegend=False,line = dict(color = 'blue', width=1)))
fig.add_trace(go.Scatter(x=date_X, y=crow['a']*X+crow['b']+crow['BL'], mode='lines',
hoverinfo='skip', showlegend=False,line = dict(color = 'blue', width=1)))
fig.update_layout(title=stock, template='ygridoff', xaxis_rangeslider_visible=False)
return fig
def interactive_plt_channel3(stock_channel_df, channel_line_df):
def _plot_cross(cross):
stock = stock_selec.value
stock_df = stock_channel_df[stock_channel_df[id_col]==stock].reset_index(drop=True).copy()
channel_df = channel_line_df[channel_line_df[id_col]==stock]
if cross == 'All':
fig = plotly_channel(stock_df, channel_df, stock)
fig2 = plotly_trendline(stock_df, channel_df, stock)
else:
fig = plotly_channel(stock_df, channel_df.iloc[cross:cross+1], stock)
fig2 = plotly_trendline(stock_df, channel_df.iloc[cross:cross+1], stock)
k.children= [go.FigureWidget(fig2), go.FigureWidget(fig)]
def update_cross_selec(stock):
cross_selec.options = ['All'] + list(range(channel_line_df[channel_line_df[id_col]==stock].shape[0]))
_plot_cross('All')
stock_selec = Dropdown(options = sorted(stock_channel_df[id_col].unique()))
init = channel_line_df[channel_line_df['ID']==stock_selec.value].shape[0]
cross_selec = Dropdown(options = range(init))
j = interactive(update_cross_selec, stock=stock_selec)
i = interactive(_plot_cross, cross=cross_selec)
k = VBox()
display(j)
display(i)
display(k)
|
etq-quant/etqbankloan
|
Lib/etiqalib/ta/turning_points.py
|
turning_points.py
|
py
| 24,726 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "findiff.FinDiff",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "findiff.FinDiff",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 383,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 426,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 426,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 427,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 433,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 440,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 443,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 444,
"usage_type": "name"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 455,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 465,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 477,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 481,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 482,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 482,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 483,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 485,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 485,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 486,
"usage_type": "name"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 499,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interact",
"line_number": 501,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 524,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 526,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interactive",
"line_number": 528,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interactive",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "ipywidgets.VBox",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 540,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 540,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Candlestick",
"line_number": 542,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 542,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 553,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 553,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 558,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 558,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 561,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 561,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 571,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 571,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Candlestick",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 574,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 590,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 590,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 595,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 595,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 598,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 599,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 599,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 600,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 600,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.Scatter",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 602,
"usage_type": "name"
},
{
"api_name": "plotly.graph_objects.FigureWidget",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 624,
"usage_type": "name"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 631,
"usage_type": "call"
},
{
"api_name": "ipywidgets.Dropdown",
"line_number": 633,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interactive",
"line_number": 635,
"usage_type": "call"
},
{
"api_name": "ipywidgets.interactive",
"line_number": 636,
"usage_type": "call"
},
{
"api_name": "ipywidgets.VBox",
"line_number": 637,
"usage_type": "call"
}
] |
40187735381
|
from luigi.contrib.postgres import CopyToTable
from src.utils.general import read_yaml_file
from src.utils.utils import load_df
from src.pipeline.LuigiBiasFairnessTaskRDS import BiasFairnessTask
#from src.pipeline.ingesta_almacenamiento import get_s3_client
from datetime import date
from time import gmtime, strftime
import src.utils.constants as cte
import pandas as pd
import luigi
import psycopg2
import yaml
#import pickle
import marbles.core
import marbles.mixins
class BiasFairnessTest(marbles.core.TestCase, marbles.mixins.DateTimeMixins):
def __init__(self, my_date, data):
super(BiasFairnessTest, self).__init__()
self.date = my_date
self.data = data
def test_get_date_validation(self):
self.assertDateTimesPast(
sequence = [self.date],
strict = True,
            msg = "The requested date must be earlier than today's date"
)
return True
def test_get_nrow_file_validation(self):
data = self.data
nrow = data.shape[0]
        self.assertGreater(nrow, 1, note = "The file must contain at least 2 records")
return True
class BiasFairnessTestTask(CopyToTable):
path_cred = luigi.Parameter(default = 'credentials.yaml')
initial = luigi.BoolParameter(default=True, parsing = luigi.BoolParameter.EXPLICIT_PARSING)
limit = luigi.IntParameter(default = 300000)
date = luigi.DateParameter(default = None)
initial_date = luigi.DateParameter(default = None)
bucket_path = luigi.Parameter(default = cte.BUCKET)
exercise = luigi.BoolParameter(default=True, parsing = luigi.BoolParameter.EXPLICIT_PARSING)
with open(cte.CREDENTIALS, 'r') as f:
config = yaml.safe_load(f)
credentials = config['db']
user = credentials['user']
password = credentials['pass']
database = credentials['database']
host = credentials['host']
port = credentials['port']
table = 'metadata.test_bias_fairness'
columns = [("file_name", "VARCHAR"),
("data_date", "DATE"),
("processing_date", "TIMESTAMPTZ"),
("test_name", "VARCHAR"),
("result", "BOOLEAN")
]
def requires(self):
return BiasFairnessTask(
self.path_cred,
self.initial,
self.limit,
self.date,
self.initial_date,
self.bucket_path,
self.exercise
)
def input(self):
with open(cte.CREDENTIALS, 'r') as f:
config = yaml.safe_load(f)
credentials = config['db']
user = credentials['user']
password = credentials['pass']
database = credentials['database']
host = credentials['host']
conn = psycopg2.connect(
dbname=database,
user=user,
host=host,
password=password
)
cur = conn.cursor()
cur.execute(
""" SELECT *
FROM sesgo.bias_fairness
"""
)
rows = cur.fetchall()
data = pd.DataFrame(rows)
data.columns = [desc[0] for desc in cur.description]
return data
def rows(self):
file_name = "bias-fairness-" + self.date.strftime('%Y-%m-%d')
test = BiasFairnessTest(data = self.input(), my_date = self.date)
        print("Running unit test: date validation")
        test_val = test.test_get_date_validation()
        print("Unit test passed")
        print("Running unit test: row count validation")
        test_nrow = test.test_get_nrow_file_validation()
        print("Unit test passed")
date_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
data_test = {
"file_name": [file_name, file_name],
"data_date": [self.date, self.date],
"processing_date": [date_time, date_time],
"test_name": ["test_get_date_validation",
"test_get_nrow_file_validation"],
"result": [test_val, test_nrow]
}
data_test = pd.DataFrame(data_test)
records = data_test.to_records(index=False)
r = list(records)
for element in r:
yield element
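        # Note (hedged): luigi's CopyToTable consumes this rows() generator and copies
        # each yielded tuple into `table`, so the tuple order must match the declared
        # `columns`: (file_name, data_date, processing_date, test_name, result).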
|
Acturio/DPA-Project
|
src/pipeline/LuigiBiasFairnessTestTask.py
|
LuigiBiasFairnessTestTask.py
|
py
| 3,890 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "marbles.core.core",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "marbles.core",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "marbles.core.mixins",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "luigi.contrib.postgres.CopyToTable",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "luigi.Parameter",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "luigi.IntParameter",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "luigi.DateParameter",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "luigi.DateParameter",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "src.utils.constants.BUCKET",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "src.utils.constants",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "luigi.BoolParameter",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "src.utils.constants.CREDENTIALS",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "src.utils.constants",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "src.pipeline.LuigiBiasFairnessTaskRDS.BiasFairnessTask",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "src.utils.constants.CREDENTIALS",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "src.utils.constants",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "time.gmtime",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 139,
"usage_type": "call"
}
] |
3755394850
|
import asyncio
import traceback
from neptune_py.skeleton.skeleton import NeptuneServiceSkeleton
from neptune_py.skeleton.messager import (
NeptuneWriterBaseAbstract, NeptuneMessageType
)
import struct
import collections
class TLV:
_format = '!HI'
meta_size = struct.calcsize(_format)
tlv = collections.namedtuple('tlv_tuple', 'tag length')
MagicTag = 13
@classmethod
def pack(cls, tag, data):
return struct.pack(cls._format, tag, len(data)) + data
@classmethod
def unpack(cls, data):
if len(data) < cls.meta_size:
return None
        tag, length = struct.unpack(cls._format, data[:cls.meta_size])
return cls.tlv(tag=tag, length=length)
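# Hedged round-trip sketch of the TLV framing above (values are illustrative):
# payload = b'hello'
# framed = TLV.pack(TLV.MagicTag, payload)          # 6-byte '!HI' header + payload
# meta = TLV.unpack(framed[:TLV.meta_size])
# assert meta.tag == TLV.MagicTag and meta.length == len(payload)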
class TlvWriter(NeptuneWriterBaseAbstract):
def __init__(self, writer):
super().__init__()
self.writer = writer
self.closed = False
def write(self, message):
self.writer.write(TLV.pack(TLV.MagicTag, message))
def close(self):
if self.closed:
return
self.closed = True
if self.writer.can_write_eof():
self.writer.write_eof()
else:
self.writer.close()
class NeptuneTlvBase(NeptuneServiceSkeleton):
def __init__(self, host, port, messager_manager, name=None):
super().__init__(name)
self.host = host
self.port = port
self.messager_manager = messager_manager
self.messager_id = 0
async def connection_handler(self, reader, writer):
peername = writer.get_extra_info("peername")
self.get_logger().debug(f'{peername} connected')
messager_id = self.messager_id
tlv_writer = TlvWriter(writer)
self.messager_manager.on_connected(messager_id, tlv_writer)
self.messager_id += 1
try:
while True:
meta = await reader.readexactly(TLV.meta_size)
tlv = TLV.unpack(meta)
# print(tlv)
data = await reader.readexactly(tlv.length)
self.messager_manager.on_message(messager_id, data)
except asyncio.IncompleteReadError as e:
if e.partial:
# empty data indicates peer closed the connection, otherwise the data
# is illegal.
self.get_logger().debug(f'{peername} illegal data')
except Exception as e:
self.get_logger().error(traceback.format_exc())
finally:
self.get_logger().debug(f'{peername} closed')
self.messager_manager.on_disconnected(messager_id)
writer.close()
await writer.wait_closed()
def init(self):
self.get_logger().debug(f'init {self.__class__.__name__} {self.name}')
async def finish(self):
self.get_logger().debug(f'stopping {self.__class__.__name__} {self.name}...')
class NeptuneTlvService(NeptuneTlvBase):
"""
tlv message server
"""
async def logic(self):
# https://docs.python.org/3.6/library/asyncio-protocol.html
# 'Changed in version 3.6: The socket option TCP_NODELAY is now set by default.'
server = await asyncio.start_server(self.connection_handler, self.host, self.port)
async with server:
self.get_logger().debug(f'NeptuneTlvService {self.name} starts to server')
await server.serve_forever()
class NeptuneTlvClient(NeptuneTlvBase):
"""
tlv message client
"""
async def logic(self):
reader, writer = await asyncio.open_connection(self.host, self.port)
await self.connection_handler(reader, writer)
|
kstardust/neptune
|
neptune_py/skeleton/transporter/neptune_tlv.py
|
neptune_tlv.py
|
py
| 3,598 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "struct.calcsize",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "neptune_py.skeleton.messager.NeptuneWriterBaseAbstract",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "neptune_py.skeleton.skeleton.NeptuneServiceSkeleton",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "asyncio.IncompleteReadError",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "traceback.format_exc",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "asyncio.start_server",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "asyncio.open_connection",
"line_number": 113,
"usage_type": "call"
}
] |
26531296671
|
from pyhpecfm import fabric
from lib.actions import HpecfmBaseAction
class fabricIpLookup(HpecfmBaseAction):
def run(self):
cfm_fabrics = fabric.get_fabric_ip_networks(self.client)
if isinstance(cfm_fabrics, list):
fabric_data = []
            # Loop through cfm_fabrics and process each IP network
for fabip in cfm_fabrics:
desc = fabip['description']
if desc == '':
desc = 'HPE Composable Fabric'
                out = {
'u_desc':desc,
'u_fabu_uid':fabip['fabric_uuid'],
'u_name':fabip['name'],
'u_mode':fabip['mode'],
'u_sub_address':fabip['subnet']['address'],
'u_mask_prefix':fabip['subnet']['prefix_length']
}
fabric_data.append(out)
return (True, fabric_data)
        # the original returned an undefined name ('switches'); return the raw payload instead
        return (False, cfm_fabrics)
|
HewlettPackard/stackstorm-hpe-cfm
|
actions/get_fabric_ips.py
|
get_fabric_ips.py
|
py
| 979 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "lib.actions.HpecfmBaseAction",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pyhpecfm.fabric.get_fabric_ip_networks",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyhpecfm.fabric",
"line_number": 6,
"usage_type": "name"
}
] |
39939937920
|
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import plotly.graph_objects as go
import plotly.express as px
import publico as func
pd.options.mode.chained_assignment = None # default='warn'
from dateutil import parser
def MediaFileRede(res_select, interval_time=5):
res_select.drop_duplicates(subset=None, keep="first", inplace=True)
    # create fields
res_select['Timer2'] = 0
res_select['Media2'] = 0.0
velo_total = 0.0
count=0
timer_atual = 0.0
timer_ant = 0.0
elapset_atual= 0.0
elapset_cumulativo = 0.0
count_timer=interval_time
for index, row in res_select.iterrows():
timer_atual = row['Tempo']
if (timer_ant!=0.0):
elapset_atual = float(row['Tempo']) - float(timer_ant)
# print(abs(elapset_atual))
elapset_cumulativo+=float(elapset_atual)
if ((elapset_cumulativo >= interval_time)):
                # print('Reached')
# break
media_velo = velo_total / count
res_select.at[index,"Media2"] = media_velo
res_select.at[index,"Timer2"] = count_timer
elapset_cumulativo=0.0
timer_ant = 0.0
velo_total=0.0
media_velo=0.0
count=0
count_timer+=interval_time
if (timer_atual != timer_ant):
timer_ant = timer_atual
velo_total = velo_total + row['Download']
count+=1
# remove zeros
res_select = res_select[(res_select['Timer2']!=0) & (res_select['Timer2']<=280) & (res_select['Media2']<300) ]
return res_select
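# Note (hedged): MediaFileRede buckets samples into interval_time-second windows and
# writes each window's mean Download into Media2 at the window's closing row, with
# Timer2 holding the cumulative window time; rows outside a completed window keep 0
# and are filtered out before returning.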
EXP="70"
print("Loading Dataframe...")
# BASELINE GERAL ***************************************************
df1 = pd.read_csv("../repositorio/" + EXP + "/REDE_GERAL.csv")
df1['Download'] = df1['Download'].astype(float)
df1['Upload'] = df1['Upload'].astype(float)
df1['Tempo'] = df1['Tempo'].astype(float)
df1['Source'] = "BASELINE"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df1_select = df1[['Download', 'Source', 'Tempo']]
df1_select = MediaFileRede(df1_select)
# *************************************************************************
# BASELINE 1TO 2 **********************************************************
df2 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_1TO2.csv")
df2['Download'] = df2['Download'].astype(float)
df2['Upload'] = df2['Upload'].astype(float)
# df2['Duracao'] = df2['Duracao'].astype(float)
df2['Tempo'] = df2['Tempo'].astype(float)
# df2['Bytes'] = df2['Bytes'].astype(float)
df2['Source'] = "1TO2"
# df4_filtro = 7df4.loc[(df4['Bytes'] > 0)]
df2_select = df2[['Download', 'Source', 'Tempo']]
df2_select = MediaFileRede(df2_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE RANDOM **********************************************************
df3 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_RANDOM.csv")
df3['Download'] = df3['Download'].astype(float)
df3['Upload'] = df3['Upload'].astype(float)
# df3['Duracao'] = df3['Duracao'].astype(float)
df3['Tempo'] = df3['Tempo'].astype(float)
# df3['Bytes'] = df3['Bytes'].astype(float)
df3['Source'] = "RAND"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df3_select = df3[['Download', 'Source', 'Tempo']]
df3_select = MediaFileRede(df3_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE THRESHOLD **********************************************************
df4 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_THRESHOLD.csv")
df4['Download'] = df4['Download'].astype(float)
df4['Upload'] = df4['Upload'].astype(float)
# df4['Duracao'] = df4['Duracao'].astype(float)
df4['Tempo'] = df4['Tempo'].astype(float)
# df4['Bytes'] = df4['Bytes'].astype(float)
df4['Source'] = "LIM-5"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df4_select = df4[['Download', 'Source', 'Tempo']]
df4_select = MediaFileRede(df4_select)
#********************************************************************
print("Loading Dataframe...")
# DBSCAN **********************************************************
df5 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN.csv")
df5['Download'] = df5['Download'].astype(float)
df5['Upload'] = df5['Upload'].astype(float)
df5['Tempo'] = df5['Tempo'].astype(float)
df5['Source'] = "DBSCAN"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df5_select =df5[['Download', 'Source', 'Tempo']]
df5_select = MediaFileRede(df5_select)
#********************************************************************
# # # DBSCAN FILTER **********************************************************
# # df6 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN_FILTER.csv")
# # df6['Download'] = df6['Download'].astype(float)
# # df6['Upload'] = df6['Upload'].astype(float)
# # df6['Duracao'] = df6['Duracao'].astype(float)
# # df6['STime'] = df6['STime'].astype(float)
# # df6['Bytes'] = df6['Bytes'].astype(float)
# # df6['Source'] = "DBSCAN - FILTER"
# # df6_filtro = df6.loc[(df6['Bytes'] > 0)]
# # df6_select = df6_filtro[['Upload','Bytes','Source', 'STime','Duracao']]
# # df6_select = MediaFileRede(df6_select)
# # #********************************************************************
# XMEANS **********************************************************
df7 = pd.read_csv("../repositorio/" + EXP + "/REDE_XMEANS.csv")
df7['Download'] = df7['Download'].astype(float)
df7['Upload'] = df7['Upload'].astype(float)
# df7['Duracao'] = df7['Duracao'].astype(float)
df7['Tempo'] = df7['Tempo'].astype(float)
# df7['Bytes'] = df7['Bytes'].astype(float)
df7['Source'] = "XMEANS"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df7_select =df7[['Download', 'Source', 'Tempo']]
df7_select = MediaFileRede(df7_select)
#********************************************************************
print("Loading Chart...")
# res = pd.concat([df1_select,df5_select,df7_select], sort=False)
# res = pd.concat([df1_select,df2_select,df3_select,df4_select, df5_select,df7_select], sort=False)
res = pd.concat([df1_select,df2_select,df3_select,df4_select], sort=False)
fig = px.line(res.reset_index(), x="Timer2", y="Media2", color="Source", title='Network Traffic').for_each_trace(lambda t: t.update(name=t.name.replace("Source=","")))
# https://plotly.com/python/axes/
# https://plotly.com/python/line-charts/
# fig.update_layout(
# # title = "AnaliseAlgorithms ",
# yaxis = dict(
# # range=[0,9],
# # tick0=0, dtick=2.5,
# title_text='Upload Rate',
# ),
# xaxis = dict(
# title_text='Normalized Simulation Time (<i>i</i>)',
# ),
# )
fig.update_layout(
# title = "AnaliseAlgorithms ",
yaxis = dict(
# # range=[0,9],
# tick0=0, dtick=5,
title_text='Network Traffic',
),
font=dict(size=16),
xaxis = dict(
title_text='Normalized Simulation Time (<i>t</i>)',
),
# plot_bgcolor='rgba(0,1,0,0)' # 76 64=todos
legend=dict(
x=0.76,
y=1.1,
font=dict(size=16),
orientation='h'
),
# annotations=[dict(
# xref='paper',
# yref='paper',
# )
# ]
)
fig.show()
|
urbancomp/fogarch
|
FogLayer/visualization/chart3_old.py
|
chart3_old.py
|
py
| 7,553 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.options",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "plotly.express.line",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "plotly.express",
"line_number": 194,
"usage_type": "name"
}
] |
35777431960
|
from PIL import Image
import numpy
import cv2
slot_1_box = (905, 215, 930, 235)
slot_2_box = (933, 215, 958, 235)
slot_3_box = (961, 215, 986, 235)
slots_poss = (slot_1_box, slot_2_box, slot_3_box)
def get_crop(_source, _box):
return Image.open(_source).convert('RGB').crop(_box) # .save(tmp_path)
def calculate(image1, image2):
    # inputs may be PIL images (3-channel) or single channels produced by cv2.split,
    # so only convert the colour space when a 3-channel image is passed
    image1 = numpy.asarray(image1)
    image2 = numpy.asarray(image2)
    if image1.ndim == 3:
        image1 = cv2.cvtColor(image1, cv2.COLOR_RGB2BGR)
    if image2.ndim == 3:
        image2 = cv2.cvtColor(image2, cv2.COLOR_RGB2BGR)
    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])
    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])
    # compute the degree of overlap between the two histograms
    degree = 0
for i in range(len(hist1)):
if hist1[i] != hist2[i]:
degree = degree + \
(1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))
else:
degree = degree + 1
degree = degree / len(hist1)
return degree
def classify_hist_with_split(image1, image2, size=(256, 256)):
# image1 = Image.open(image1)
image2 = Image.open(image2)
    # resize both images, split them into R/G/B channels, then compute per-channel similarity
image1 = cv2.cvtColor(numpy.asarray(image1), cv2.COLOR_RGB2BGR)
image2 = cv2.cvtColor(numpy.asarray(image2), cv2.COLOR_RGB2BGR)
image1 = cv2.resize(image1, size)
image2 = cv2.resize(image2, size)
sub_image1 = cv2.split(image1)
sub_image2 = cv2.split(image2)
sub_data = 0
for im1, im2 in zip(sub_image1, sub_image2):
sub_data += calculate(im1, im2)
sub_data = sub_data / 3
return sub_data
class analyzer():
def __init__(self,sourcepath,slotpath):
self.sourcepath = sourcepath
self.slotpath = slotpath
pass
def analyze(self, img):
source_path = self.sourcepath + img
res = [0 for _ in range(len(slots_poss))]
for i in range(len(slots_poss)):
img1_path = get_crop(source_path, slots_poss[i])
for level in range(4):
img2_path = self.slotpath + 'slot_lv' + str(level + 1)+'.png'
result = classify_hist_with_split(img1_path, img2_path)
if result[0] > 0.8:
res[i] = (level+1)
                # print(img + str(level) + " similarity: " + "%.2f%%" % (result * 100))
# print(img, res)
# Image.open(source_path).crop((905, 215, 986, 235)
# ).save(tmppath + str(res) + "-" + img)
return res
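# Hedged usage sketch (paths and filename are illustrative assumptions):
# a = analyzer('screenshots/', 'templates/')
# levels = a.analyze('frame_001.png')   # e.g. [2, 0, 3]; 0 means no template matched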
|
BruceCheng1995/cyber_hunter
|
src/analyze_slot.py
|
analyze_slot.py
|
py
| 2,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "PIL.Image.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.calcHist",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.calcHist",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "cv2.cvtColor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_RGB2BGR",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 42,
"usage_type": "call"
}
] |
72715840829
|
from django.db import models
from django import forms
from django.contrib.auth import get_user_model
# Create your models here.
class Challenge(models.Model):
title = models.CharField(max_length=200)
    author = models.ForeignKey(  # author info will be retrieved from the user model
get_user_model(),
on_delete=models.PROTECT # if the author user is deleted, preserve the challenge created
)
pitch = models.CharField(max_length=200)
description = models.TextField(default="")
website = models.URLField()
image_url = models.ImageField()
date_created = models.DateTimeField()
deadline = models.DateTimeField()
class Meta:
ordering = ["date_created"]
# create a Challenge Form model to store its structure
class ChallengeForm(forms.ModelForm):
class Meta:
model = Challenge
fields = (
'title', 'author', 'pitch', 'description', 'website', 'image_url', 'deadline'
)
# how do we store the tasks from the front end?
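# Hedged sketch of how ChallengeForm might be used in a view (the view itself is an
# assumption, not part of this file):
# def create_challenge(request):
#     form = ChallengeForm(request.POST or None)
#     if form.is_valid():
#         form.save()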
|
hackathon-team-1/ReadingChallenge
|
readingchallenge/challenges/models.py
|
models.py
|
py
| 1,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models.PROTECT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.URLField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelForm",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 24,
"usage_type": "name"
}
] |
16916661051
|
import subprocess
import sys
import json
import platform
import os
from crmetrics import CRBase
class CRLogs(CRBase):
def _get_container_logs(self, pod, namespace, containers, kubeconfig):
for c in containers:
container = c['name']
cmd = 'kubectl logs ' + pod + ' -n ' + namespace + ' -c ' + container + ' ' + kubeconfig
#print(cmd)
print("======== Pod::" + pod + "/container::" + container + " ===========")
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
print(out)
except Exception as e:
print(e)
def get_logs(self, pod, namespace, kubeconfig):
cmd = 'kubectl get pods ' + pod + ' -n ' + namespace + ' -o json ' + kubeconfig
#print(cmd)
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
if out:
json_output = json.loads(out)
containers = json_output['spec']['containers']
self._get_container_logs(pod, namespace, containers, kubeconfig)
if 'initContainers' in json_output['spec']:
init_containers = json_output['spec']['initContainers']
self._get_container_logs(pod, namespace, init_containers, kubeconfig)
except Exception as e:
print(e)
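# Added sketch (not in KubePlus): a list-argv form of the same call avoids the
# shell-quoting pitfalls of the string concatenation used above;
# "kubeconfig_args" is a hypothetical pre-split list such as
# ["--kubeconfig", "/path/to/config"].
def _run_kubectl(self, args, kubeconfig_args):
    out = subprocess.run(["kubectl"] + args + kubeconfig_args,
                         capture_output=True, text=True)
    return out.stdout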
def get_resources_composition(self, kind, instance, namespace, kubeconfig):
platf = platform.system()
kubeplus_home = os.getenv('KUBEPLUS_HOME')
cmd = ''
json_output = {}
if platf == "Darwin":
cmd = kubeplus_home + '/plugins/kubediscovery-macos composition '
elif platf == "Linux":
cmd = kubeplus_home + '/plugins/kubediscovery-linux composition '
else:
print("OS not supported: " + platf)
return json_output
cmd = cmd + kind + ' ' + instance + ' ' + namespace + ' ' + kubeconfig
#print(cmd)
out = ''
try:
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).communicate()[0]
out = out.decode('utf-8')
except Exception as e:
print(e)
if out:
print(out)
try:
json_output = json.loads(out)
except Exception as e:
print(e)
return json_output
def get_pods1(self, resources):
pod_list = []
for resource in resources:
#print(resource)
if resource['Kind'] == 'Pod':
present = False
for p in pod_list:
if p['Name'] == resource['Name']:
present = True
break
if not present:
pod_list.append(resource)
#print(pod_list)
return pod_list
if __name__ == '__main__':
crLogs = CRLogs()
#crLogs.get_logs(sys.argv[1], sys.argv[2])
#resources = sys.argv[1]
relation = sys.argv[1]
kind = sys.argv[2]
instance = sys.argv[3]
namespace = sys.argv[4]
kubeconfig = sys.argv[5]
#print(kind + " " + instance + " " + namespace + " " + kubeconfig)
resources = {}
#if relation == 'connections':
# resources = crLogs.get_resources_connections(kind, instance, namespace, kubeconfig)
# #print(resources)
#if relation == 'composition':
# resources = crLogs.get_resources_composition(kind, instance, namespace, kubeconfig)
# #print(resources)
#resource_json = json.loads(resources)
pods = crLogs.get_pods(kind, instance, kubeconfig)
for pod in pods:
pod_name = pod['Name']
pod_namespace = pod['Namespace']
#print(pod_name)
crLogs.get_logs(pod_name, pod_namespace, kubeconfig)
print("---------------------------------------")
|
cloud-ark/kubeplus
|
plugins/crlogs.py
|
crlogs.py
|
py
| 3,366 |
python
|
en
|
code
| 555 |
github-code
|
6
|
[
{
"api_name": "crmetrics.CRBase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "platform.system",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 96,
"usage_type": "attribute"
}
] |
5461750614
|
# Gets the longitude and latitude for an address using the Google Maps Geocoding API
import json
import time
import pandas as pd
import urllib.error
import urllib.parse
import urllib.request
#Gets api key from txt file
with open(r".txt","r") as file:
API_KEY = r"&key=" + file.readline()
GEO_URL = r"https://maps.googleapis.com/maps/api/geocode/json?&address="
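# Added aside (not in the original script): urllib.parse.quote_plus is the
# standard-library way to URL-encode an address and handles characters the
# manual replace() below misses (e.g. "#", "&"); the sample address is
# illustrative only.
example_encoded = urllib.parse.quote_plus("1600 Amphitheatre Pkwy, Mountain View, CA")
# -> '1600+Amphitheatre+Pkwy%2C+Mountain+View%2C+CA'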
#Creates dataframe of all addresses from csv file specified
df = pd.read_csv(r".csv")
# df = pd.read_csv(r"-test.csv")
#Removes duplicate addresses
unique_search_address = df['SearchAddress'].unique()
headers = ['SearchAddress','LAT','LON']
lat_lon = []
row = 1
for addr in unique_search_address:
# format the address for use in the request URL
f_addr = addr.replace(",", "").replace(" ", "%20")
url = GEO_URL + f_addr + API_KEY
try:
result = json.load(urllib.request.urlopen(url))
lat_lon.append((
addr,
result['results'][0]['geometry']['location']['lat'],
result['results'][0]['geometry']['location']['lng']))
except Exception:  # lookup failed or returned no result; record nulls for this address
    lat_lon.append((addr, None, None))
# Progress indicator: prints the current row so the user can confirm the script is running; otherwise unnecessary
print(row)
row = row + 1
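# Added note: "time" is imported above but never used; throttling here, e.g.
# time.sleep(0.02), would be the natural use (the safe request rate is an
# assumption, not documented in this script).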
# Converts the list of (address, lat, lon) tuples to a dataframe
df_join = pd.DataFrame(lat_lon, columns=headers)
#Adds lat and lon to original dataframe using a left join
df = df.merge(df_join, how='left', on='SearchAddress')
df.to_csv(r".csv", index=False)
# df.to_csv(r"test-results.csv", index=False)
|
randr000/MyPythonScripts
|
get_lat_lon_Google.py
|
get_lat_lon_Google.py
|
py
| 1,674 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.error.request.urlopen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.error.request",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "urllib.error",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
}
] |
21402453945
|
import torch
import math
from torch import nn
import torch.nn.functional as F
from transformers.activations import get_activation
from .utils import init_weights
def _mask(logits, mask):
return mask * logits - 1e3 * (1 - mask)
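# Added illustration (values follow directly from the formula above):
# _mask(torch.tensor([2.0, 3.0]), torch.tensor([1.0, 0.0]))
# -> tensor([2., -1000.])
# i.e. masked-out positions are pushed to ~-1e3 so softmax assigns them
# negligible probability mass.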
# VarMisuse -----------------------------------------------------------------
class _LocRepairPointerHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.prediction = nn.Linear(config.hidden_size, 2)
self.apply(init_weights)
def forward(self, input_states):
hidden = self.dense(input_states)
hidden = get_activation("gelu")(hidden)
logits = self.prediction(hidden)
logits = logits.transpose(2, 1)
return logits
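# Added shape note (inferred from the code, not stated in the original):
# input_states is (batch, seq_len, hidden); after the transpose the returned
# logits are (batch, 2, seq_len), where row 0 scores bug location and row 1
# scores the repair pointer.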
class VarMisuseBaseModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
@torch.no_grad()
def score(self, logits, labels):
probs = nn.Softmax(dim = 2)(logits)
# Location metrics
loc_predict = probs[:, 0, :]
loc_labels = labels[:, 0, :]
locate = loc_predict.argmax(dim=1)
locate = torch.nn.functional.one_hot(locate, num_classes=loc_predict.shape[1]).float()
locate_acc = (locate * loc_labels).sum(dim=1)
buggy_labels = 1 - loc_labels[:, 0]
# Buggy classification
false_alarms = 1 - ((1 - buggy_labels)*locate_acc).sum() / ((1 - buggy_labels).sum() + 1e-9)
bug_acc = (buggy_labels * locate_acc).sum() / (buggy_labels.sum() + 1e-9)
# Classification
cls_predict = loc_predict[:, 0].round()
cls_labels = loc_labels[:, 0]
cls_acc = (cls_predict * cls_labels).mean() + ((1 - cls_predict) * buggy_labels).mean()
#Repair pointer
rep_probs = probs[:, 1, :]
rep_labels = labels[:, 1, :]
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
joint_acc = (buggy_labels * locate_acc * target_predict).sum() / (1e-9 + buggy_labels.sum())
return {
"classification_acc": cls_acc.item(),
"localization_acc": locate_acc.mean().item(),
"bug_acc": bug_acc.item(),
"false_alarm_rate": false_alarms.item(),
"repair_acc": target_acc.item(),
"loc_repair_acc": joint_acc.item(),
"avg_prediction": cls_predict.mean().item()
}
def loc_repair_logits(self, tokens, position_ids = None, labels = None):
    raise NotImplementedError  # provided by subclasses, e.g. VarMisuseModel
def forward(self, tokens, token_mask = None, position_ids = None, labels = None):
prediction = self.loc_repair_logits(tokens, position_ids, labels)
# Mask prediction
if token_mask is not None:
token_mask = token_mask.float().unsqueeze(1).expand_as(prediction)
prediction = _mask(prediction, token_mask)
# Calculate a loss if necessary
if labels is not None:
log_probs = nn.LogSoftmax(dim=2)(prediction)
norm = labels.sum(dim=-1, keepdim = True)
per_token_loss = (-labels * log_probs) / (norm + 1e-9)
per_example_loss = per_token_loss.sum(dim=-1)
per_task_loss = per_example_loss.mean(dim = 0)
return per_task_loss.sum(), prediction
return prediction
class VarMisuseModel(VarMisuseBaseModel):
def __init__(self, config, encoder):
super().__init__(config, encoder)
self.head = _LocRepairPointerHead(config)
def loc_repair_logits(self, tokens, position_ids = None, labels = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
encoding, _ = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids
)
return self.head(encoding)
# General model that works with inner repairs and localization --------------------------------
class _LocateHead(nn.Module):
def __init__(self, config):
super().__init__()
self.ffn_in = nn.Linear(2 * config.hidden_size, config.hidden_size)
self.ffn_out = nn.Linear(config.hidden_size, 1)
self.apply(init_weights)
def forward(self, context_embed, token_embed, token_mask = None, labels = None):
assert context_embed.shape[1] == token_embed.shape[1]
# Localization prediction --------------------------------
diff_vector = token_embed - context_embed
diff_vector = torch.cat([context_embed, diff_vector], dim = 2)
hidden = self.ffn_in(diff_vector)
hidden = nn.Tanh()(hidden)
hidden = self.ffn_out(hidden)
hidden = hidden.squeeze(-1)
if token_mask is not None: hidden = _mask(hidden, token_mask)
# Loss calculation ---------------------------------------
if labels is not None:
locate_labels = labels[:, 0, :]
log_probs = nn.LogSoftmax(dim=1)(hidden)
loss = (-locate_labels * log_probs).sum(dim=1)
return loss.mean(), hidden
return None, hidden
class _RepairHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if config.decoder_vocab_size > 0: # We have a target vocab
self.decoder = nn.Linear(config.hidden_size, config.decoder_vocab_size, bias = False)
self.apply(init_weights)
def forward(self, error_embed, context_embed, token_mask = None, labels = None, target_labels = None):
# Compute a local pointer --------------------------------
repair_logits = torch.bmm(error_embed.unsqueeze(1), context_embed.transpose(2, 1)).squeeze()
repair_logits /= math.sqrt(error_embed.shape[1])
if len(repair_logits.shape) < 2:
repair_logits = repair_logits.unsqueeze(0)
if token_mask is not None and not self.config.token_annotate:
repair_logits = _mask(repair_logits, token_mask)
if labels is not None:
repair_labels = labels[:, 1, :]
# Compute a global vocab index ---------------------------
if hasattr(self, "decoder"):
decoder_logits = self.decoder(error_embed)
repair_logits = torch.cat([repair_logits, decoder_logits], dim = 1)
if labels is not None and target_labels is not None:
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
ohe_labels[:, 0] = 0
repair_labels = torch.cat([repair_labels, ohe_labels], dim = 1)
# Loss computation ---------------------------------------
if labels is not None:
repair_log_probs = nn.LogSoftmax(dim = 1)(repair_logits)
norm = repair_labels.sum(dim = -1).clamp_(0, 1)
# Collect log probs
# log sum_(t_i = w)(P(t_i)) = log sum_(t_i = w)(exp log P(t_i))
# = LSE(log P(t_i))
repair_log_probs = _mask(repair_log_probs, repair_labels)
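# Added note: masking non-target positions to ~-1e3 makes exp(log_prob)
# vanish there, so the logsumexp below effectively runs only over positions
# whose repair label is 1.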
per_example_loss = -norm * torch.logsumexp(repair_log_probs, dim = 1)
return per_example_loss.mean(), repair_logits
return None, repair_logits
class LocateRepairModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
self.locate_head = _LocateHead(config)
self.repair_head = _RepairHead(config)
@torch.no_grad()
def score(self, logits, labels):
locate_logits, repair_logits = logits
# Score for localization
loc_predict = nn.Softmax(dim = 1)(locate_logits)
loc_labels = labels[:, 0, :]
locate = loc_predict.argmax(dim=1)
locate = torch.nn.functional.one_hot(locate, num_classes=loc_predict.shape[1]).float()
locate_acc = (locate * loc_labels).sum(dim=1)
buggy_labels = 1 - loc_labels[:, 0]
# Buggy classification
false_alarms = 1 - ((1 - buggy_labels)*locate_acc).sum() / ((1 - buggy_labels).sum() + 1e-9)
bug_acc = (buggy_labels * locate_acc).sum() / (buggy_labels.sum() + 1e-9)
# Classification
cls_predict = loc_predict[:, 0].round()
cls_labels = loc_labels[:, 0]
cls_acc = (cls_predict * cls_labels).mean() + ((1 - cls_predict) * buggy_labels).mean()
# Repair scores
rep_probs = nn.Softmax(dim = 1)(repair_logits)
rep_labels = labels[:, 1, :]
if rep_probs.shape[1] != rep_labels.shape[1]:
target_labels = labels[:, 2, :]
target_labels = target_labels[loc_labels.bool()]
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
ohe_labels[:, 0] = 0
rep_labels = torch.cat([rep_labels, ohe_labels], dim = 1)
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
joint_acc = (buggy_labels * locate_acc * target_predict).sum() / (1e-9 + buggy_labels.sum())
return {
"classification_acc": cls_acc.item(),
"localization_acc": locate_acc.mean().item(),
"bug_acc": bug_acc.item(),
"false_alarm_rate": false_alarms.item(),
"repair_acc": target_acc.item(),
"loc_repair_acc": joint_acc.item(),
"avg_prediction": cls_predict.mean().item()
}
def forward(self, tokens, token_mask = None, position_ids = None, labels = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
context_embed, token_embed = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids,
token_type_ids = token_mask if self.config.token_annotate else None,
)
locate_loss, locate_logits = self.locate_head(context_embed,
token_embed,
token_mask,
labels)
# Either use the gold localization or the predicted to get the error position
error_repair_labels = None
if labels is not None: # We are training
locate_mask = labels[:, 0, :].bool()
if self.config.decoder_vocab_size > 0:
assert labels.shape[1] >= 2, "If a target vocabulary is specified we expect that target labels are provided."
error_repair_labels = labels[:, 2, :]
error_repair_labels = error_repair_labels[locate_mask]
else: # We are at inference
locate = locate_logits.argmax(dim=1)
locate_mask = F.one_hot(locate, num_classes=tokens.shape[1]).bool()
error_hidden = context_embed[locate_mask]
# ----------------------------------------------------------------
repair_loss, repair_logits = self.repair_head(
error_hidden,
context_embed,
token_mask,
labels,
error_repair_labels
)
if labels is not None:
return locate_loss + repair_loss, (locate_logits, repair_logits)
return (locate_logits, repair_logits)
# Masked repair ----------------------------------------------------------------
class MaskedRepairModel(nn.Module):
def __init__(self, config, encoder):
super().__init__()
self.config = config
self.encoder = encoder
self.repair_head = _RepairHead(config)
@torch.no_grad()
def score(self, repair_logits, labels):
# Repair mask
loc_labels = labels[:, 0, :]
buggy_labels = 1 - loc_labels[:, 0]
# Repair scores
rep_probs = nn.Softmax(dim = 1)(repair_logits)
rep_labels = labels[:, 1, :]
if rep_probs.shape[1] != rep_labels.shape[1]:
target_labels = labels[:, 2, :]
target_labels = target_labels[loc_labels.bool()]
ohe_labels = F.one_hot(target_labels, num_classes=self.config.decoder_vocab_size)
ohe_labels[:, 1] = 0
rep_labels = torch.cat([rep_labels, ohe_labels], dim = 1)
target_probs = (rep_labels * rep_probs).sum(dim=-1)
target_predict = target_probs.round()
target_acc = (target_predict * buggy_labels).sum() / (1e-9 + buggy_labels.sum())
return {
"repair_acc": target_acc.item()
}
def forward(self, tokens, token_mask = None, position_ids = None, labels = None, repair_mask = None):
attention_mask = tokens.sum(dim=2).clamp_(0, 1)
context_embed, _ = self.encoder(
tokens = tokens,
attention_mask = attention_mask.bool(),
position_ids = position_ids,
token_type_ids = token_mask if self.config.token_annotate else None,
)
# Either use the gold localization or the predicted to get the error position
error_repair_labels = None
if labels is not None: # We are training
locate_mask = labels[:, 0, :].bool()
if self.training and self.config.decoder_vocab_size > 0:
assert labels.shape[1] >= 2, "If a target vocabulary is specified we expect that target labels are provided."
error_repair_labels = labels[:, 2, :]
error_repair_labels = error_repair_labels[locate_mask]
else: # We are at inference
if repair_mask is None:
raise ValueError("Location labels are required to identify mask position.")
locate_mask = repair_mask.bool()
error_hidden = context_embed[locate_mask]
# ----------------------------------------------------------------
repair_loss, repair_logits = self.repair_head(
error_hidden,
context_embed,
token_mask,
labels,
error_repair_labels
)
if labels is not None:
return repair_loss, repair_logits
return repair_logits
|
cedricrupb/ctxmutants
|
ctxmutants/modelling/meta_models.py
|
meta_models.py
|
py
| 14,434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "utils.init_weights",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "transformers.activations.get_activation",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "utils.init_weights",
"line_number": 142,
"usage_type": "argument"
},
{
"api_name": "torch.cat",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "utils.init_weights",
"line_number": 178,
"usage_type": "argument"
},
{
"api_name": "torch.bmm",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 210,
"usage_type": "name"
},
{
"api_name": "torch.logsumexp",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 225,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 225,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 260,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "torch.nn.Softmax",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.one_hot",
"line_number": 363,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 363,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 349,
"usage_type": "call"
}
] |
5479249067
|
"""
Proximal Policy Optimization Algorithms (PPO):
https://arxiv.org/pdf/1707.06347.pdf
Related Tricks(May not be useful):
Mastering Complex Control in MOBA Games with Deep Reinforcement Learning (Dual Clip)
https://arxiv.org/pdf/1912.09729.pdf
A Closer Look at Deep Policy Gradients (Value clip, Reward normalizer)
https://openreview.net/pdf?id=ryxdEkHtPS
Revisiting Design Choices in Proximal Policy Optimization
https://arxiv.org/pdf/2009.10897.pdf
Learning Complex Dexterous Manipulation with Deep Reinforcement Learning and Demonstrations (DAPG):
https://arxiv.org/pdf/1709.10087.pdf
"""
from collections import defaultdict
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from maniskill2_learn.env import build_replay
from maniskill2_learn.networks import build_actor_critic, build_model
from maniskill2_learn.utils.torch import build_optimizer
from maniskill2_learn.utils.data import DictArray, GDict, to_np, to_torch
from maniskill2_learn.utils.meta import get_logger, get_world_rank, get_world_size
from maniskill2_learn.utils.torch import BaseAgent, RunningMeanStdTorch, RunningSecondMomentumTorch, barrier, get_flat_grads, get_flat_params, set_flat_grads
from ..builder import MFRL
@MFRL.register_module()
class PPO(BaseAgent):
def __init__(
self,
actor_cfg,
critic_cfg,
env_params,
gamma=0.99,
lmbda=0.95,
max_kl=None,
obs_norm=False,
rew_norm=True,
adv_norm=True,
recompute_value=True,
eps_clip=0.2,
critic_coeff=0.5,
entropy_coeff=0.0,
num_epoch=10,
critic_epoch=-1,
actor_epoch=-1,
num_mini_batch=-1,
critic_warmup_epoch=0,
batch_size=256,
max_grad_norm=0.5,
rms_grad_clip=None,
dual_clip=None,
critic_clip=False,
shared_backbone=False,
detach_actor_feature=True,
debug_grad=False,
demo_replay_cfg=None,
dapg_lambda=0.1,
dapg_damping=0.995,
ignore_dones=True,
visual_state_coeff=-1,
visual_state_mlp_cfg=None,
**kwargs
):
super(PPO, self).__init__()
assert dual_clip is None or dual_clip > 1.0, "Dual-clip PPO parameter should greater than 1.0."
assert max_grad_norm is None or rms_grad_clip is None, "Only one gradient clip mode is allowed!"
assert (num_epoch > 0 and (actor_epoch < 0 and critic_epoch < 0)) or (
    num_epoch < 0 and (actor_epoch > 0 and critic_epoch > 0)
), "We need exactly one of the schedules: num_epoch > 0, or (actor_epoch > 0 and critic_epoch > 0)."
if not rew_norm:
assert not critic_clip, "Value clip is available only when `reward_normalization` is True"
actor_cfg = deepcopy(actor_cfg)
critic_cfg = deepcopy(critic_cfg)
actor_optim_cfg = actor_cfg.pop("optim_cfg", None)
critic_optim_cfg = critic_cfg.pop("optim_cfg", None)
obs_shape = env_params["obs_shape"]
self.is_discrete = env_params["is_discrete"]
self.gamma = gamma
self.lmbda = lmbda
self.adv_norm = adv_norm
self.obs_rms = RunningMeanStdTorch(obs_shape, clip_max=10) if obs_norm else None
self.rew_rms = RunningMeanStdTorch(1) if rew_norm else None
self.critic_coeff = critic_coeff
self.entropy_coeff = entropy_coeff
self.eps_clip = eps_clip
self.dual_clip = dual_clip
self.critic_clip = critic_clip
self.max_kl = max_kl
self.recompute_value = recompute_value
self.max_grad_norm = max_grad_norm
self.rms_grad_clip = rms_grad_clip
self.debug_grad = debug_grad
self.num_mini_batch = num_mini_batch
self.batch_size = batch_size # The batch size for policy gradient
self.critic_warmup_epoch = critic_warmup_epoch
self.num_epoch = num_epoch
self.critic_epoch = critic_epoch
self.actor_epoch = actor_epoch
# Use extra state to get better feature
self.regress_visual_state = visual_state_coeff > 0 and visual_state_mlp_cfg is not None and "visual_state" in obs_shape
self.visual_state_coeff = visual_state_coeff
if self.regress_visual_state:
assert shared_backbone, "Only Visuomotor policy supports extra state fitting"
# For DAPG
self.dapg_lambda = nn.Parameter(to_torch(dapg_lambda), requires_grad=False)
self.dapg_damping = dapg_damping
self.demo_replay = build_replay(demo_replay_cfg)
if self.demo_replay is not None:
for key in ['obs', 'actions']:
assert key in self.demo_replay.memory, f"DAPG needs {key} in your demo!"
# For done signal process.
self.ignore_dones = ignore_dones
# Build networks
actor_cfg.update(env_params)
critic_cfg.update(env_params)
self.actor, self.critic = build_actor_critic(actor_cfg, critic_cfg, shared_backbone)
if self.regress_visual_state:
visual_state_mlp_cfg.mlp_spec += [obs_shape["visual_state"]]
self.extra_fit = build_model(visual_state_mlp_cfg)
if rms_grad_clip is not None:
self.grad_rms = RunningSecondMomentumTorch(get_flat_params(self, trainable=True).shape, clip_max=rms_grad_clip)
self.shared_backbone = shared_backbone
self.detach_actor_feature = detach_actor_feature
self.actor_optim = build_optimizer(self.actor, actor_optim_cfg)
self.critic_optim = build_optimizer(self.critic, critic_optim_cfg)
def compute_critic_loss(self, samples):
# For update_actor_critic and update critic
assert isinstance(samples, (dict, GDict))
values = self.critic(
samples["obs"], episode_dones=samples["episode_dones"], save_feature=True
)
feature = self.critic.values[0].backbone.pop_attr("saved_feature")
visual_feature = self.critic.values[0].backbone.pop_attr("saved_visual_feature")
if self.detach_actor_feature and feature is not None:
feature = feature.detach()
if self.critic_clip and isinstance(self.critic_clip, float):
v_clip = samples["old_values"] + (values - samples["old_values"]).clamp(-self.critic_clip, self.critic_clip)
vf1 = (samples["returns"] - values).pow(2)
vf2 = (samples["returns"] - v_clip).pow(2)
critic_loss = torch.max(vf1, vf2)
else:
critic_loss = (samples["returns"] - values).pow(2)
critic_loss = critic_loss.mean() if samples["is_valid"] is None else critic_loss[samples["is_valid"]].mean()
return critic_loss, feature, visual_feature
def update_actor_critic(self, samples, demo_samples=None, with_critic=False):
"""
Returns True if self.max_kl is not None and
policy update causes large kl divergence between new policy and old policy,
in which case we stop the policy update and throw away the current replay buffer
"""
is_valid = samples["is_valid"]
self.actor_optim.zero_grad()
self.critic_optim.zero_grad()
ret = {}
critic_loss, actor_loss, demo_actor_loss, visual_state_loss, entropy_term = [0.0] * 5
feature, visual_feature, policy_std = [None] * 3
if with_critic:
critic_mse, feature, visual_feature = self.compute_critic_loss(samples)
critic_loss = critic_mse * self.critic_coeff
ret["ppo/critic_err"] = critic_mse.item()
# ret['ppo/critic_loss'] = critic_loss.item()
# Run actor forward
alls = self.actor(
samples["obs"],
episode_dones=samples["episode_dones"],
mode="dist" if self.is_discrete else "dist_std",
feature=feature,
save_feature=feature is None,
require_aux_loss=True, # auxiliary backbone self-supervision, e.g. aux_regress in VisuomotorTransformerFrame
)
if isinstance(alls, dict) and 'aux_loss' in alls.keys(): # auxiliary backbone self-supervision, e.g. aux_regress in VisuomotorTransformerFrame
alls, backbone_aux_loss = alls['feat'], alls['aux_loss']
else:
backbone_aux_loss = None
if not self.is_discrete:
new_distributions, policy_std = alls
else:
new_distributions, policy_std = alls, None
del alls
if visual_feature is None:
visual_feature = self.actor.backbone.pop_attr("saved_visual_feature")
# Compute actor loss
dist_entropy = new_distributions.entropy().mean()
recent_log_p = new_distributions.log_prob(samples["actions"])
log_ratio = recent_log_p - samples["old_log_p"]
ratio = log_ratio.exp()
# print("ratio", ratio[:20], flush=True)
# Estimation of KL divergence = p (log p - log q) with method in Schulman blog: http://joschu.net/blog/kl-approx.html
with torch.no_grad():
approx_kl_div = (ratio - 1 - log_ratio).mean().item()
clip_frac = (torch.abs(ratio - 1) > self.eps_clip).float().mean().item()
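# Added note: this is Schulman's "k3" estimator, E[(r - 1) - log r] with
# r = new/old ratio; it is always >= 0 (since log r <= r - 1) and unbiased
# for the KL between old and new policies. E.g. for log_ratio = 0.1,
# exp(0.1) - 1 - 0.1 ~= 0.0052.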
if policy_std is not None:
ret["ppo/policy_std"] = policy_std.mean().item()
ret["ppo/entropy"] = dist_entropy.item()
ret["ppo/mean_p_ratio"] = ratio.mean().item()
ret["ppo/max_p_ratio"] = ratio.max().item()
ret["ppo/log_p"] = recent_log_p.mean().item()
ret["ppo/clip_frac"] = clip_frac
ret["ppo/approx_kl"] = approx_kl_div
sign = GDict(self.max_kl is not None and approx_kl_div > self.max_kl * 1.5).allreduce(op="BOR", wrapper=False)
if sign:
return True, ret
if ratio.ndim == samples["advantages"].ndim - 1:
ratio = ratio[..., None]
surr1 = ratio * samples["advantages"]
surr2 = ratio.clamp(1 - self.eps_clip, 1 + self.eps_clip) * samples["advantages"]
surr = torch.min(surr1, surr2)
if self.dual_clip:
surr = torch.max(surr, self.dual_clip * samples["advantages"])
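# Added note: the dual clip (Ye et al., "Mastering Complex Control in MOBA
# Games") lower-bounds the surrogate by dual_clip * A; it mainly bites when
# the advantage is negative and the importance ratio is large.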
actor_loss = -surr[is_valid].mean()
entropy_term = -dist_entropy * self.entropy_coeff
ret["ppo/actor_loss"] = actor_loss.item()
ret["ppo/entropy_loss"] = entropy_term.item()
# DAPG actor loss
if demo_samples is not None:
new_demo_distributions = self.actor(demo_samples["obs"], mode="dist")
nll_loss_demo = -new_demo_distributions.log_prob(demo_samples["actions"]).mean()
demo_actor_loss = nll_loss_demo * self.dapg_lambda
with torch.no_grad():
ret["dapg/demo_nll_loss"] = nll_loss_demo.item()
ret["dapg/demo_actor_loss"] = demo_actor_loss.item()
# State regression loss
if self.regress_visual_state:
assert feature is not None
visual_state_mse = F.mse_loss(self.extra_fit(visual_feature), samples["obs/visual_state"], reduction="none")
visual_state_mse = visual_state_mse[is_valid].mean()
ret["ppo-extra/visual_state_mse"] = visual_state_mse
visual_state_loss = visual_state_mse * self.visual_state_coeff
ret["ppo-extra/visual_state_loss"] = visual_state_loss.item()
# Backbone auxiliary supervision loss
if backbone_aux_loss is not None:
ret["ppo-extra/backbone_auxiliary_loss"] = backbone_aux_loss.item()
loss = actor_loss + entropy_term + critic_loss + visual_state_loss + demo_actor_loss
if backbone_aux_loss is not None:
loss = loss + backbone_aux_loss
loss.backward()
net = self if with_critic else self.actor
ret["grad/grad_norm"] = net.grad_norm
if math.isnan(ret["grad/grad_norm"]):
print("############ Debugging nan grad ############", flush=True)
print("Dist mean", new_distributions.mean, flush=True)
print("Dist std", new_distributions.stddev, flush=True)
print("Samples[actions]", samples["actions"], flush=True)
print("Recent_log_p", recent_log_p, flush=True)
print("Samples[old_log_p]", samples["old_log_p"], flush=True)
for k, v in ret.items():
print(k, v, flush=True)
if self.shared_backbone:
if getattr(self.actor.backbone, "visual_nn", None) is not None:
ret["grad/visual_grad"] = self.actor.backbone.visual_nn.grad_norm
if getattr(self.actor.backbone, "final_mlp", None) is not None:
ret["grad/actor_mlp_grad"] = self.actor.backbone.final_mlp.grad_norm
elif self.actor.final_mlp is not None:
ret["grad/actor_mlp_grad"] = self.actor.final_mlp.grad_norm
if with_critic:
if getattr(self.critic.values[0].backbone, "final_mlp", None) is not None:
ret["grad/critic_mlp_grad"] = self.critic.values[0].backbone.final_mlp.grad_norm
elif self.critic.values[0].final_mlp is not None:
ret["grad/critic_mlp_grad"] = self.critic.values[0].final_mlp.grad_norm
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm_(net.parameters(), self.max_grad_norm)
elif self.rms_grad_clip is not None:
grads = get_flat_grads(self)
grads = self.grad_rms.add(grads)
set_flat_grads(self, grads)
ret["grad/clipped_grad_norm"] = net.grad_norm
self.actor_optim.step()
if with_critic:
self.critic_optim.step()
return False, ret
def update_critic(self, samples, demo_samples=None):
self.critic_optim.zero_grad()
critic_mse = self.compute_critic_loss(samples)[0]
critic_loss = critic_mse * self.critic_coeff
critic_loss.backward()
ret = {}
ret["grad/grad_norm"] = self.critic.grad_norm
if self.max_grad_norm is not None:
nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
elif self.rms_grad_clip is not None:
assert False
grads = get_flat_grads(self)
grads = self.grad_rms.add(grads)
set_flat_grads(self, grads)
ret["grad/clipped_grad_norm"] = self.critic.grad_norm
ret["ppo/critic_loss"] = critic_loss.item()
ret["ppo/critic_mse"] = critic_mse.item()
self.critic_optim.step()
return ret
def update_parameters(self, memory, updates, with_v=False):
world_size = get_world_size()
logger = get_logger()
ret = defaultdict(list)
process_batch_size = self.batch_size if GDict(memory["obs"]).is_big else None
if self.num_mini_batch < 0:
max_samples = GDict(len(memory)).allreduce(op="MAX", device=self.device, wrapper=False) if world_size > 1 else len(memory)
num_mini_batch = int((max_samples + self.batch_size - 1) // self.batch_size)
else:
num_mini_batch = self.num_mini_batch
logger.info(f"Number of batches in one PPO epoch: {num_mini_batch}!")
if len(memory) < memory.capacity:
memory["episode_dones"][len(memory) :] = True
# Do transformation for all valid samples
memory["episode_dones"] = (memory["episode_dones"] + memory["is_truncated"]) > 1 - 0.1
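# Added note: the line above is an elementwise logical OR written
# arithmetically -- the 0/1 flags sum to more than 0.9 iff at least one
# of them is set.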
if self.has_obs_process:
self.obs_rms.sync()
obs = GDict({"obs": memory["obs"], "next_obs": memory["next_obs"]}).to_torch(device="cpu", wrapper=False)
obs = GDict(self.process_obs(obs, batch_size=process_batch_size)).to_numpy(wrapper=False)
memory.update(obs)
with torch.no_grad():
memory["old_distribution"], memory["old_log_p"] = self.get_dist_with_logp(
obs=memory["obs"], actions=memory["actions"], batch_size=process_batch_size
)
ret["ppo/old_log_p"].append(memory["old_log_p"].mean().item())
demo_memory = self.demo_replay
if demo_memory is not None:
with torch.no_grad():
demo_memory = self.demo_replay.sample(min(len(self.demo_replay), len(memory)))
if self.has_obs_process:
demo_memory = demo_memory.to_torch(device="cpu")
demo_memory = self.process_obs(demo_memory, batch_size=process_batch_size)
demo_memory = demo_memory.to_numpy()
if self.ignore_dones:
demo_memory["dones"] = demo_memory["dones"] * 0
def run_over_buffer(epoch_id, mode="v"):
nonlocal memory, ret, demo_memory, logger
assert mode in ["v", "pi", "v+pi"]
if "v" in mode and (epoch_id == 0 or self.recompute_value):
with self.critic.no_sync():
memory.update(
self.compute_gae(
obs=memory["obs"],
next_obs=memory["next_obs"],
rewards=memory["rewards"],
dones=memory["dones"],
episode_dones=memory["episode_dones"],
update_rms=True,
batch_size=process_batch_size,
ignore_dones=self.ignore_dones,
)
)
if self.adv_norm:
# print(mean_adv, std_adv)
mean_adv = memory["advantages"].mean(0)
std_adv = memory["advantages"].std(0) + 1e-8
mean_adv, std_adv = GDict([mean_adv, std_adv]).allreduce(wrapper=False)
# print(mean_adv, std_adv)
# exit(0)
memory["advantages"] = (memory["advantages"] - mean_adv) / std_adv
ret["ppo/adv_mean"].append(mean_adv.item())
ret["ppo/adv_std"].append(std_adv.item())
ret["ppo/max_normed_adv"].append(np.abs(memory["advantages"]).max().item())
ret["ppo/v_target"].append(memory["returns"].mean().item())
ret["ppo/ori_returns"].append(memory["original_returns"].mean().item())
def run_one_iter(samples, demo_samples):
if "pi" in mode:
flag, infos = self.update_actor_critic(samples, demo_samples, with_critic=(mode == "v+pi"))
for key in infos:
ret[key].append(infos[key])
elif mode == "v":
flag, infos = False, self.update_critic(samples, demo_samples)
for key in infos:
ret[key].append(infos[key])
return flag
for samples in memory.mini_batch_sampler(self.batch_size, drop_last=True, auto_restart=True, max_num_batches=num_mini_batch):
samples = DictArray(samples).to_torch(device=self.device, non_blocking=True)
demo_samples = None
if demo_memory is not None:
indices = np.random.randint(0, high=len(demo_memory), size=self.batch_size)
demo_samples = demo_memory.slice(indices).to_torch(device=self.device, non_blocking=True)
if run_one_iter(samples, demo_samples):
return True
return False
if self.critic_warmup_epoch > 0:
logger.info("**Warming up critic at the beginning of training; this causes reported ETA to be slower than actual ETA**")
for i in range(self.critic_warmup_epoch):
run_over_buffer(i, "v")
if self.num_epoch > 0:
for i in range(self.num_epoch):
num_actor_epoch = i + 1
if run_over_buffer(i, "v+pi"):
break
else:
for i in range(self.critic_epoch):
run_over_buffer(i, "v")
for i in range(self.actor_epoch):
num_actor_epoch = i + 1
if run_over_buffer(i, "pi"):
break
self.critic_warmup_epoch = 0
ret = {key: np.mean(ret[key]) for key in ret}
with torch.no_grad():
ret["param/max_policy_abs"] = torch.max(torch.abs(get_flat_params(self.actor))).item()
ret["param/policy_norm"] = torch.norm(get_flat_params(self.actor)).item()
if isinstance(self.critic, nn.Module):
ret["param/max_critic_abs"] = torch.max(torch.abs(get_flat_params(self.critic))).item()
ret["param/critic_norm"] = torch.norm(get_flat_params(self.critic)).item()
for key in ["old_distribution", "old_log_p", "old_values", "old_next_values", "original_returns", "returns", "advantages"]:
if key in memory.memory:
memory.memory.pop(key)
ret["ppo/num_actor_epoch"] = num_actor_epoch
if self.demo_replay is not None:
# For DAPG
ret["dapg/demo_lambda"] = self.dapg_lambda.item()
self.dapg_lambda *= self.dapg_damping
if with_v:
# For PPG
ret["vf"] = to_np(memory["original_returns"])
# exit(0)
return ret
|
haosulab/ManiSkill2-Learn
|
maniskill2_learn/methods/mfrl/ppo.py
|
ppo.py
|
py
| 21,464 |
python
|
en
|
code
| 53 |
github-code
|
6
|
[
{
"api_name": "maniskill2_learn.utils.torch.BaseAgent",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.RunningMeanStdTorch",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.RunningMeanStdTorch",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "maniskill2_learn.utils.data.to_torch",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.env.build_replay",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.networks.build_actor_critic",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.networks.build_model",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.RunningSecondMomentumTorch",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_params",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.build_optimizer",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.build_optimizer",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "torch.max",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "math.isnan",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_grads",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.set_flat_grads",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils.clip_grad_norm_",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "torch.nn.utils",
"line_number": 340,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 340,
"usage_type": "name"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_grads",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.set_flat_grads",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.meta.get_world_size",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.meta.get_logger",
"line_number": 355,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 374,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.GDict",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 423,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.DictArray",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 443,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 443,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_params",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_params",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 472,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 472,
"usage_type": "name"
},
{
"api_name": "torch.max",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_params",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.torch.get_flat_params",
"line_number": 474,
"usage_type": "call"
},
{
"api_name": "maniskill2_learn.utils.data.to_np",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "builder.MFRL.register_module",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "builder.MFRL",
"line_number": 35,
"usage_type": "name"
}
] |
442805106
|
import tensorflow as tf
from PIL import Image
import cv2
import numpy as np
import uuid
import os
from .admin import model_path, label_path
from .utility import load_image_into_numpy_array, calculate_area, delete_and_create_folder, shortest_longest_area
import sys
sys.path.append("../models/research")
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# A brand-detection service: takes a video input, runs predictions, and computes KPI metrics per logo
class BrandObjectService:
def __init__(self, video_path):
self.video_path = video_path
self.save_path = "./save_path"
self.predicted_path = './predicted_frames'
delete_and_create_folder(self.save_path)
delete_and_create_folder(self.predicted_path)
def predict(self):
NUM_CLASSES = 7
KPIs_dict = dict()
#Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
label_map = label_map_util.load_labelmap(label_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Output image size (declared but unused below).
IMAGE_SIZE = (500, 500)
count = 0
frame_number = 0
cap = cv2.VideoCapture(self.video_path)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while cap.isOpened():
frame_number += 1
ret, frame = cap.read()
if not ret:
    break  # end of video or read failure
filename = str(uuid.uuid4()) + ".jpg"
fullpath = os.path.join(self.save_path, filename)
cv2.imwrite(fullpath, frame)
count += 1
### for testing script...
if count == 50:
break
image = Image.open(fullpath)
image_np = load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection
image, box_to_display_str_map = vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
image_pil = Image.fromarray(np.uint8(image_np)).convert('RGB')
im_width, im_height = image_pil.size
area_whole = im_width * im_height
for key, value in box_to_display_str_map.items():
ymin, xmin, ymax, xmax = key
(left, right, top, bottom) = (
xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
area = calculate_area(top, left, bottom, right)
percent_area = round(area / area_whole, 2)
rindex = value[0].rfind(':')
brand_name = value[0][:rindex]
if brand_name in KPIs_dict.keys():
KPIs_dict[brand_name]['count'] += 1
KPIs_dict[brand_name]['area'].append(percent_area)
KPIs_dict[brand_name]['frames'].append(frame_number)
else:
KPIs_dict[brand_name] = {"count": 1}
KPIs_dict[brand_name].update({"area": [percent_area]})
KPIs_dict[brand_name].update({"frames": [frame_number]})
full_predicted_path = os.path.join(self.predicted_path, str(uuid.uuid4()) + ".jpg")
cv2.imwrite(full_predicted_path, image)
KPIs_dict = self.process_kpi(KPIs_dict)
return KPIs_dict
# returns the dictionary of KPI metrics per logo
def process_kpi(self, KPIs_dict):
for each_brand, analytics_dict in KPIs_dict.items():
area = analytics_dict['area']
response = shortest_longest_area(area)
KPIs_dict[each_brand].update(response)
return KPIs_dict
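# --- Added usage sketch (video path and printed fields are hypothetical):
# service = BrandObjectService("./videos/sample_ad.mp4")
# kpis = service.predict()
# for brand, stats in kpis.items():
#     print(brand, stats["count"], stats["area"][:3], stats["frames"][:3])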
|
krishnakaushik25/Forecasting-Business-KPI
|
modular_code/src/ML_Pipeline/predict.py
|
predict.py
|
py
| 5,576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "utility.delete_and_create_folder",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utility.delete_and_create_folder",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.Graph",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphDef",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile.GFile",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "admin.model_path",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "tensorflow.gfile",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.import_graph_def",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "object_detection.utils.label_map_util.load_labelmap",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "admin.label_path",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "object_detection.utils.label_map_util",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "object_detection.utils.label_map_util.convert_label_map_to_categories",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "object_detection.utils.label_map_util",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "object_detection.utils.label_map_util.create_category_index",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "object_detection.utils.label_map_util",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "utility.load_image_into_numpy_array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "object_detection.utils.visualization_utils",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "numpy.squeeze",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "numpy.squeeze",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "numpy.squeeze",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "numpy.uint8",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "utility.calculate_area",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "utility.shortest_longest_area",
"line_number": 126,
"usage_type": "call"
}
] |
28041597167
|
import unittest
import os
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
from time import sleep
class SaveTestCase(unittest.TestCase):
def setUp(self):
folder = temp_folder()
self.filepath = os.path.join(folder, "file.txt")
# Save some content and keep timestamp
self.content = "my content"
save(self.filepath, self.content)
self.timestamp = os.path.getmtime(self.filepath)
sleep(1)  # mtime precision is in seconds, so we need to sleep
def only_if_modified_true_test(self):
save(self.filepath, self.content, only_if_modified=True)
self.assertEqual(self.timestamp, os.path.getmtime(self.filepath))
def only_if_modified_false_test(self):
save(self.filepath, self.content, only_if_modified=False)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
def modified_only_true_test(self):
save(self.filepath, "other content", only_if_modified=True)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
def modified_only_false_test(self):
save(self.filepath, "other content", only_if_modified=False)
self.assertNotEqual(self.timestamp, os.path.getmtime(self.filepath))
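# Added note: the *_test method names follow the nose-style collection used
# by Conan's historical test suite; stock unittest discovery only collects
# test_* names, so "python -m unittest" would find nothing here as written.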
|
pianoslum/conan
|
conans/test/util/files_test.py
|
files_test.py
|
py
| 1,276 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "conans.test.utils.test_files.temp_folder",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "conans.util.files.save",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "conans.util.files.save",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "conans.util.files.save",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "conans.util.files.save",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "conans.util.files.save",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.path.getmtime",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
}
] |
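The tests above pin down the only_if_modified contract of conans.util.files.save: rewriting identical content must keep the mtime, every other write must touch it. A minimal re-implementation sketch of that contract (a hypothetical equivalent, not the conans source):

import os

def save(path, content, only_if_modified=False):
    # Skip the write, and therefore keep the mtime, when content is unchanged.
    if only_if_modified and os.path.exists(path):
        with open(path) as f:
            if f.read() == content:
                return
    with open(path, "w") as f:
        f.write(content)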
12260712099
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pymysql
money_all=56.75+2+938.7+83.2
money_all_str=str(money_all)
print(money_all_str)
money_real=int(money_all)
print(str(money_real))
print(7/3)
print(7//5)
print(35<54)
def sort(x):
return x['price']
mydb=pymysql.connect(
host="localhost",
user='root',
password="123456",
)
cursor=mydb.cursor()
sqltext='show databases'
cursor.execute(sqltext)
for row in cursor:
print(row)
|
hedychium/python_learning
|
erase_zero.py
|
erase_zero.py
|
py
| 459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pymysql.connect",
"line_number": 23,
"usage_type": "call"
}
] |
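The script above iterates a raw pymysql cursor and never closes it; a sketch of the same 'show databases' round trip with the cursor managed as a context manager and the connection closed explicitly (credentials as hard-coded above):

import pymysql

conn = pymysql.connect(host="localhost", user="root", password="123456")
try:
    with conn.cursor() as cursor:
        cursor.execute("show databases")
        for row in cursor.fetchall():
            print(row)
finally:
    conn.close()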
30763374181
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 26 15:42:17 2016
@author: Shahidur Rahman
"""
import explorers
import stringRecorder
import pandas
from sqlalchemy import create_engine
import random
from mmh3 import hash128
#from sklearn.datasets import load_iris
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
engine = create_engine('mysql+pymysql://root:shahidur_123@localhost:3306/mwt')
j=0
#Data generation
import numpy as np
#import pdb;pdb.set_trace()
mu, sigma = 0, 1
actionValue = np.random.normal(mu, sigma, 200000)
#type(actionValue)
minVal = np.amin(actionValue)
maxVal = np.amax(actionValue)
trainData = np.random.normal(mu, sigma, 200000)
testValue = np.random.normal(mu, sigma, 200000)
#s1 = np.empty(2000, dtype=np.int)
for i in range(0,200000):
#reward generation
trainData[i] = int(round(0 + (trainData[i]-minVal)*(1-0)/(maxVal-minVal),0))
#action generation
actionValue[i] = int(round(1 + (actionValue[i]-minVal)*(10-1)/(maxVal-minVal),0))
#testData
testValue[i] = int(round(1 + (testValue[i]-minVal)*(10-0)/(maxVal-minVal),0))
X = [0] * 200000
Y = [0] * 200000
X1 = [0] * 200000
y1 = [0] * 200000
from random import randint
for i in range(0,200000):
X[i] = [randint(0,9), randint(0,9), randint(0,9), randint(0,9), randint(0,9), randint(0,9), actionValue[i]]
Y[i] = [randint(0,9), randint(0,9), randint(0,9), randint(0,9), randint(0,9), randint(0,9)]
#train data set up actionValue
for i in range(0,200000):
X1[i], y1[i] = [np.asarray(X[i])[0], np.asarray(X[i])[1], np.asarray(X[i])[2], np.asarray(X[i])[3],
np.asarray(X[i])[4], np.asarray(X[i])[5]], actionValue[i]
#train data setup rewardValue
X2, y2 = X, trainData
#model action selection
from sklearn import svm
clf = svm.SVC(kernel='rbf')
modelActionSelection = clf.fit(X1, y1)
#model reward allocation
clf = svm.SVC(kernel='rbf')
modelRewardAllocation = clf.fit(X2, y2)
for i in range(0,200000):
#epsilon
epsilon = round(random.random(),3)
#unique number generator
unique_key = hash128('my string of doom ', seed=1234)
# number of actions
noOfActions = 10
print(i)
#policy decision
policyDecision = modelActionSelection.predict(Y[i])
#print("predict["+str(i)+"] is "+str(predict))
for x in policyDecision:
policyDecision = int(x)
#scores
scores = [.2,.5,.3]
callExplorer = explorers.explorers(epsilon,noOfActions,policyDecision,scores)
storeValues = callExplorer.algoSelection()
#reward check dataset
rewardCheckData = [np.asarray(Y[i])[0], np.asarray(Y[i])[1], np.asarray(Y[i])[2], np.asarray(Y[i])[3],
np.asarray(Y[i])[4], np.asarray(Y[i])[5], storeValues['actionID']]
rewardValue = int(modelRewardAllocation.predict(rewardCheckData))
record = stringRecorder.stringRecorder(str(Y[i]), str(storeValues['actionID']),str(storeValues['actionProbability']), str(unique_key), str(storeValues['isExplore']), str(epsilon), str(noOfActions),str(policyDecision),str(storeValues['explorerAlgo']), str(rewardValue))
record = record.sewStrings()
#print('record : '+str(record))
colList="context,actionID,actionProbability,unique_key,isExplore,epsilon,noOfActions,policyDecision,explorerAlgo,rewardValue".split(',')
#c1=['col1']
df = pandas.DataFrame(data=record,index=colList)
#transpose the data
df=df.T
#print("printing panda df here")
#print(df)
#push data in sql
#rf = pandas.DataFrame(data=['10',1,2,'62019057582468709482189373788949966293',4,5,6,7,'8'],index=colList)
#rf=rf.T
#rf.to_sql(con=engine, name='stringrecord', if_exists='append', index=False)
df.to_sql(con=engine, name='stringrecord', if_exists='append', index=False)
df.to_sql(con=engine, name='stringrecord_test', if_exists='append', index=False)
|
skshahidur/nlp_paper_implementation
|
Word-Embedding/mwt_v1.py
|
mwt_v1.py
|
py
| 4,021 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "warnings.filterwarnings",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.amin",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.amax",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "random.random",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "mmh3.hash128",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "explorers.explorers",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "stringRecorder.stringRecorder",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 102,
"usage_type": "call"
}
] |
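The main loop above draws an epsilon per iteration and hands the explore/exploit decision to explorers.explorers, a module that is not part of this record. A generic epsilon-greedy sketch of the same idea, with hypothetical names:

import random

def epsilon_greedy(policy_action, n_actions, epsilon):
    # With probability epsilon explore uniformly over actions 1..n_actions,
    # otherwise exploit the action proposed by the policy model.
    if random.random() < epsilon:
        return random.randrange(1, n_actions + 1), True   # explored
    return policy_action, False                           # exploited

action, is_explore = epsilon_greedy(policy_action=3, n_actions=10, epsilon=0.1)
print(action, is_explore)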
9805159119
|
import multiprocessing
# Gunicorn app
# Tell Gunicorn which application to run
wsgi_app = "django_examples.asgi:application"
# Requests
# Restart workers after so many requests, with some variability.
max_requests = 1000
max_requests_jitter = 50
# Logging
# Use stdout for logging
log_file = "-"
# Workers
bind = "0.0.0.0:8000"
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = "uvicorn.workers.UvicornWorker"
|
andrewguest/django-alpine-htmx
|
gunicorn.conf.py
|
gunicorn.conf.py
|
py
| 425 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "multiprocessing.cpu_count",
"line_number": 18,
"usage_type": "call"
}
] |
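The workers line in the config above applies the common 2 * CPUs + 1 sizing heuristic for Gunicorn; a quick check of what it evaluates to on a given machine:

import multiprocessing

cpus = multiprocessing.cpu_count()
print(f"{cpus} CPUs -> {cpus * 2 + 1} workers")

With the UvicornWorker class set, the file is typically picked up by running gunicorn -c gunicorn.conf.py from the project root.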
27267958481
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http.response import JsonResponse
from viteproject.models import DesignSave
from viteproject.serializers import DesignSaveSerializer
from django.core.files.storage import default_storage
# Create your views here.
@csrf_exempt
def save_designAPI(request,id=0):
if request.method == 'GET':
designSave = DesignSave.objects.all()
designSaveSerializer = DesignSaveSerializer(designSave,many=True)
return JsonResponse(designSaveSerializer.data,safe=False)
elif request.method == 'POST':
designSave_data = JSONParser().parse(request)
designSaveSerializer = DesignSaveSerializer(data=designSave_data)
if designSaveSerializer.is_valid():
designSaveSerializer.save()
return JsonResponse("Added Successfully",safe=False)
return JsonResponse("Failed to Add",safe=False)
elif request.method == 'PUT':
designSave_data = JSONParser().parse(request)
designSave = DesignSave.objects.get(DesignId = designSave_data['DesignId'])
designSaveSerializer = DesignSaveSerializer(designSave,data=designSave_data)
if designSaveSerializer.is_valid():
designSaveSerializer.save()
return JsonResponse("Update Successfully",safe=False)
return JsonResponse("Fail Update")
elif request.method=='DELETE':
designSave = DesignSave.objects.get(DesignId=id)
designSave.delete()
return JsonResponse("Delete Successfully",safe=False)
@csrf_exempt
def SaveFile(request):
file=request.FILES['file']
file_name = default_storage.save(file.name,file)
return JsonResponse(file_name,safe=False)
|
SurajBhosale003/Osdag-React-Django
|
backend/viteproject/views.py
|
views.py
|
py
| 1,795 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "viteproject.models.DesignSave.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "viteproject.models.DesignSave.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "viteproject.models.DesignSave",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "viteproject.serializers.DesignSaveSerializer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "rest_framework.parsers.JSONParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "viteproject.serializers.DesignSaveSerializer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "rest_framework.parsers.JSONParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "viteproject.models.DesignSave.objects.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "viteproject.models.DesignSave.objects",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "viteproject.models.DesignSave",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "viteproject.serializers.DesignSaveSerializer",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "viteproject.models.DesignSave.objects.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "viteproject.models.DesignSave.objects",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "viteproject.models.DesignSave",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.core.files.storage.default_storage.save",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.core.files.storage.default_storage",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.http.response.JsonResponse",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.csrf.csrf_exempt",
"line_number": 39,
"usage_type": "name"
}
] |
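A sketch of exercising the save_designAPI view above from a client, assuming it is routed at a hypothetical /designsave/ URL and that DesignSaveSerializer accepts the field shown (neither the URLconf nor the serializer is part of this record):

import requests  # third-party HTTP client

base = "http://localhost:8000/designsave/"

# POST a new design; the payload shape depends on DesignSaveSerializer.
resp = requests.post(base, json={"DesignId": 1})
print(resp.status_code, resp.json())

# GET all saved designs back.
print(requests.get(base).json())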
26041579406
|
from __future__ import annotations
import itertools
import re
from collections import defaultdict
from typing import Iterable, Iterator, Sequence, Tuple, TypeVar
from pkg_resources import Requirement
from typing_extensions import Protocol
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import InterpreterConstraintsField
from pants.build_graph.address import Address
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.target import Target
from pants.util.docutil import bin_name
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import softwrap
# This protocol allows us to work with any arbitrary FieldSet. See
# https://mypy.readthedocs.io/en/stable/protocols.html.
class FieldSetWithInterpreterConstraints(Protocol):
@property
def address(self) -> Address:
...
@property
def interpreter_constraints(self) -> InterpreterConstraintsField:
...
_FS = TypeVar("_FS", bound=FieldSetWithInterpreterConstraints)
RawConstraints = Tuple[str, ...]
# The current maxes are 2.7.18 and 3.6.15. We go much higher, for safety.
_PATCH_VERSION_UPPER_BOUND = 30
@memoized
def interpreter_constraints_contains(
a: RawConstraints, b: RawConstraints, interpreter_universe: tuple[str, ...]
) -> bool:
"""A memoized version of `InterpreterConstraints.contains`.
This is a function in order to keep the memoization cache on the module rather than on an
instance. It can't go on `PythonSetup`, since that would cause a cycle with this module.
"""
return InterpreterConstraints(a).contains(InterpreterConstraints(b), interpreter_universe)
@memoized
def parse_constraint(constraint: str) -> Requirement:
"""Parse an interpreter constraint, e.g., CPython>=2.7,<3.
We allow shorthand such as `>=3.7`, which gets expanded to `CPython>=3.7`. See Pex's
interpreter.py's `parse_requirement()`.
"""
try:
parsed_requirement = Requirement.parse(constraint)
except ValueError:
parsed_requirement = Requirement.parse(f"CPython{constraint}")
return parsed_requirement
# Normally we would subclass `DeduplicatedCollection`, but we want a custom constructor.
class InterpreterConstraints(FrozenOrderedSet[Requirement], EngineAwareParameter):
@classmethod
def for_fixed_python_version(
cls, python_version_str: str, interpreter_type: str = "CPython"
) -> InterpreterConstraints:
return cls([f"{interpreter_type}=={python_version_str}"])
def __init__(self, constraints: Iterable[str | Requirement] = ()) -> None:
# #12578 `parse_constraint` will sort the requirement's component constraints into a stable form.
# We need to sort the component constraints for each requirement _before_ sorting the entire list
# for the ordering to be correct.
parsed_constraints = (
i if isinstance(i, Requirement) else parse_constraint(i) for i in constraints
)
super().__init__(sorted(parsed_constraints, key=lambda c: str(c)))
def __str__(self) -> str:
return " OR ".join(str(constraint) for constraint in self)
def debug_hint(self) -> str:
return str(self)
@property
def description(self) -> str:
return str(sorted(str(c) for c in self))
@classmethod
def merge(cls, ics: Iterable[InterpreterConstraints]) -> InterpreterConstraints:
return InterpreterConstraints(
cls.merge_constraint_sets(tuple(str(requirement) for requirement in ic) for ic in ics)
)
@classmethod
def merge_constraint_sets(
cls, constraint_sets: Iterable[Iterable[str]]
) -> frozenset[Requirement]:
"""Given a collection of constraints sets, merge by ORing within each individual constraint
set and ANDing across each distinct constraint set.
For example, given `[["CPython>=2.7", "CPython<=3"], ["CPython==3.6.*"]]`, return
`["CPython>=2.7,==3.6.*", "CPython<=3,==3.6.*"]`.
"""
# A sentinel to indicate a requirement that is impossible to satisfy (i.e., one that
# requires two different interpreter types).
impossible = parse_constraint("IMPOSSIBLE")
# Each element (a Set[ParsedConstraint]) will get ANDed. We use sets to deduplicate
# identical top-level parsed constraint sets.
# First filter out any empty constraint_sets, as those represent "no constraints", i.e.,
# any interpreters are allowed, so omitting them has the logical effect of ANDing them with
# the others, without having to deal with the vacuous case below.
constraint_sets = [cs for cs in constraint_sets if cs]
if not constraint_sets:
return frozenset()
parsed_constraint_sets: set[frozenset[Requirement]] = set()
for constraint_set in constraint_sets:
# Each element (a ParsedConstraint) will get ORed.
parsed_constraint_set = frozenset(
parse_constraint(constraint) for constraint in constraint_set
)
parsed_constraint_sets.add(parsed_constraint_set)
if len(parsed_constraint_sets) == 1:
return next(iter(parsed_constraint_sets))
def and_constraints(parsed_constraints: Sequence[Requirement]) -> Requirement:
merged_specs: set[tuple[str, str]] = set()
expected_interpreter = parsed_constraints[0].project_name
for parsed_constraint in parsed_constraints:
if parsed_constraint.project_name != expected_interpreter:
return impossible
merged_specs.update(parsed_constraint.specs)
formatted_specs = ",".join(f"{op}{version}" for op, version in merged_specs)
return parse_constraint(f"{expected_interpreter}{formatted_specs}")
ored_constraints = (
and_constraints(constraints_product)
for constraints_product in itertools.product(*parsed_constraint_sets)
)
ret = frozenset(cs for cs in ored_constraints if cs != impossible)
if not ret:
# There are no possible combinations.
attempted_str = " AND ".join(f"({' OR '.join(cs)})" for cs in constraint_sets)
raise ValueError(
softwrap(
f"""
These interpreter constraints cannot be merged, as they require
conflicting interpreter types: {attempted_str}
"""
)
)
return ret
@classmethod
def create_from_targets(
cls, targets: Iterable[Target], python_setup: PythonSetup
) -> InterpreterConstraints | None:
"""Returns merged InterpreterConstraints for the given Targets.
If none of the given Targets have InterpreterConstraintsField, returns None.
NB: Because Python targets validate that they have ICs which are a subset of their
dependencies, merging constraints like this is only necessary when you are _mixing_ code
which might not have any inter-dependencies, such as when you're merging un-related roots.
"""
fields = [
tgt[InterpreterConstraintsField]
for tgt in targets
if tgt.has_field(InterpreterConstraintsField)
]
if not fields:
return None
return cls.create_from_compatibility_fields(fields, python_setup)
@classmethod
def create_from_compatibility_fields(
cls, fields: Iterable[InterpreterConstraintsField], python_setup: PythonSetup
) -> InterpreterConstraints:
"""Returns merged InterpreterConstraints for the given `InterpreterConstraintsField`s.
NB: Because Python targets validate that they have ICs which are a subset of their
dependencies, merging constraints like this is only necessary when you are _mixing_ code
which might not have any inter-dependencies, such as when you're merging un-related roots.
"""
constraint_sets = {field.value_or_global_default(python_setup) for field in fields}
# This will OR within each field and AND across fields.
merged_constraints = cls.merge_constraint_sets(constraint_sets)
return InterpreterConstraints(merged_constraints)
@classmethod
def group_field_sets_by_constraints(
cls, field_sets: Iterable[_FS], python_setup: PythonSetup
) -> FrozenDict[InterpreterConstraints, tuple[_FS, ...]]:
results = defaultdict(set)
for fs in field_sets:
constraints = cls.create_from_compatibility_fields(
[fs.interpreter_constraints], python_setup
)
results[constraints].add(fs)
return FrozenDict(
{
constraints: tuple(sorted(field_sets, key=lambda fs: fs.address))
for constraints, field_sets in sorted(results.items())
}
)
def generate_pex_arg_list(self) -> list[str]:
args = []
for constraint in self:
args.extend(["--interpreter-constraint", str(constraint)])
return args
def _valid_patch_versions(self, major: int, minor: int) -> Iterator[int]:
for p in range(0, _PATCH_VERSION_UPPER_BOUND + 1):
for req in self:
if req.specifier.contains(f"{major}.{minor}.{p}"): # type: ignore[attr-defined]
yield p
def _includes_version(self, major: int, minor: int) -> bool:
return any(True for _ in self._valid_patch_versions(major, minor))
def includes_python2(self) -> bool:
"""Checks if any of the constraints include Python 2.
This will return True even if the code works with Python 3 too, so long as at least one of
the constraints works with Python 2.
"""
return self._includes_version(2, 7)
def minimum_python_version(self, interpreter_universe: Iterable[str]) -> str | None:
"""Find the lowest major.minor Python version that will work with these constraints.
The constraints may also be compatible with later versions; this is the lowest version that
still works.
"""
for major, minor in sorted(_major_minor_to_int(s) for s in interpreter_universe):
if self._includes_version(major, minor):
return f"{major}.{minor}"
return None
def snap_to_minimum(self, interpreter_universe: Iterable[str]) -> InterpreterConstraints | None:
"""Snap to the lowest Python major.minor version that works with these constraints.
Will exclude patch versions that are expressly incompatible.
"""
for major, minor in sorted(_major_minor_to_int(s) for s in interpreter_universe):
for p in range(0, _PATCH_VERSION_UPPER_BOUND + 1):
for req in self:
if req.specifier.contains(f"{major}.{minor}.{p}"): # type: ignore[attr-defined]
# We've found the minimum major.minor that is compatible.
req_strs = [f"{req.project_name}=={major}.{minor}.*"]
# Now find any patches within that major.minor that we must exclude.
invalid_patches = sorted(
set(range(0, _PATCH_VERSION_UPPER_BOUND + 1))
- set(self._valid_patch_versions(major, minor))
)
req_strs.extend(f"!={major}.{minor}.{p}" for p in invalid_patches)
req_str = ",".join(req_strs)
snapped = parse_constraint(req_str)
return InterpreterConstraints([snapped])
return None
def _requires_python3_version_or_newer(
self, *, allowed_versions: Iterable[str], prior_version: str
) -> bool:
if not self:
return False
patch_versions = list(reversed(range(0, _PATCH_VERSION_UPPER_BOUND)))
# We only look at the prior Python release. For example, consider Python 3.8+
# looking at 3.7. If using something like `>=3.5`, Py37 will be included.
# `==3.6.*,!=3.7.*,==3.8.*` is unlikely, and even that will work correctly as
# it's an invalid constraint so setuptools returns False always. `['==2.7.*', '==3.8.*']`
# will fail because not every single constraint is exclusively 3.8.
prior_versions = [f"{prior_version}.{p}" for p in patch_versions]
allowed_versions = [
f"{major_minor}.{p}" for major_minor in allowed_versions for p in patch_versions
]
def valid_constraint(constraint: Requirement) -> bool:
if any(
constraint.specifier.contains(prior) for prior in prior_versions # type: ignore[attr-defined]
):
return False
if not any(
constraint.specifier.contains(allowed) for allowed in allowed_versions # type: ignore[attr-defined]
):
return False
return True
return all(valid_constraint(c) for c in self)
def requires_python38_or_newer(self, interpreter_universe: Iterable[str]) -> bool:
"""Checks if the constraints are all for Python 3.8+.
This will return False if Python 3.8 is allowed, but prior versions like 3.7 are also
allowed.
"""
py38_and_later = [
interp for interp in interpreter_universe if _major_minor_to_int(interp) >= (3, 8)
]
return self._requires_python3_version_or_newer(
allowed_versions=py38_and_later, prior_version="3.7"
)
def to_poetry_constraint(self) -> str:
specifiers = []
wildcard_encountered = False
for constraint in self:
specifier = str(constraint.specifier) # type: ignore[attr-defined]
if specifier:
specifiers.append(specifier)
else:
wildcard_encountered = True
if not specifiers or wildcard_encountered:
return "*"
return " || ".join(specifiers)
def enumerate_python_versions(
self, interpreter_universe: Iterable[str]
) -> FrozenOrderedSet[tuple[int, int, int]]:
"""Return a set of all plausible (major, minor, patch) tuples for all Python 2.7/3.x in the
specified interpreter universe that matches this set of interpreter constraints.
This also validates our assumptions around the `interpreter_universe`:
- Python 2.7 is the only Python 2 version in the universe, if at all.
- Python 3 is the last major release of Python, which the core devs have committed to in
public several times.
"""
if not self:
return FrozenOrderedSet()
minors = []
for major_minor in interpreter_universe:
major, minor = _major_minor_to_int(major_minor)
if major == 2:
if minor != 7:
raise AssertionError(
softwrap(
f"""
Unexpected value in `[python].interpreter_versions_universe`:
{major_minor}. Expected the only Python 2 value to be '2.7', given that
all other versions are unmaintained or do not exist.
"""
)
)
minors.append((2, minor))
elif major == 3:
minors.append((3, minor))
else:
raise AssertionError(
softwrap(
f"""
Unexpected value in `[python].interpreter_versions_universe`:
{major_minor}. Expected to only include '2.7' and/or Python 3 versions,
given that Python 3 will be the last major Python version. Please open an
issue at https://github.com/pantsbuild/pants/issues/new if this is no longer
true.
"""
)
)
valid_patches = FrozenOrderedSet(
(major, minor, patch)
for (major, minor) in sorted(minors)
for patch in self._valid_patch_versions(major, minor)
)
if not valid_patches:
raise ValueError(
softwrap(
f"""
The interpreter constraints `{self}` are not compatible with any of the
interpreter versions from `[python].interpreter_versions_universe`.
Please either change these interpreter constraints or update the
`interpreter_versions_universe` to include the interpreters set in these
constraints. Run `{bin_name()} help-advanced python` for more information on the
`interpreter_versions_universe` option.
"""
)
)
return valid_patches
def contains(self, other: InterpreterConstraints, interpreter_universe: Iterable[str]) -> bool:
"""Returns True if the `InterpreterConstraints` specified in `other` is a subset of these
`InterpreterConstraints`.
This is restricted to the set of minor Python versions specified in `universe`.
"""
if self == other:
return True
this = self.enumerate_python_versions(interpreter_universe)
that = other.enumerate_python_versions(interpreter_universe)
return this.issuperset(that)
def partition_into_major_minor_versions(
self, interpreter_universe: Iterable[str]
) -> tuple[str, ...]:
"""Return all the valid major.minor versions, e.g. `('2.7', '3.6')`."""
result: OrderedSet[str] = OrderedSet()
for major, minor, _ in self.enumerate_python_versions(interpreter_universe):
result.add(f"{major}.{minor}")
return tuple(result)
def major_minor_version_when_single_and_entire(self) -> None | tuple[int, int]:
"""Returns the (major, minor) version that these constraints cover, if they cover all of
exactly one major minor version, without rules about patch versions.
This is a best effort function, e.g. for using during inference that can be overridden.
Examples:
All of these return (3, 9): `==3.9.*`, `CPython==3.9.*`, `>=3.9,<3.10`, `<3.10,>=3.9`
All of these return None:
- `==3.9.10`: restricted to a single patch version
- `==3.9`: restricted to a single patch version (0, implicitly)
- `==3.9.*,!=3.9.2`: excludes a patch
- `>=3.9,<3.11`: more than one major version
- `>=3.9,<3.11,!=3.10`: too complicated to understand it only includes 3.9
- more than one requirement in the list: too complicated
"""
try:
return _major_minor_version_when_single_and_entire(self)
except _NonSimpleMajorMinor:
return None
def _major_minor_to_int(major_minor: str) -> tuple[int, int]:
return tuple(int(x) for x in major_minor.split(".", maxsplit=1)) # type: ignore[return-value]
class _NonSimpleMajorMinor(Exception):
pass
_ANY_PATCH_VERSION = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)(?P<any_patch>\.\*)?$")
def _parse_simple_version(version: str, require_any_patch: bool) -> tuple[int, int]:
match = _ANY_PATCH_VERSION.fullmatch(version)
if match is None or (require_any_patch and match.group("any_patch") is None):
raise _NonSimpleMajorMinor()
return int(match.group("major")), int(match.group("minor"))
def _major_minor_version_when_single_and_entire(ics: InterpreterConstraints) -> tuple[int, int]:
if len(ics) != 1:
raise _NonSimpleMajorMinor()
req = next(iter(ics))
just_cpython = req.project_name == "CPython" and not req.extras and not req.marker
if not just_cpython:
raise _NonSimpleMajorMinor()
# ==major.minor or ==major.minor.*
if len(req.specs) == 1:
operator, version = next(iter(req.specs))
if operator != "==":
raise _NonSimpleMajorMinor()
return _parse_simple_version(version, require_any_patch=True)
# >=major.minor,<major.(minor+1)
if len(req.specs) == 2:
(operator_lo, version_lo), (operator_hi, version_hi) = iter(req.specs)
if operator_lo != ">=":
# if the lo operator isn't >=, they might be in the wrong order (or, if not, the check
# below will catch them)
operator_lo, operator_hi = operator_hi, operator_lo
version_lo, version_hi = version_hi, version_lo
if operator_lo != ">=" and operator_hi != "<":
raise _NonSimpleMajorMinor()
major_lo, minor_lo = _parse_simple_version(version_lo, require_any_patch=False)
major_hi, minor_hi = _parse_simple_version(version_hi, require_any_patch=False)
if major_lo == major_hi and minor_lo + 1 == minor_hi:
return major_lo, minor_lo
raise _NonSimpleMajorMinor()
# anything else we don't understand
raise _NonSimpleMajorMinor()
|
pantsbuild/pants
|
src/python/pants/backend/python/util_rules/interpreter_constraints.py
|
interpreter_constraints.py
|
py
| 21,381 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "typing_extensions.Protocol",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pants.build_graph.address.Address",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.target_types.InterpreterConstraintsField",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "pants.util.memo.memoized",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement.parse",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement.parse",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "pants.util.memo.memoized",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pants.util.ordered_set.FrozenOrderedSet",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "pants.engine.engine_aware.EngineAwareParameter",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 84,
"usage_type": "argument"
},
{
"api_name": "typing.Iterable",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "typing.Sequence",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.softwrap",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pants.engine.target.Target",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.subsystems.setup.PythonSetup",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.target_types.InterpreterConstraintsField",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.target_types.InterpreterConstraintsField",
"line_number": 183,
"usage_type": "argument"
},
{
"api_name": "typing.Iterable",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.target_types.InterpreterConstraintsField",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.subsystems.setup.PythonSetup",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "pants.backend.python.subsystems.setup.PythonSetup",
"line_number": 206,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "pants.util.frozendict.FrozenDict",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "pants.util.frozendict.FrozenDict",
"line_number": 207,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 244,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "pkg_resources.Requirement",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 333,
"usage_type": "name"
},
{
"api_name": "pants.util.ordered_set.FrozenOrderedSet",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.softwrap",
"line_number": 353,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.softwrap",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "pants.util.ordered_set.FrozenOrderedSet",
"line_number": 377,
"usage_type": "call"
},
{
"api_name": "pants.util.strutil.softwrap",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "pants.util.docutil.bin_name",
"line_number": 392,
"usage_type": "call"
},
{
"api_name": "pants.util.ordered_set.FrozenOrderedSet",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 400,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "pants.util.ordered_set.OrderedSet",
"line_number": 416,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 455,
"usage_type": "call"
}
] |
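merge_constraint_sets above ORs within each constraint set and ANDs across sets via itertools.product; a standalone sketch of that product over plain strings, reproducing the docstring's example (the real method parses the strings into Requirement objects and filters out impossible combinations):

import itertools

constraint_sets = [["CPython>=2.7", "CPython<=3"], ["CPython==3.6.*"]]

merged = [
    # ANDing one combination = comma-joining its version specifiers.
    combo[0] + "".join("," + c.replace("CPython", "") for c in combo[1:])
    for combo in itertools.product(*constraint_sets)
]
print(merged)  # ['CPython>=2.7,==3.6.*', 'CPython<=3,==3.6.*']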
21499361084
|
import importlib
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import shutil
from datetime import timedelta
from file_read_backwards import FileReadBackwards
from functools import partial
from getpass import getuser
from openpyxl import load_workbook
from openpyxl.styles import Font, PatternFill, Alignment, Border, Side
from openpyxl.utils import get_column_letter
from pathlib import Path
from prettytable import PrettyTable
from scipy.fft import fft, fftfreq
from socket import gethostname
from statistics import stdev, mean
from tqdm import tqdm
from warnings import warn
import utils.find as find
import utils.fetch as fetch
import utils.constants as cst
# * ===================================================================================================
def issteady(run: str) -> bool:
log_file = find.find_logs(find.find_runs(run)[0])[0]
with FileReadBackwards(log_file) as frb:
for line in frb:
if line.startswith("Time ="):
if line.split()[-1].isdigit():
return True
else:
return False
# * ===================================================================================================
def ncol(handles: list) -> int:
max_text_length = 60
nhandles = len(handles)
total_length = sum(len(text) for text in handles) + 3 * nhandles
if total_length > max_text_length:
if nhandles > 6:
ncol = 4
else:
ncol = max(int(nhandles / 2), int((nhandles + 1) / 2))
row_index = range(0, nhandles - ncol, ncol)
for i in row_index:
words_in_row = [handles[k] for k in range(i, i + ncol)]
if sum(len(word) for word in words_in_row) > max_text_length:
ncol -= 1
break
else:
ncol = nhandles
return ncol
# * ===================================================================================================
def print_header(run_dirs: list[Path]) -> None:
# Check project is unique
if not len({r.parent.name for r in run_dirs}) == 1:
raise ValueError("Multiple project directories found.")
# If unique project
project = f'{cst.bmag}{run_dirs[0].parent.name}{cst.bcyan}'
runs_num = [k.name for k in run_dirs]
format_runs = f'{cst.bmag}{f"{cst.reset}, {cst.bmag}".join(sorted(runs_num))}{cst.bcyan}'
title_df = pd.DataFrame({f'{cst.reset}{cst.bold}PROJECT{cst.bcyan}': project,
f'{cst.reset}{cst.bold}RUN(S){cst.bcyan}': format_runs},
index=['Data'])
# Create a prettytable object
pt = PrettyTable()
for col in title_df.columns:
pt.add_column(col, title_df[col].values)
pt.align[col] = 'c'
pt.min_width[col] = int(shutil.get_terminal_size().columns / 2) - 4
# print the table
print(cst.bcyan)
print(pt, end=f'{cst.reset}')
print('')
# * ===================================================================================================
def get_avg(df: pd.DataFrame, *,
rng: int,
type_avg: str = 'final',
**kwargs) -> dict:
# Select all the columns except 'Time' by default
columns = list(df.columns)[1:]
# If one or more columns are specified with 'usecols'
if 'usecols' in kwargs:
usecols = kwargs.get('usecols')
columns = [col for col in list(df.columns)[1:]
if re.search(re.compile(usecols), col)]
# Return a dict of the mean value of each column over rng iterations
if type_avg == 'final':
return {c: df.loc[:, c].tail(rng).mean() for c in columns}
# Get the window of series of observations of rng size for each column
elif type_avg == 'moving':
windows = {c: df.loc[:, c].rolling(rng) for c in columns}
# Create a series of moving averages of each window for each column
moving_avgs = {k: windows.get(k).mean().tolist() for k in windows}
# Remove null entries
final_dict = {k: moving_avgs.get(k)[rng - 1:] for k in moving_avgs}
return final_dict
# * ===================================================================================================
def _format_excel(file_path):
# Load the Excel file
workbook = load_workbook(file_path)
sheet = workbook.active
# Set font styles
header_font = Font(name="Calibri", bold=True, size=14)
content_font = Font(name="Calibri", size=12)
# Set alignment
alignment = Alignment(horizontal="center", vertical="center")
# Set fill color
fill_main = PatternFill(start_color="C7D1E0", end_color="C7D1E0", fill_type="solid")
fill_data = PatternFill(start_color="F2F2F2", end_color="F2F2F2", fill_type="solid")
# Set border
border_color = "FF0000"
thin_border = Border(top=Side(style=None),
right=Side(style=None),
bottom=Side(style=None),
left=Side(style=None))
# Format header row
for cell in sheet[1]:
cell.font = header_font
cell.alignment = alignment
cell.fill = fill_main
cell.border = thin_border
cell.value = cell.value.upper()
# Format content rows
for row in sheet.iter_rows(min_row=2):
for cell in row:
cell.font = content_font
cell.alignment = alignment
cell.fill = fill_data
cell.border = thin_border
if isinstance(cell.value, (int, float)):
if cell.column >= 11: # column K and beyond
cell.number_format = '0.00E+00' # Scientific notation format code
# Increase header row height
sheet.row_dimensions[1].height = 40
for row in sheet.iter_rows(min_row=2):
sheet.row_dimensions[row[0].row].height = 20
# Calculate the maximum text length in each column
max_text_lengths = {}
print(sheet.column_dimensions['G'].width)
for row in sheet.iter_rows(min_row=1, values_only=True):
for column_index, cell_value in enumerate(row, start=1):
column_letter = get_column_letter(column_index)
text_length = len(str(cell_value))
if column_letter not in max_text_lengths or text_length > max_text_lengths[column_letter]:
max_text_lengths[column_letter] = text_length
# Set the column width as 1.2 times the maximum text length
for column_letter, max_length in max_text_lengths.items():
column_width = (max_length * 1.2) + 2 # Add some extra padding
sheet.column_dimensions[column_letter].width = column_width
# Save the modified Excel file
workbook.save(file_path)
|
vicmcl/postpro
|
utils/misc.py
|
misc.py
|
py
| 6,902 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "utils.find.find_logs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "utils.find",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "utils.find.find_runs",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "file_read_backwards.FileReadBackwards",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "utils.constants.bmag",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "utils.constants.bcyan",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "utils.constants.bmag",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "utils.constants.reset",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "utils.constants.bcyan",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "utils.constants.reset",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "utils.constants.bold",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "utils.constants.bcyan",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "utils.constants.reset",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "utils.constants.bold",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "utils.constants.bcyan",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "shutil.get_terminal_size",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "utils.constants.bcyan",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "utils.constants.reset",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "utils.constants",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Font",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Alignment",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.PatternFill",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Border",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "openpyxl.styles.Side",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "openpyxl.utils.get_column_letter",
"line_number": 182,
"usage_type": "call"
}
] |
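The 'moving' branch of get_avg above builds a rolling window per column and strips the leading null entries; the same effect in a few lines of plain pandas:

import pandas as pd

df = pd.DataFrame({"Time": range(6), "force": [1.0, 2.0, 4.0, 4.0, 2.0, 1.0]})
rng = 3

rolling_mean = df["force"].rolling(rng).mean().tolist()
moving_avg = rolling_mean[rng - 1:]  # drop the first rng-1 NaN entries
print(moving_avg)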
71243441147
|
import random
import string
# Image: a canvas
# ImageDraw: a brush
# ImageFont: the brush's font
from PIL import Image,ImageDraw,ImageFont
# Captcha: verification code generator
class Captcha(object):
# number of captcha characters to generate
number = 4
# width and height of the captcha image
size = (100,30)
# captcha font size
fontsize = 25
# number of interference lines to add
line_number = 2
# build the captcha source character set
SOURCE = list(string.ascii_letters)
for index in range(0,10):
SOURCE.append(str(index))
# generate a random color
@classmethod
def __gene_random_color(cls,start=0,end=255):
random.seed()
return (random.randint(start,end),random.randint(start,end),random.randint(start,end))
# randomly pick a font
@classmethod
def __gene_random_font(cls):
fonts=[
'cambriaz.ttf',
'consola.ttf',
# 'modern.fon',
# 'smalle.fon'
]
# randomly pick one element from the list
font = random.choice(fonts)
return 'tool/captcha/'+font
# randomly generate a string (letters and digits)
@classmethod
def gene_text(cls,number):
# number is the count of captcha characters to generate
return ''.join(random.sample(cls.SOURCE,number))
# draw an interference line
@classmethod
def __gene_line(cls,draw,width,height):
begin = (random.randint(0,width),random.randint(0,height))
end = (random.randint(0,width),random.randint(0,height))
draw.line([begin,end],fill=cls.__gene_random_color(),width=2)
# draw interference points
@classmethod
def __gene_points(cls,draw,point_chance,width,height):
chance = min(100,max(0,int(point_chance))) # clamp to [0, 100]
for w in range(width):
for h in range(height):
tmp = random.randint(0,100)
if tmp > 100 - chance:
draw.point((w,h),fill=cls.__gene_random_color())
# generate the captcha
@classmethod
def gene_captcha(cls):
# width and height of the captcha image
width,height = cls.size
# create the image (canvas)
image = Image.new('RGBA',(width,height),cls.__gene_random_color(0,100))
# captcha font
font = ImageFont.truetype(cls.__gene_random_font(),cls.fontsize)
# create the brush
draw = ImageDraw.Draw(image)
# generate the text
text = cls.gene_text(cls.number)
# get the size of the text in this font
font_width,font_height = font.getsize(text)
# draw the text onto the image
draw.text(((width - font_width) / 2,(height - font_height) / 2),text,font=font,fill=cls.__gene_random_color(150,255))
# draw interference lines
for x in range(0,cls.line_number):
cls.__gene_line(draw,width,height)
# draw noise points
cls.__gene_points(draw,10,width,height)
# with open('captcha.png','wb') as fp:
# image.save(fp)
return (image,text)
|
lubocsu/BBS
|
tool/captcha/__init__.py
|
__init__.py
|
py
| 2,943 |
python
|
en
|
code
| 23 |
github-code
|
6
|
[
{
"api_name": "string.ascii_letters",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "PIL.Image.new",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "PIL.ImageFont.truetype",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_number": 75,
"usage_type": "name"
}
] |
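Usage of the class above mirrors the commented-out save block at the end of the file; this assumes the repo layout shown (tool/captcha/ with the listed .ttf files) and an older Pillow where ImageFont's getsize is still available:

from tool.captcha import Captcha  # import path follows the sub_path of this record

image, text = Captcha.gene_captcha()
print("captcha text:", text)
with open("captcha.png", "wb") as fp:
    image.save(fp)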
13865138503
|
import argparse
import datetime
import json
import os
from itertools import zip_longest
from pathlib import Path
from typing import List, Optional, Tuple
import gpxpy
from rich import box
from hiking.import_export import JSON_IMPORT_EXAMPLE
from hiking.models import Hike
from hiking.utils import DATA_HOME, DEFAULT_BOX_STYLE, SlimDateRange
# TODO: find a way to auto-detect this from rich
BOX_FORMATS = [
"ASCII",
"ASCII2",
"ASCII_DOUBLE_HEAD",
"SQUARE",
"SQUARE_DOUBLE_HEAD",
"MINIMAL",
"MINIMAL_HEAVY_HEAD",
"MINIMAL_DOUBLE_HEAD",
"SIMPLE",
"SIMPLE_HEAD",
"SIMPLE_HEAVY",
"HORIZONTALS",
"ROUNDED",
"HEAVY",
"HEAVY_EDGE",
"HEAVY_HEAD",
"DOUBLE",
"DOUBLE_EDGE",
"MARKDOWN",
]
def get_valid_fields_for_args(exclude: Optional[List] = None):
exclude = exclude or []
return [
field.info["name"]
for field in Hike.FIELDS
if field.info["data_view"] and field.info["name"] not in exclude
]
class DateRangeType:
description = (
"Only include hikes contained in provided daterange (default: all hikes)"
)
examples = (
"Valid examples:\n"
"1970-01-01\n"
"1970-01-01/1970-02-01\n"
"1970-01-01/ (all hikes from start date)\n"
"/1970-01-01 (all hikes until end date)"
)
help = f"{description}\n{examples}"
def __call__(self, raw: str, *args, **kwargs):
start = end = raw
splitted = raw.split("/")
if len(splitted) == 2 and all(splitted):
start, end = splitted
elif start.endswith("/"):
start = start.rstrip("/")
end = None
elif end.startswith("/"):
end = end.lstrip("/")
start = None
try:
start = (
datetime.datetime.strptime(start, "%Y-%m-%d").date()
if start
else datetime.date.min
)
end = (
datetime.datetime.strptime(end, "%Y-%m-%d").date()
if end
else datetime.date.max
)
except ValueError as e:
raise argparse.ArgumentTypeError(f"{e.args[0]}\n{self.examples}")
return SlimDateRange(start, end)
class WritableDirPathType:
def __call__(self, raw: str, *args, **kwargs):
directory = Path(raw)
if (
not directory.exists()
or not directory.is_dir()
or not os.access(directory, os.W_OK)
):
raise argparse.ArgumentTypeError(
f'Cannot write to directory: "{directory.absolute()}". '
f"Make sure it exists and is writable."
)
return directory
class GPXFileType(argparse.FileType):
def __call__(self, *args, **kwargs):
file = super().__call__(*args, **kwargs)
try:
gpx_xml = file.read()
gpxpy.parse(gpx_xml)
except Exception as e:
raise argparse.ArgumentTypeError(f"Cannot read *.gpx file: {str(e)}")
return gpx_xml
class JsonFileType(argparse.FileType):
def __call__(self, *args, **kwargs):
file = super().__call__(*args, **kwargs)
try:
data = json.load(file)
except Exception as e:
raise argparse.ArgumentTypeError(f"Cannot read *.json file: {str(e)}")
return data
def validate_order_key(value: str) -> Tuple[str, bool]:
reverse = False
if value.startswith("-"):
reverse = True
value = value.lstrip("-")
if value not in get_valid_fields_for_args():
raise argparse.ArgumentTypeError("Invalid order_key")
return value, reverse
def validate_plot(value: str) -> Tuple[str, str]:
try:
x, y = tuple(value.split(","))
for i in x, y:
assert i in get_valid_fields_for_args(["name"])
return x, y
except (ValueError, AssertionError):
raise argparse.ArgumentTypeError("plot")
def validate_table_style(value: str) -> box.Box:
try:
box_style = getattr(box, value.upper())
except AttributeError:
raise argparse.ArgumentTypeError("Invalid table-style")
return box_style
def set_default_subparser(
parser: argparse.ArgumentParser, default_subcommand: str, raw_args: List[str]
):
subparser_found = False
for arg in raw_args:
if arg in ["-h", "--help"]: # pragma: no cover
break
else:
for x in parser._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in raw_args:
subparser_found = True
if not subparser_found:
raw_args.insert(0, default_subcommand)
def parse_arguments(raw_args: List[str]) -> argparse.Namespace:
"""
Parse all arguments.
"""
def format_list(data: List[str]):
data.sort()
col_1 = data[: int(round(len(data) / 2))]
col_2 = data[int(round(len(data) / 2)) :]
table_data = list(zip_longest(col_1, col_2, fillvalue=""))
longest = 0
for i in table_data:
if len(i[0]) > longest:
longest = len(i[0])
result = [f"{i[0].ljust(longest)}{' ' * 5}{i[1]}" for i in table_data]
return "\n".join(result)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter, prog="hiking"
)
debug_arg_dict = {
"help": "Show debug information (log queries)",
"action": "store_true",
}
subparsers = parser.add_subparsers(dest="command")
show = subparsers.add_parser(
"show",
help="Show hike(s) (default)",
description="Show hike(s) (default)",
formatter_class=argparse.RawTextHelpFormatter,
)
show.add_argument(
"ids",
metavar="ID",
help="Hike ID",
nargs="*",
type=int,
)
show.add_argument(
"-d",
"--daterange",
help=DateRangeType.help,
type=DateRangeType(),
default=SlimDateRange(datetime.date.min, datetime.date.max),
)
show.add_argument(
"-s",
"--search",
help="Search for text in name and body (case insensitive)",
type=str,
)
show.add_argument(
"-t",
"--table-style",
help=(
"Table format style (default: simple)\n"
f"Available options:\n{format_list(BOX_FORMATS)}"
),
default=DEFAULT_BOX_STYLE,
type=validate_table_style,
)
show.add_argument(
"-o",
"--order-key",
help=(
'Key to use for hike sorting. To reverse, prepend with "-".\n'
"Available options:\n"
f"{format_list(get_valid_fields_for_args())}"
),
default=("date", False),
type=validate_order_key,
)
show.add_argument(
"--plot",
help=(
"Fields to plot in a graph.\n"
"*experimental*\n"
"Example:\n"
'"date,distance"\n'
"Available options:\n"
f"{format_list(get_valid_fields_for_args(exclude=['name']))}"
),
default=(),
type=validate_plot,
)
show.add_argument("--debug", **debug_arg_dict)
create = subparsers.add_parser(
"create",
help="Create a new record",
description="Create a new record.",
formatter_class=argparse.RawTextHelpFormatter,
)
create.add_argument(
"--gpx",
metavar="GPX_FILE",
type=GPXFileType("r"),
help="Import from *.gpx-file",
)
create.add_argument("--debug", **debug_arg_dict)
edit = subparsers.add_parser(
"edit",
help="Edit a record",
description="Edit a record.",
formatter_class=argparse.RawTextHelpFormatter,
)
edit.add_argument(
"id",
metavar="ID",
help="Hike ID",
type=int,
)
edit.add_argument(
"--gpx",
metavar="GPX_FILE",
type=GPXFileType("r"),
help="Import from *.gpx-file",
)
edit.add_argument("--debug", **debug_arg_dict)
delete = subparsers.add_parser(
"delete",
help="Delete records by ID",
description="Delete records by ID.",
formatter_class=argparse.RawTextHelpFormatter,
)
delete.add_argument(
"-f",
"--force",
help="Do not ask before deletion",
action="store_true",
)
delete.add_argument(
"-q",
"--quiet",
help="Do not display hikes before deletion",
action="store_true",
)
delete.add_argument(
"-a",
"--all",
help="Delete all hikes",
action="store_true",
)
delete.add_argument(
"ids",
metavar="ID",
help="Hike ID",
nargs="*",
type=int,
)
delete.add_argument("--debug", **debug_arg_dict)
_import = subparsers.add_parser(
"import",
help="Import records from JSON",
description=f"Import records from JSON.\nFormat:\n{JSON_IMPORT_EXAMPLE}",
formatter_class=argparse.RawTextHelpFormatter,
)
_import.add_argument(
"json_data",
metavar="JSON_FILE",
help="Path to JSON file",
type=JsonFileType("r"),
)
_import.add_argument("--debug", **debug_arg_dict)
export = subparsers.add_parser(
"export",
help="Export records as JSON and GPX",
description="Export records as JSON and GPX.",
formatter_class=argparse.RawTextHelpFormatter,
)
export.add_argument(
"export_dir",
metavar="EXPORT_DIR",
help="Path to export directory",
type=WritableDirPathType(),
)
export.add_argument(
"ids",
metavar="ID",
help="Hike ID",
nargs="*",
type=int,
)
export.add_argument(
"-d",
"--daterange",
help=DateRangeType.help,
type=DateRangeType(),
default=SlimDateRange(datetime.date.min, datetime.date.max),
)
export.add_argument(
"-i",
"--include-ids",
help='Include IDs in export. Needed for "update", must be omitted for "create"',
action="store_true",
)
export.add_argument("--debug", **debug_arg_dict)
set_default_subparser(parser, "show", raw_args)
args = parser.parse_args(raw_args)
if args.command == "delete" and not args.ids and not args.all:
raise parser.error("IDs or --all must be provided")
elif args.command == "delete" and args.ids and args.all:
raise parser.error("Ambiguous argument: IDs and --all provided")
try:
WritableDirPathType()(DATA_HOME.parent)
except argparse.ArgumentTypeError:
raise parser.error(f"Cannot write to user data director: {DATA_HOME.parent}")
return args
|
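set_default_subparser above makes "show" the implicit subcommand by scanning the raw argv for any known subparser name and prepending the default when none is found. A simplified standalone mirror of that trick (two subcommands only, help handling reduced to -h):

import argparse

parser = argparse.ArgumentParser(prog="hiking")
sub = parser.add_subparsers(dest="command")
sub.add_parser("show")
sub.add_parser("create")

def parse_with_default(raw_args, default="show"):
    # Prepend the default subcommand when no known one is present.
    known = {"show", "create"}
    if "-h" not in raw_args and not known.intersection(raw_args):
        raw_args = [default] + raw_args
    return parser.parse_args(raw_args)

print(parse_with_default([]).command)          # show
print(parse_with_default(["create"]).command)  # create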
open-dynaMIX/hiking
|
hiking/arg_parsing.py
|
arg_parsing.py
|
py
| 10,991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Optional",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "hiking.models.Hike.FIELDS",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "hiking.models.Hike",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "hiking.utils.SlimDateRange",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.access",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "os.W_OK",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "gpxpy.parse",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "rich.box",
"line_number": 148,
"usage_type": "argument"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "argparse._SubParsersAction",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "itertools.zip_longest",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 190,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.SlimDateRange",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.DEFAULT_BOX_STYLE",
"line_number": 237,
"usage_type": "name"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 273,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "hiking.import_export.JSON_IMPORT_EXAMPLE",
"line_number": 349,
"usage_type": "name"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 350,
"usage_type": "attribute"
},
{
"api_name": "argparse.RawTextHelpFormatter",
"line_number": 366,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.SlimDateRange",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.DATA_HOME.parent",
"line_number": 411,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.DATA_HOME",
"line_number": 411,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 412,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.DATA_HOME.parent",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name": "hiking.utils.DATA_HOME",
"line_number": 413,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 172,
"usage_type": "attribute"
}
] |
35729401904
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# from multiprocessing.pool import ThreadPool
import threading
from time import sleep
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
import output as op
class FlaskScraper:
# groupName: webUrl
dictOfNameAndWebUrl = {}
# weburl: webCont
dictOfNameAndWebCont = {}
# weburl: webCont without markers
dictOfNameAndWebContWithoutMk = {}
# initialize
def __init__(self, chatBot, dictOfNameAndWebsite):
for key in dictOfNameAndWebsite:
web = dictOfNameAndWebsite[key]
if(key in FlaskScraper.dictOfNameAndWebUrl.keys()):
FlaskScraper.dictOfNameAndWebUrl[key] = web
# if(not web in FlaskScraper.dictOfNameAndWebCont.keys()):
FlaskScraper.dictOfNameAndWebCont.setdefault(web, ['null'])
FlaskScraper.dictOfNameAndWebContWithoutMk.setdefault(web, ['null'])
# FlaskScraper.dictOfNameAndWebCont.setdefault(web, self.ScraperFromFlaskByCheck(key))
else:
FlaskScraper.dictOfNameAndWebUrl.setdefault(key, web)
# if(not web in FlaskScraper.dictOfNameAndWebCont.keys()):
FlaskScraper.dictOfNameAndWebCont.setdefault(web, ['null'])
FlaskScraper.dictOfNameAndWebContWithoutMk.setdefault(web, ['null'])
# FlaskScraper.dictOfNameAndWebCont.setdefault(web, self.ScraperFromFlaskByCheck(key))
self.MultiClientController(chatBot, {key : web})
# self.UpdateWebsiteUrl(chatBot, dictOfNameAndWebsite)
# self.UpdateWebsiteCont(dictOfNameAndWebsite)
def Scraper(weburl):
driver = webdriver.Chrome("/usr/local/share/chromedriver")
driver.get(weburl)
try:
driver.find_element_by_xpath("//a[contains(text(),'Show all completed tasks')]").click()
except:
pass
# driver.find_element_by_id()
# print("------------", driver.page_source)
sleep(2)
soup = BeautifulSoup(driver.page_source, "html.parser")
spanlist = soup.find_all('span', attrs={'class':'best_in_place'})
driver.quit()
return spanlist
# bind group name with url & bind group name with web content
def UpdateWebsiteUrl(self, chatBot, key, web):
# for key in incDicOfNameAndWebsite:
# web = incDicOfNameAndWebsite[key]
# print(1)
if(web in FlaskScraper.dictOfNameAndWebUrl.values()):
return "fail"
# print(2)
if(key in FlaskScraper.dictOfNameAndWebUrl.keys()):
FlaskScraper.dictOfNameAndWebUrl[key] = web
# if(not web in FlaskScraper.dictOfNameAndWebCont.keys()):
FlaskScraper.dictOfNameAndWebCont.setdefault(web, ['null'])
FlaskScraper.dictOfNameAndWebContWithoutMk.setdefault(web, ['null'])
# FlaskScraper.dictOfNameAndWebCont.setdefault(web, self.ScraperFromFlaskByCheck(key))
else:
FlaskScraper.dictOfNameAndWebUrl.setdefault(key, web)
# if(not web in FlaskScraper.dictOfNameAndWebCont.keys()):
FlaskScraper.dictOfNameAndWebCont.setdefault(web, ['null'])
FlaskScraper.dictOfNameAndWebContWithoutMk.setdefault(web, ['null'])
# FlaskScraper.dictOfNameAndWebCont.setdefault(web, self.ScraperFromFlaskByCheck(key))
self.MultiClientController(chatBot, {key : web})
return "succeed"
# multiple clients generator
def MultiClientController(self, chatBot, nameOfGrp):
for k in nameOfGrp:
try:
thread = threading.Thread(target=FlaskScraper.ScraperTimeController, args=(k, chatBot, ))  # qualify with the class name; a bare reference raises NameError here
thread.start()
except:
print ("Error: unable to start thread for %s !" %(k))
# check command
def ScraperFromFlaskByCheck(self, nameOfGrp):
# print(dictOfNameAndWebsite.value[0])
# html = requests.get(dictOfNameAndWebsite.values()[0]).content
thisKey = FlaskScraper.dictOfNameAndWebUrl[nameOfGrp]
# html = requests.get(thisKey).content
# driver.get(thisKey)
# driver.find_element_by_xpath("//a[contains(text(),'Show all completed tasks')]").click()
# sleep(2)
# soup = BeautifulSoup(driver.page_source, "html.parser")
# spanlist = soup.find_all('span', attrs={'class':'best_in_place'})
spanlist = FlaskScraper.Scraper(thisKey)
# print("before")
MessageWithoutMk, Message = op.ChangeFormatOfOutput(spanlist)
# print("in")
if (Message != FlaskScraper.dictOfNameAndWebCont[thisKey]):
# print("in if")
FlaskScraper.dictOfNameAndWebCont[thisKey] = Message
# print("in if 2")
FlaskScraper.dictOfNameAndWebContWithoutMk[thisKey] = MessageWithoutMk
# print("in if 3")
# print("after")
# Message = []
# for i in range(1,len(spanlist)):
# checkbox=str(spanlist[i].find_previous_sibling('input'))
# if 'checked' in checkbox:
# Message.append(spanlist[i].text+'-completed')
# else:
# Message.append(spanlist[i].text+'-uncompleted')
# if (Message != FlaskScraper.dictOfNameAndWebCont[thisKey]):
# FlaskScraper.dictOfNameAndWebCont[thisKey] = Message
return Message
# time controller, send news to users
def ScraperTimeController(key, chatBot):
while True:
weburl = FlaskScraper.dictOfNameAndWebUrl[key]
Message = FlaskScraper.ScraperFromFlaskByTime(weburl)  # qualify with the class name; a bare reference raises NameError here
if(Message != ['null']):
# News=weburl + '\n' +'Update: \n'
News = ""
for strMessage in Message:
News = News + strMessage + '\n'
News = News + weburl
my_friend = chatBot.search(puid=key)[0]
my_friend.send(News)
sleep(3)
# listen to the website, return news
def ScraperFromFlaskByTime(weburl):
spanlist = FlaskScraper.Scraper(weburl)
print("test1")
# driver.get(thisKey)
# driver.find_element_by_xpath("//a[contains(text(),'Show all completed tasks')]").click()
# sleep(2)
# soup = BeautifulSoup(driver.page_source, "html.parser")
# spanlist = soup.find_all('span', attrs={'class':'best_in_place'})
# html = requests.get(weburl).content
# soup = BeautifulSoup(html,"html.parser")
# spanlist = soup.find_all('span',attrs={'class':'best_in_place'})
MessageWithoutMk, Message = op.ChangeFormatOfOutput(spanlist)
oldContent = []
for v in FlaskScraper.dictOfNameAndWebContWithoutMk[weburl]:
oldContent.append(v)
# first time to log
if (FlaskScraper.dictOfNameAndWebCont[weburl] == ['null'] and \
Message != FlaskScraper.dictOfNameAndWebCont[weburl]):
FlaskScraper.dictOfNameAndWebCont[weburl] = Message
FlaskScraper.dictOfNameAndWebContWithoutMk[weburl] = MessageWithoutMk
return Message
elif (Message != FlaskScraper.dictOfNameAndWebCont[weburl]):
print(Message)
print(MessageWithoutMk)
print(oldContent)
print(FlaskScraper.dictOfNameAndWebContWithoutMk[weburl])
# print("Message",Message)
# print("FlaskScraper.dictOfNameAndWebCont[weburl])", FlaskScraper.dictOfNameAndWebCont[weburl])
tmplist = []
cnt = 0
for i in range(len(MessageWithoutMk)):
if MessageWithoutMk[i] in oldContent:
if Message[i] not in FlaskScraper.dictOfNameAndWebCont[weburl]:
print("Message[%d]" %(i), Message[i])
print("oldContent index", oldContent.index(MessageWithoutMk[i]))
print("oldCOntent", oldContent)
print("cont",FlaskScraper.dictOfNameAndWebCont[weburl])
tmplist.append("\u2713 " + MessageWithoutMk[i]) # finished
oldContent.remove(MessageWithoutMk[i])
cnt = cnt + 1
else:
# print("not in MessageWithoutMk[%d]" %(i), MessageWithoutMk[i])
# print("oldContent", oldContent)
# print("tmplist[0]", MessageWithoutMk[i])
print("--------------------------------")
tmplist.append("\u2610 " + MessageWithoutMk[i]) # added
for i in range(len(oldContent)):
tmplist.append("\u2717 " + oldContent[i]) # delete
print("final",tmplist)
if(tmplist != []):
FlaskScraper.dictOfNameAndWebCont[weburl] = Message
FlaskScraper.dictOfNameAndWebContWithoutMk[weburl] = MessageWithoutMk
return tmplist
else:
return ['null']
# cnt = 0
else:
return ['null']
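# Illustration of the diff markers above (hypothetical task list, not from a live page):
#   old (unmarked): ['buy milk', 'write report']   new (unmarked): ['buy milk', 'call Bob']
#   -> '\u2713 buy milk' if its marked form changed (checkbox toggled, i.e. finished),
#      '\u2610 call Bob' for a newly added task, '\u2717 write report' for a deleted one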
# newContent = []
# Message = []
# oldContent = FlaskScraper.dictOfNameAndWebCont[weburl]
# dictOldContent = {}
# for cont in oldContent:
# if cont.find('-completed') > 0:
# dictOldContent[cont[:cont.find('-completed')]] ='-completed'
# elif cont.find('-uncompleted') > 0:
# dictOldContent[cont[:cont.find('-uncompleted')]] ='-uncompleted'
# for i in range(1,len(spanlist)):
# checkbox=str(spanlist[i].find_previous_sibling('input'))
# if 'checked' in checkbox:
# newContent.append(spanlist[i].text+'-completed')
# if spanlist[i].text in dictOldContent.keys():
# if dictOldContent[spanlist[i].text] == '-uncompleted':
# Message.append('completed: '+spanlist[i].text)
# dictOldContent[spanlist[i].text] = '-checked'
# else:
# Message.append('add: '+spanlist[i].text)
# else:
# newContent.append(spanlist[i].text+'-uncompleted')
# if spanlist[i].text in dictOldContent.keys():
# if dictOldContent[spanlist[i].text] == '-completed':
# Message.append('uncompleted: '+spanlist[i].text)
# dictOldContent[spanlist[i].text] = '-checked'
# else:
# Message.append('add: '+spanlist[i].text)
# for cont in dictOldContent:
# if dictOldContent[cont] != '-checked':
# Message.append('delete: ' + cont)
# if Message !=[]:
# FlaskScraper.dictOfNameAndWebCont[weburl]=newContent
# return Message
# else:
# return ['null']
|
clamli/fdatanotice
|
scraper.py
|
scraper.py
|
py
| 8,975 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "output.ChangeFormatOfOutput",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "output.ChangeFormatOfOutput",
"line_number": 165,
"usage_type": "call"
}
] |
36106930014
|
import pyperclip
import re
matchPhone = re.compile(r'''(
(\d{3}|\(\d{3}\)) # area code
(\s|-|\.) # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext\.)\s*(\d{2,5}))? # extension (dot escaped so it matches a literal '.')
)''', re.VERBOSE)
matchEmail = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # address
@[a-zA-Z0-9.-]+ # domain name
\.(\w*) # domain suffix
)''', re.VERBOSE)
matchURL = re.compile(r'''(
(http://|https://) # prefix
(.*) # domain name
(\.\w{2,}) # suffix
)''', re.VERBOSE)
text = pyperclip.paste()
matches = []
for match in matchPhone.findall(text):
phoneNum = '-'.join([match[1], match[3], match[5]])
if match[8] != '':
phoneNum += ' x' + match[8]
matches.append(phoneNum)
for match in matchEmail.findall(text):
matches.append(match[0])
for match in matchURL.findall(text):
matches.append(match[0])
print('\n'.join(matches))
pyperclip.copy('\n'.join(matches))
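# Example (hypothetical clipboard contents):
#   'Call 415-555-1011 x42, mail info@example.com or visit https://example.com'
#   -> prints and copies: 415-555-1011 x42, info@example.com, https://example.com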
|
kaisteussy/AtBS
|
automate_the_boring_stuff/Chapter 7/phoneAndEmail.py
|
phoneAndEmail.py
|
py
| 1,355 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "re.compile",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "re.VERBOSE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.VERBOSE",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.VERBOSE",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pyperclip.paste",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pyperclip.copy",
"line_number": 42,
"usage_type": "call"
}
] |
15068023113
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name="home"),
path('about/', views.about, name="about"),
path('join_us/', views.join_us, name="join_us"),
path('hotel_detail/<int:hotel_id>/',
views.hotel_detail, name="hotel_detail"),
path('hotel_detail/<int:hotel_id>/<slug:extra>/',
views.hotel_detail, name="hotel_detail"),
path('cart/', views.my_cart, name="my_cart"),
path('cart/plus', views.plus_cart, name="plus_cart"),
path('cart/minus', views.minus_cart, name="minus_cart"),
path('verify_payment/', views.verifyPayment, name='verify_payment'),
path('order/', views.orders, name="order"),
path('search/', views.search_items, name="search"),
path('all_hotel/', views.all_hotel, name="all_hotel"),
path('all_menu/', views.all_menu, name="all_menu"),
path('health/', views.health, name="health"),
]
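# e.g. GET /hotel_detail/7/ calls views.hotel_detail(request, hotel_id=7), and
# GET /hotel_detail/7/menu/ additionally passes extra='menu' via the slug converter.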
|
leenabadgujar/Online_Tiffin_Service
|
food/urls.py
|
urls.py
|
py
| 935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
}
] |
71840534267
|
# coding=utf8
from validate_email import validate_email
if __name__ == '__main__':
f = open("stargazers_email.txt", "r")
emails = f.readlines()
emails = [line.rstrip('\n') for line in emails]
valid_email = []
for i in range(len(emails)):
is_valid = validate_email(emails[i], verify=True)
print(is_valid)
# validate_email returns a boolean (or None when SMTP verification is inconclusive),
# so test truthiness rather than comparing against the string 'True'
if is_valid:
valid_email.append(emails[i])
print(len(valid_email))
with open('valid_email.txt', 'w') as f:
for item in valid_email:
f.write("%s\n" % item)
|
haoshuai999/Master-project
|
validate_stargazers_email.py
|
validate_stargazers_email.py
|
py
| 494 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "validate_email.validate_email",
"line_number": 11,
"usage_type": "call"
}
] |
24367084112
|
import numpy as np
import cv2 as cv
image = cv.imread('lena.jpg')
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
img = np.array(image)
height = image.shape[0]
width = image.shape[1]
kernel= np.array([[-1, -1, -1],[-1, 8, -1], [-1, -1, -1]])
#print(kernel)
m= kernel.shape[0]//2
w=h=3
conv= np.zeros(image.shape)
for i in range(m,height-m):
for j in range(m, width-m):
s=0
for k in range(h):
for l in range(w):
s=s+img[i-m+k][j-m+l]*kernel[k][l]
conv[i][j]=s
#print(img)
# clip to the displayable range and convert to uint8 so cv.imshow renders the result correctly
conv = np.clip(conv, 0, 255).astype(np.uint8)
cv.imshow('img', conv)
cv.waitKey()
#cv.destroyAllWindows()
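# Optional cross-check (a sketch, assuming the same image and kernel): cv.filter2D
# computes the correlation in native code, which for this symmetric kernel equals
# the convolution, so its interior should match `conv` away from the borders.
# ref = cv.filter2D(image, cv.CV_64F, kernel)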
|
maximana99/kernel-python
|
main.py
|
main.py
|
py
| 589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 27,
"usage_type": "call"
}
] |
11046567215
|
import urllib.request, json
import pytz
from datetime import datetime
dateTimeStr=datetime.utcnow().replace(tzinfo=pytz.utc)
def jsonReaderScooter(urlToOpen):
with urllib.request.urlopen(urlToOpen) as url:
data = json.loads(url.read().decode())
retStr='lat,lon,isdisabled,time\n'
try:
with open('outputDataUpdated.csv','r') as reader:
if retStr in reader:
retStr=''
except:
pass
for item in data["data"]["bikes"]:
retStr+=str(item['lat'])+','+str(item['lon'])+','+str(item['is_disabled'])+','+str(dateTimeStr)+'\n'
with open('outputDataUpdated.csv', 'a+') as writer:
writer.writelines(retStr)
return True
url = ["https://mds.bird.co/gbfs/los-angeles/free_bikes","https://s3.amazonaws.com/lyft-lastmile-production-iad/lbs/lax/free_bike_status.json","https://la-gbfs.getwheelsapp.com/free_bike_status.json"]
for i in url:
jsonReaderScooter(i)
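# Expected GBFS free_bike_status payload shape (abridged, illustrative values only):
# {"data": {"bikes": [{"lat": 34.05, "lon": -118.24, "is_disabled": 0, ...}]}}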
|
hjames034/scooterRecordLA
|
parseScooter.py
|
parseScooter.py
|
py
| 941 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.utcnow",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "pytz.utc",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 7,
"usage_type": "call"
}
] |
34608382125
|
import sys
import pygame
from setting import Settings
from setting import Ship
import game_functions as gf
def run_game():
# Initialize game and create a screen object.
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode(
(ai_settings.screen_width, ai_settings.screen_height))
ship = Ship(screen)
pygame.display.set_caption("Andy and Mr. Umair's Rocket game")
bg_color = (30,67,78)
# Start the main loop for the game.
while True:
gf.check_events(ship)
ship.update()
# Watch for keyboard and mouse events.
# Make the most recently drawn screen visible.
gf.update_screen(ai_settings, screen, ship)
run_game()
|
andy-miao-gu/preply_by_umair
|
old/okbruhpygame.py
|
okbruhpygame.py
|
py
| 749 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "setting.Settings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "setting.Ship",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "game_functions.check_events",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "game_functions.update_screen",
"line_number": 29,
"usage_type": "call"
}
] |
21139527922
|
#!/usr/bin/env python3
import time
from pymavlink import mavutil
from trunk import *
#from colored import fg, bg, attr
from colored import fg, bg, attr
#if get_Setting('mainLoopStatus', 'status.json', 0) == "closed":
# print("Warning: Manual override enabled")
# set_Setting('mainLoopStatus', 'manual', 'status.json', 1)
print("Hello, Launching MavLink viewer...")
timer1 = time.time()
# Start a connection listening to a UDP port
#the_connection = mavutil.mavlink_connection('udpin:localhost:14540')
time.sleep(1)
print("--->Looking for ports, please wait... Can take up to a minute...")
print("")
port1 = '/dev/ttyACM1'
port2 = '/dev/ttyACM2'
current = port1
while 1:
try:
the_connection = mavutil.mavlink_connection(current)
time.sleep(1)
the_connection.wait_heartbeat()
break
except:
pass
if current == port1:
current = port2
elif current == port2:
current = port1
time.sleep(1)
print("Connected to port: " + current)
print("Heartbeat from System %u, Component %u" % (the_connection.target_system, the_connection.target_system))
time.sleep(4)
#https://mavlink.io/en/messages/common.html#MAV_DATA_STREAM_EXTENDED_STATUS
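# request_data_stream_send args: (target_system, target_component, stream_id, rate_hz, start_stop); start_stop=1 starts the stream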
for i in range(0, 3):
the_connection.mav.request_data_stream_send(the_connection.target_system, the_connection.target_component, mavutil.mavlink.MAV_DATA_STREAM_ALL, 4, 1)
pevent = mavutil.periodic_event(5)
while(1):
the_connection.recv_msg()
if pevent.trigger():
try:
IMU = the_connection.messages['RAW_IMU']
IMU2= the_connection.messages['SCALED_IMU2']
try:
IMU3= the_connection.messages['SCALED_IMU3']
except:
IMU3 = None  # sentinel so the IMU3 print below can be skipped safely
PR1= the_connection.messages['SCALED_PRESSURE']
GPS_RAW= the_connection.messages['GPS_RAW_INT']
#print(IMU)
print("\tAx\tAy\tAz\t|Gx\tGy\tGz\t|Mx\tMy\tMz")
print(f"0|\t{IMU.xacc:.0f}\t{IMU.yacc:.0f}\t{IMU.zacc:.0f}\t|{IMU.xgyro:.0f}\t{IMU.ygyro:.0f}\t{IMU.zgyro:.0f}\t|{IMU.xmag:.0f}\t{IMU.ymag:.0f}\t{IMU.zmag:.0f}")
print(f"1|\t{IMU2.xacc:.0f}\t{IMU2.yacc:.0f}\t{IMU2.zacc:.0f}\t|{IMU2.xgyro:.0f}\t{IMU2.ygyro:.0f}\t{IMU2.zgyro:.0f}\t|{IMU2.xmag:.0f}\t{IMU2.ymag:.0f}\t{IMU2.zmag:.0f}")
print(f"2|\t{IMU3.xacc:.0f}\t{IMU3.yacc:.0f}\t{IMU3.zacc:.0f}\t|{IMU3.xgyro:.0f}\t{IMU3.ygyro:.0f}\t{IMU3.zgyro:.0f}\t|{IMU3.xmag:.0f}\t{IMU3.ymag:.0f}\t{IMU3.zmag:.0f}")
print(f"Pressure1 Abs: {PR1.press_abs:.2f} \tDif: {PR1.press_diff:.1f} \tTemp: {PR1.temperature:.0f}" + "\tSats in View: " + str(GPS_RAW.satellites_visible))
#print("Sats in View: " + str(GPS_RAW.satellites_visible))
except:
print("Error")
try:
PR2= the_connection.messages['SCALED_PRESSURE2']
print(f"Pressure2 Abs: {PR2.press_abs:.2f} \tDif: {PR2.press_diff:.1f} \tTemp: {PR2.temperature:.0f}")
#print(" ")
except:
print(f"Pressure2 Abs: INOP \tDif: INOP \tTemp: INOP")
#print("...Press ctrl+c to exit...")
print('%s ...Press ctrl+c to exit... %s' % (fg(3), attr(0)))
print(" ")
time.sleep(.005)
#ALL = the_connection.recv_match(blocking=True)
#print(ALL)
|
j07rdi/controlzero_testing
|
mavlink_test.py
|
mavlink_test.py
|
py
| 3,092 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "time.time",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil.mavlink_connection",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil.mavlink",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "pymavlink.mavutil.periodic_event",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "colored.fg",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "colored.attr",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
}
] |
41146228063
|
from flask import Flask, g, render_template, request, send_from_directory, url_for
import sqlite3, os, datetime
from werkzeug.utils import redirect, secure_filename
SITENAME = 'SaLeeMas - PicShare'
# Define the folder into which the photos
# will be uploaded
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
# Careful: the local directory must be specified!
DATABASE = 'app.db'
# Define a global variable that makes
# our files accessible even from the templates
# (taken from the Flask docs)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Connect to the DB (code taken from the Flask docs)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
# The function specifying the allowed file types
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
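# e.g. allowed_file('photo.JPG') -> True, allowed_file('notes.pdf') -> False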
# Homepage route
@app.route("/", methods=["GET", "POST"])
def show_pictures():
db = get_db()
print("All categories")
if request.method == 'GET':
categories = db.execute("SELECT name from categories order by id")
pictures = db.execute("SELECT id, title, filename \
from pictures order by upload_date desc")
# print(pictures.fetchall())
return render_template("index.html",
all_pictures=pictures, all_categories=categories)
# Homepage route with the category name as argument
@app.route("/<category>", methods=["GET", "POST"])
def show_category_pictures(category):
db = get_db()
if request.method == 'GET':
# print("Chosen category", category)
categories = db.execute("SELECT name from categories order by id")
if category:
print("category", category)
pictures = db.execute("SELECT pictures.id, title, filename \
from pictures left join categories \
on category_id = categories.id \
where categories.name = (?) \
order by upload_date desc", [category])
# print(pictures.fetchall())
return render_template("index.html",
all_pictures=pictures,
all_categories=categories,
chosen_category=category)
# Route for the image file to serve back, built from the
# "uploads/" directory name followed by the image file name
@app.route('/uploads/<filename>')
def download_file(filename):
print("send_from_directory",
send_from_directory(app.config["UPLOAD_FOLDER"], filename))
return send_from_directory(app.config["UPLOAD_FOLDER"], filename)
# Upload page route
@app.route("/upload", methods=["GET", "POST"])
def upload():
db = get_db()
categories_cursor = db.execute("select name from categories order by id;")
categories_name = categories_cursor.fetchall()
print("I am the result of your query: ", categories_name)
list_of_categories = []
for category in categories_name:
name = category[0]
list_of_categories.append(name)
print("i am the list of cat : ", list_of_categories)
if request.method == 'POST':
file = request.files['file']
print("I am the files.filename : ", file.filename)
if allowed_file(file.filename):
filename = secure_filename(file.filename) # this is the sanitized path
title = request.form.get("title")
description = request.form.get("description")
print(description, " - ", request.form.get("description"))
category = request.form.get("category")
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
upload_date = datetime.datetime.now()
# print("datetime.datetime.now()", datetime.datetime.now())
# save the file record in the DB
db = get_db()
if category:
cursor1 = db.execute("SELECT id from categories \
where name = ?", [category])
category_id = cursor1.fetchone()
# print(category_id[0])
db.execute("INSERT into pictures (title, filename, upload_date, category_id, description) \
values (? , ? , ? , ? , ? )",
[title, filename, upload_date, int(category_id[0]), description])
# # check whether the title already exists
# cursor_title = db.execute(
# "SELECT id, title FROM pictures WHERE title = (?)", [title])
# print("I am the cursor: ", cursor_title)
# # store it in a variable and inspect it with fetchone;
# # if the result is not None, return 404
# title_request = cursor_title.fetchone()
# print("hey I'm the request title ", title_request)
# if title_request is not None: abort(404)
db.commit()
return render_template("picture_uploaded.html")
return render_template('upload.html', list_of_categories=list_of_categories)
# Picture detail page route
@app.route("/picture/<picture_id>", methods=["GET", "POST"])
def picture_id(picture_id):
if picture_id and request.method == 'POST':
comment = request.form.get("comment")
# print("I am the comment :", comment)
comment_date = datetime.datetime.now()
# print("datetime.datetime.now()", comment_date)
# save the comment in the DB
db = get_db()
db.execute("INSERT into comments (comment, comment_date, picture_id) \
values (? , ? , ?)",
[comment, comment_date, picture_id])
db.commit()
if picture_id and request.method == 'GET':
# print("I am the id of the chosen picture :", picture_id)
db = get_db()
pictures = db.execute("SELECT title, filename, upload_date, description, categories.name \
from pictures inner join categories \
on category_id = categories.id \
where pictures.id = (?)", [picture_id])
# print(pictures)
comments = db.execute("SELECT comment, comment_date \
from comments inner join pictures \
on picture_id = pictures.id \
where pictures.id = (?) \
order by comment_date desc", [picture_id])
# print(comments)
return render_template("picture.html",
all_pictures=pictures,
all_comments=comments)
# print("not picture_id")
return redirect("/picture/" + picture_id)
if __name__ == "__main__":
app.run(debug=True)
|
Sabrina-MORSLI/PicShare
|
picshare/run.py
|
run.py
|
py
| 7,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "flask.g._database",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "flask.send_from_directory",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 97,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 101,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "flask.request.method",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "werkzeug.utils.redirect",
"line_number": 160,
"usage_type": "call"
}
] |
3121294529
|
import os
import sys
import time
from functools import partial
from multiprocessing import Pool
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('window-size=1920,1080')
chrome_options.add_argument('start-maximized')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
def scroll_to_page_end_n_times(browser, s, page_load_wait_seconds):
for _ in range(s):
print(f'ScrollHeight before scrolling: {browser.execute_script("return document.documentElement.scrollHeight")}')
browser.execute_script('window.scrollTo(0, document.body.scrollHeight);')
print(f'Scrolled, waiting for {page_load_wait_seconds} seconds to load page')
time.sleep(page_load_wait_seconds)
print(f'ScrollHeight after scrolling: {browser.execute_script("return document.documentElement.scrollHeight")}')
return
def links_collection(main_page_link, num_links, start_at_link_num, scroll_limit, page_load_wait_seconds, element_load_wait_seconds):
print('Working in background...')
n = 0
s = 0
link_num = start_at_link_num
links_dict = {}
chrome_driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
with chrome_driver as browser:
browser.get(main_page_link)
while n < num_links:
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div/div/div/div/div[' + str(
link_num) + ']/a/div[1]/div/div/div/div/div[2]/div[1]/div/div/div/div[2]/span/span/object/a'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
event_url_long = page.get_attribute('href')
event_url_cutoff = event_url_long[event_url_long.find('events/') + 7:].find('/')
event_url = event_url_long[:event_url_long.find('events/') + 7 + event_url_cutoff + 1]
event_title = page.text
print(f'Fetching link for event: {event_title}')
links_dict[event_title + str(n)] = event_url
link_num += 1
n += 1
except Exception as e:
# print(f'links_collection exception:\n{e}')
if s < scroll_limit:
s += 1
scroll_to_page_end_n_times(browser, s, page_load_wait_seconds)
else:
break
print(f'\nNumber of links: {str(len(links_dict.items()))}')
return links_dict
def get_location(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[2]/div/div[2]/div/div[1]/div/div/div[3]'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
e_location = page.text
except Exception as e:
e_location = 'n/a'
#print(f'get_location exception:\n{e}')
return e_location
def get_datetime(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[2]/div/div[2]/div/div[1]/div/div/div[1]/span'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
e_datetime_str = page.text
except Exception as e:
e_datetime_str = 'n/a'
# print(f'get_datetime exception:\n{e}')
return e_datetime_str
def get_host_and_num_people_responded(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
children = page.find_elements(By.XPATH, './child::*')
e_host = ''
e_num_people_responded = ''
for child in children:
textContent = child.text
if 'Event by' in textContent:
e_host = textContent[textContent.find('Event by') + 9:]
elif 'people responded' in textContent:
e_num_people_responded = textContent[:textContent.find('people responded')].strip()
elif 'person responded' in textContent:
e_num_people_responded = textContent[:textContent.find('person responded')].strip()
else:
pass
if e_host == '':
e_host = 'n/a'
if e_num_people_responded == '':
e_num_people_responded = 'n/a'
except Exception as e:
e_host = 'n/a'
e_num_people_responded = 'n/a'
# print(f'get_host_and_num_people_responded exception:\n{e}')
return e_host, e_num_people_responded
def get_description(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div[last()]/div/span'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
children = page.find_elements(By.XPATH, './child::*')
for child in children:
try:
see_more_btn = child.find_element(By.XPATH, "./div[@role='button']")
see_more_btn.click()
except:
pass
children = page.find_elements(By.XPATH, './child::*')
e_description = ''
for child in children:
e_description += child.text
e_description += '\n'
if 'See less' in e_description:
e_description = e_description[:e_description.find(' See less')]
elif 'See more' in e_description:
e_description = e_description[:e_description.find('... See more')]
else:
pass
if e_description == '':
e_description = 'n/a'
except Exception as e:
e_description = 'n/a'
# print(f'get_description exception:\n{e}')
return e_description
def get_image_url(browser, element_load_wait_seconds):
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[1]/div/div/div[2]/div/a/div/div/div/div/div/img'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
img_path = page.get_attribute('src')
except Exception as ignore:
try:
link_path = '/html/body/div[1]/div/div[1]/div/div[3]/div/div/div/div[1]/div[1]/div[2]/div/div/div[1]/div[1]/div[1]/div/div/div[2]/div/a/div/div/div/div/img'
page = WebDriverWait(browser, element_load_wait_seconds).until(EC.visibility_of_element_located((By.XPATH, link_path)))
img_path = page.get_attribute('src')
except Exception as e:
img_path = 'n/a'
# print(f'get_image_url exception:\n{e}')
return img_path
def crawl_links(element_load_wait_seconds, current_link):
e_location = 'n/a'
e_datetime = 'n/a'
e_host = 'n/a'
e_num_people_responded = 'n/a'
e_description = 'n/a'
img_path= 'n/a'
try:
chrome_driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
with chrome_driver as browser:
print(f'Crawling on: {current_link}')
# Using Selenium
browser.get(current_link)
e_location = get_location(browser, element_load_wait_seconds)
e_datetime = get_datetime(browser, element_load_wait_seconds)
e_host, e_num_people_responded = get_host_and_num_people_responded(browser, element_load_wait_seconds)
e_description = get_description(browser, element_load_wait_seconds)
img_path = get_image_url(browser, element_load_wait_seconds)
except Exception as e:
print(f'crawl_links exception:\n{e}') # if the links are not found in a page, print exception
return e_location, e_host, e_num_people_responded, e_datetime, e_description, img_path
def main(params):
num_links = params['num_links']
start_at_link_num = params['start_at_link_num']
scroll_limit = params['scroll_limit']
page_load_wait_seconds = params['page_load_wait_seconds']
element_load_wait_seconds = params['element_load_wait_seconds']
event_search_keyword = params['event_search_keyword']
main_page_link = f'https://www.facebook.com/events/search/?q={event_search_keyword}'
pool_size = params['pool_size']
links_dict = links_collection(main_page_link, num_links, start_at_link_num, scroll_limit, page_load_wait_seconds, element_load_wait_seconds)
print('\nInitiating scraping...')
#pool = Pool(processes=pool_size) # creates pool of n processes at a time
#func = partial(crawl_links, element_load_wait_seconds)
#e_details_list = pool.map(func, list(links_dict.values())) # maps the function crawl_links (with arg element_load_wait_seconds) with the links_dict.items() input
e_details_list = [crawl_links(element_load_wait_seconds, link) for link in list(links_dict.values())]
return_dict = { 'payload' : [] }
e_details_labels = ['location', 'host', 'numPeopleResponded', 'datetime', 'details', 'imgPath']
for (e_name, e_link), e_details in zip(links_dict.items(), e_details_list):
event_dict = {}
event_dict['link'] = e_link
event_dict['name'] = e_name[:-1]
for e_detail_item_label, e_detail_item in zip(e_details_labels, e_details):
event_dict[e_detail_item_label] = e_detail_item
return_dict['payload'].append(event_dict)
return return_dict
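# Example invocation (a sketch with hypothetical parameter values):
# if __name__ == '__main__':
#     result = main({'num_links': 5, 'start_at_link_num': 1, 'scroll_limit': 2,
#                    'page_load_wait_seconds': 3, 'element_load_wait_seconds': 5,
#                    'event_search_keyword': 'hiking', 'pool_size': 4})
#     print(result['payload'])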
|
davi1972/greener-app
|
greener-scraper/greener-scraper-cli.py
|
greener-scraper-cli.py
|
py
| 9,583 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 172,
"usage_type": "call"
}
] |
9771781643
|
#Brownian Motion Simulator
#Simulate first on $R^1$
import numpy as np
import matplotlib.pyplot as plt
def graph(points):
data = np.array(points)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(data[:,0],data[:,1])
ax.set_xlabel("X")
ax.set_ylabel("Y")
plt.axis('scaled')
plt.show()
return
def run1d(n):
function = BMinterval(n, 0, 0) # BMinterval requires startpos and starttime; start at the origin
pointlist = convert(function)
graph(pointlist)
return
def grapher(func):
a = convert(func)
graph(a)
return
def convert(func):
a = []
for term in func:
a.append([term, func[term]])
return a
def BMinterval(n,startpos,starttime):
#creates a 1-d Brownian motion on $[0,1]$ with $D_n$ dyadic level.
B = {}
i = 1
B[starttime] = startpos
B[starttime + 1] = startpos + np.random.randn()
while i < n:
k = 0
while 2*k + 1 <= np.power(2,i):
diadic = float(np.power(2,i))
d = (2*k + 1) / diadic
B[starttime + d] = startpos + .5 * ( B[starttime + (d - 1 / diadic)] + B[starttime + (d + 1 / diadic)] - 2*startpos) + .5 * np.random.randn()/ diadic
k = k + 1
i = i + 1
return B
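# The loop above is the Levy midpoint construction: each level i fills in the
# dyadic points d = (2k+1)/2^i by averaging the two neighbouring values and
# adding an independent Gaussian displacement that shrinks with the level.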
def BM(n,t):
#creates a depth n brownian motion on [0,t], where t is an integer:
i = 1
B = BMinterval(n,0,0)
while i < t:
B.update(BMinterval(n, B[i], i)) # dict(items + items) was Python 2-only syntax
i = i + 1
return B
def BM2d(n,t):
B1 = BM(n,t)
B2 = BM(n,t)
list = []
for term in B1:
list.append([B1[term],B2[term]])
return list
def fracpart(number):
return number - np.floor(number)
def inbox(points):
## Returns the fraction of points that (mod ZxZ) fall in the box [0,1/2]x[0,1/2]
c = 0
for term in points:
if (fracpart(term[0]) <= .5) and (fracpart(term[1]) <= .5):
c = c + 1
return c / float(len(points))
def fold(points):
new = []
for term in points:
new.append([fracpart(term[0]),fracpart(term[1])])
return new
def inint(points,k):
c = 0
for term in points:
if (fracpart(points[term])) <= k:
c = c + 1
return c / float(len(points))
def fold2(points):
new = []
for term in points:
new.append([fracpart(points[term])])
return new
|
ElleNajt/TinyProjects
|
BrownianMotionSimulator.py
|
BrownianMotionSimulator.py
|
py
| 2,129 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.axis",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "numpy.random.randn",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "numpy.power",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "numpy.floor",
"line_number": 77,
"usage_type": "call"
}
] |
650134167
|
#! /usr/bin/python
import os
import sys
import json
import luigi
import numpy as np
import vigra
import nifty.ufd as nufd
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Find Labeling Tasks
#
class MergeAssignmentsBase(luigi.Task):
""" MergeAssignments base class
"""
task_name = "merge_assignments"
src_file = os.path.abspath(__file__)
allow_retry = False
output_path = luigi.Parameter()
output_key = luigi.Parameter()
shape = luigi.ListParameter()
# task that is required before running this task
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
def run_impl(self):
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
block_list = vu.blocks_in_volume(self.shape, block_shape, roi_begin, roi_end)
n_jobs = min(len(block_list), self.max_jobs)
config = self.get_task_config()
config.update({"output_path": self.output_path, "output_key": self.output_key,
"tmp_folder": self.tmp_folder, "n_jobs": n_jobs, "block_list": block_list})
# we only have a single job to find the labeling
self.prepare_jobs(1, None, config)
self.submit_jobs(1)
# wait till jobs finish and check for job success
self.wait_for_jobs()
# log the save-path again
self.check_jobs(1)
class MergeAssignmentsLocal(MergeAssignmentsBase, LocalTask):
"""
MergeAssignments on local machine
"""
pass
class MergeAssignmentsSlurm(MergeAssignmentsBase, SlurmTask):
"""
MergeAssignments on slurm cluster
"""
pass
class MergeAssignmentsLSF(MergeAssignmentsBase, LSFTask):
"""
MergeAssignments on lsf cluster
"""
pass
def merge_assignments(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, "r") as f:
config = json.load(f)
output_path = config["output_path"]
output_key = config["output_key"]
tmp_folder = config["tmp_folder"]
n_jobs = config["n_jobs"]
block_list = config["block_list"]
id_prefix = "ids"
assignment_prefix = "cc_assignments"
# load labels
label_paths = [os.path.join(tmp_folder, f"{id_prefix}_{block_id}.npy") for block_id in block_list]
labels = [np.load(pp) if os.path.exists(pp) else [0] for pp in label_paths]
labels = np.unique(np.concatenate(labels))
# load assignments
assignment_paths = [os.path.join(tmp_folder, f"{assignment_prefix}_{job_id}.npy") for job_id in range(n_jobs)]
assignments = [np.load(pp) for pp in assignment_paths if os.path.exists(pp)]
if assignments:
assignments = np.concatenate(assignments, axis=0)
assignments = np.unique(assignments, axis=0)
assert assignments.shape[1] == 2
fu.log("have %i pairs of node assignments" % len(assignments))
have_assignments = True
else:
fu.log("did not find any node assignments and will not merge any components")
have_assignments = False
if have_assignments:
ufd = nufd.boost_ufd(labels)
ufd.merge(assignments)
label_assignments = ufd.find(labels)
else:
label_assignments = labels.copy()
n_labels = len(labels)
label_assignments, max_id, _ = vigra.analysis.relabelConsecutive(label_assignments, keep_zeros=True, start_label=1)
assert len(label_assignments) == n_labels
fu.log("reducing the number of labels from %i to %i" % (n_labels, max_id + 1))
label_assignments = np.concatenate([labels[:, None], label_assignments[:, None]], axis=1).astype("uint64")
chunks = (min(65334, n_labels), 2)
with vu.file_reader(output_path) as f:
f.create_dataset(output_key, data=label_assignments, compression="gzip", chunks=chunks)
fu.log_job_success(job_id)
if __name__ == "__main__":
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split(".")[0].split("_")[-1])
merge_assignments(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/connected_components/merge_assignments.py
|
merge_assignments.py
|
py
| 4,231 |
python
|
en
|
code
| 32 |
github-code
|
6
|
[
{
"api_name": "luigi.Task",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "luigi.ListParameter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "luigi.TaskParameter",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils.blocks_in_volume",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LocalTask",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.SlurmTask",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "cluster_tools.cluster_tasks.LSFTask",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "numpy.load",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "nifty.ufd.boost_ufd",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "nifty.ufd",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "vigra.analysis.relabelConsecutive",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "vigra.analysis",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.utils.function_utils.log",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "numpy.concatenate",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils.file_reader",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.volume_utils",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "cluster_tools.utils.function_utils.log_job_success",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "cluster_tools.utils.function_utils",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 138,
"usage_type": "attribute"
}
] |
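The merge step above fuses per-block component labels with nifty's union-find (nufd.boost_ufd). A minimal sketch of the same merge/find semantics using a plain dict-based union-find (an illustrative stand-in, not nifty's implementation):

import numpy as np

def find(parent, x):
    # find the component root, compressing the path as we go
    while parent[x] != x:
        parent[x] = parent[parent[x]]
        x = parent[x]
    return x

labels = np.array([0, 1, 2, 3, 4], dtype="uint64")
assignments = np.array([[1, 2], [3, 4]], dtype="uint64")  # node pairs to merge

parent = {int(l): int(l) for l in labels}
for a, b in assignments:
    ra, rb = find(parent, int(a)), find(parent, int(b))
    if ra != rb:
        parent[rb] = ra  # union the two components

label_assignments = np.array([find(parent, int(l)) for l in labels], dtype="uint64")
print(label_assignments)  # [0 1 1 3 3]: 2 merged into 1, 4 merged into 3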
32102340399
|
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
if not nums or len(nums) < 2:
return True
max_arrive = nums[0]
for i in range(1, len(nums)):
if max_arrive < i:
return False
max_arrive = max(max_arrive, i + nums[i])
return True
nums = [0,1]
print(Solution().canJump(nums))
|
Eleanoryuyuyu/LeetCode
|
python/Greedy/55. 跳跃游戏.py
|
55. 跳跃游戏.py
|
py
| 398 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 4,
"usage_type": "name"
}
] |
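The greedy above keeps max_arrive, the furthest index reachable so far; as soon as the loop index overtakes it, the last index cannot be reached. A standalone restatement with a few worked cases (the test values are hypothetical):

from typing import List

def can_jump(nums: List[int]) -> bool:
    max_arrive = nums[0]
    for i in range(1, len(nums)):
        if max_arrive < i:
            return False  # index i lies beyond every reachable position
        max_arrive = max(max_arrive, i + nums[i])
    return True

assert can_jump([2, 3, 1, 1, 4]) is True   # e.g. 0 -> 1 -> 4
assert can_jump([3, 2, 1, 0, 4]) is False  # every path strands at index 3 (value 0)
assert can_jump([0]) is True               # single element: already at the end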
39131633270
|
import random
from itertools import zip_longest
from typing import List
from config import MuZeroConfig
from game.game import AbstractGame
import _pickle as cPickle
import os
import numpy as np
class ReplayBuffer(object):
def __init__(self, config: MuZeroConfig, fighter):
self.window_size = config.window_size
self.batch_size = config.batch_size
self.buffer = []
self.loaded_games = []
self.current_games = []
self.memory_path = config.memory_path
self.fighter = fighter
def save_game(self, game):
if sum([len(i.root_values) for i in self.buffer]) > self.window_size:
self.buffer.pop(0)
if game.player1_historic_network:
game.game_player1_priority = 0
game.player1_priorities = list(np.full(len(game.root_values), 0))
else:
game.game_player1_priority = 1e3*len(game.root_values)
game.player1_priorities = list(np.full(len(game.root_values), 1e3))
player1_zero_move_idx = [i for i, j in enumerate(game.child_visits) if j[0][0] == 1.]
for idx in player1_zero_move_idx:
game.player1_priorities[idx] = 0
if game.player2_historic_network:
game.game_player2_priority = 0
game.player2_priorities = list(np.full(len(game.root_values), 0))
else:
game.game_player2_priority = 1e3*len(game.root_values)
game.player2_priorities = list(np.full(len(game.root_values), 1e3))
player2_zero_move_idx = [i for i, j in enumerate(game.child_visits) if j[1][0] == 1.]
for idx in player2_zero_move_idx:
game.player2_priorities[idx] = 0
self.buffer.append(game)
def update_buffer(self):
new_files = [f for f in os.listdir(self.memory_path) if f not in self.loaded_games]
new_files = [f for f in new_files if (f.split('_')[-1][:-4] == self.fighter) | (f.split('_')[-2] == self.fighter)]
new_files.sort(key = lambda x: int(x.split('_')[1]))
if len(new_files) > self.window_size // 1100:
self.loaded_games = self.loaded_games + new_files[:-self.window_size // 1100]
new_files = new_files[-self.window_size // 1100:]
if len(new_files) != 0:
for new_file in new_files:
with open(os.path.join(self.memory_path,new_file), 'rb') as game_file:
game = cPickle.load(game_file)
self.save_game(game)
self.loaded_games.append(new_file)
if sum([len(i.root_values) for i in self.buffer]) > self.window_size:
self.current_games.pop(0)
self.current_games.append(new_file)
def sample_batch(self, num_unroll_steps: int, unroll_step_size : int, td_steps: int, fighter):
# Generate some sample of data to train on
games = self.sample_games(fighter)
game_pos = [(g, self.sample_position(self.buffer[g], fighter), 'player1' if self.buffer[g].player1 == fighter else 'player2') for g in games]
game_data = [(self.buffer[g].make_image(i, p), [action.index for action in [j[int(p[-1]) - 1] for j in self.buffer[g].history[i:i + num_unroll_steps]]],
self.buffer[g].make_target(i, num_unroll_steps, unroll_step_size, td_steps, p))
for (g, i, p) in game_pos]
sample_weights = [self.buffer[g].player1_priorities[i] if p == 'player1' else self.buffer[g].player2_priorities[i] for (g, i, p) in game_pos]
game_weights = [self.buffer[g].game_player1_priority if p == 'player1' else self.buffer[g].game_player2_priority for (g, i, p) in game_pos]
weight_batch = 1 / (np.array(sample_weights) * np.array(game_weights))
weight_batch = weight_batch / np.max(weight_batch)
# Pre-process the batch
image_batch, actions_time_batch, targets_batch = zip(*game_data)
targets_init_batch, *targets_time_batch = zip(*targets_batch)
actions_time_batch = list(zip_longest(*actions_time_batch, fillvalue=0))
# Building batch of valid actions and a dynamic mask for hidden representations during BPTT
batch = image_batch, targets_init_batch, targets_time_batch, actions_time_batch
return batch, game_pos, weight_batch**0.4
def sample_games(self, fighter) -> List[AbstractGame]:
# Sample game from buffer either uniformly or according to some priority.
game_probs = np.array([game.game_player1_priority if game.player1 == fighter else game.game_player2_priority for game in self.buffer])
game_probs /= np.sum(game_probs)
return np.random.choice(len(self.buffer), size=self.batch_size, p = game_probs)
def sample_position(self, game: AbstractGame, fighter) -> int:
# Sample position from game either uniformly or according to some priority.
if game.player1 == fighter:
pos_probs = game.player1_priorities / sum(game.player1_priorities)
if game.player2 == fighter:
pos_probs = game.player2_priorities / sum(game.player2_priorities)
return np.random.choice(len(pos_probs), p=pos_probs)
def sample_position_value_bias(self, game: AbstractGame) -> int:
# Sample position from game either uniformly or according to some priority.
history = [i.index for i in game.history]
counts = np.bincount(history)
common = np.argmax(counts)
above_avg = [i[0] for i in np.argwhere(history==common)]
below_avg = [i[0] for i in np.argwhere(history!=common)]
if random.randint(0,5) != 5:
return np.random.choice(below_avg)
else:
return np.random.choice(above_avg)
def update_priorities(self, priorities, idx_info, fighter):
for i in range(len(idx_info)):
game_id, game_pos, _ = idx_info[i]
priority = priorities[i,:]
start_idx = game_pos
if self.buffer[game_id].player1 == fighter:
end_idx = min(game_pos+len(priority), len(self.buffer[game_id].player1_priorities))
self.buffer[game_id].player1_priorities[start_idx:end_idx] = priority[:end_idx-start_idx]
self.buffer[game_id].game_player1_priority = np.mean(self.buffer[game_id].player1_priorities) * len(self.buffer[game_id].root_values)
if self.buffer[game_id].player2 == fighter:
end_idx = min(game_pos+len(priority), len(self.buffer[game_id].player2_priorities))
self.buffer[game_id].player2_priorities[start_idx:end_idx] = priority[:end_idx-start_idx]
self.buffer[game_id].game_player2_priority = np.mean(self.buffer[game_id].player2_priorities) * len(self.buffer[game_id].root_values)
|
Nebraskinator/StreetFighter2AI
|
muzero/training/replay_buffer.py
|
replay_buffer.py
|
py
| 6,951 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "config.MuZeroConfig",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "config.window_size",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "config.batch_size",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "config.memory_path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "game.game.player1_historic_network",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "game.game.game_player1_priority",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "game.game.player1_priorities",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "game.game.root_values",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "game.game.game_player1_priority",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "game.game.root_values",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "game.game.player1_priorities",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "game.game.root_values",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "game.game.child_visits",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "game.game.player1_priorities",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "game.game.player2_historic_network",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "game.game.game_player2_priority",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "game.game.player2_priorities",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "game.game.root_values",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "game.game.game_player2_priority",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "game.game.root_values",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "game.game.player2_priorities",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.full",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "game.game.root_values",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "game.game.child_visits",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "game.game.player2_priorities",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "game.game",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "os.listdir",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "_pickle.load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "game.game",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "itertools.zip_longest",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "game.game.player1",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "game.game.game_player1_priority",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "game.game.game_player2_priority",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "game.game.AbstractGame",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "game.game.AbstractGame",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "game.game.player1",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "game.game.player1_priorities",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "game.game.player2",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "game.game.player2_priorities",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "numpy.random.choice",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "game.game.AbstractGame",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "game.game.history",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "game.game",
"line_number": 100,
"usage_type": "name"
},
{
"api_name": "numpy.bincount",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.argmax",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.argwhere",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 122,
"usage_type": "call"
}
] |
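sample_batch above computes prioritized-replay importance weights as 1/(sample_priority * game_priority), normalizes by the maximum, and raises the result to the power 0.4 to flatten the correction. A minimal sketch of that weight computation in isolation (the priority values are made up for illustration):

import numpy as np

# per-position and per-game priorities for three sampled positions (illustrative)
sample_weights = np.array([1e3, 500.0, 250.0])
game_weights = np.array([2e3, 2e3, 1e3])

weight_batch = 1 / (sample_weights * game_weights)
weight_batch = weight_batch / np.max(weight_batch)  # normalize so the largest weight is 1
weight_batch = weight_batch ** 0.4                  # flatten the correction, as in sample_batch
print(weight_batch)  # rarely sampled entries receive the larger corrections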
26806868269
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 17:29:26 2019
@author: dell
"""
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup as bs
url = "https://www.google.com/"
browser = webdriver.Chrome("E:\\Study\\Project_4_Web_Scrapping\\chromedriver.exe")
browser.get(url)
sleep(2)
search = browser.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[1]/div/div[1]/input')
search.click()
type_search = "wikipedia"
search.send_keys(type_search)
sleep(2)
search1 = browser.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[2]/div[2]/div/center/input[1]')
search1.click()
sleep(5)
browser.quit()
|
lavish71/Forsk_2019
|
Project_4_Web_Scrapping/Project_4_2/Project_4_2_2.py
|
Project_4_2_2.py
|
py
| 665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
}
] |
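The script above uses Selenium 3's find_element_by_* helpers, which were removed in Selenium 4. A sketch of the same flow against the current API, assuming the same chromedriver path; the By.NAME "q" locator is a substitute for the brittle XPaths above, not part of the original script:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from time import sleep

browser = webdriver.Chrome(service=Service("E:\\Study\\Project_4_Web_Scrapping\\chromedriver.exe"))
browser.get("https://www.google.com/")
sleep(2)
search = browser.find_element(By.NAME, "q")  # Google's search box; more stable than a long XPath
search.send_keys("wikipedia")
search.submit()  # submit the enclosing search form
sleep(2)
browser.quit()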
25135340045
|
from flask import Flask, request, render_template
from chatXYZ import run, run_test
import logging
# API Key
from config import openai_api_key
log_handler = logging.StreamHandler()
log_formatter = logging.Formatter("%(asctime)s - %(message)s")
log_handler.setFormatter(log_formatter)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(log_handler)
app = Flask(__name__)
api_key_mode = "system" # "user"" or "system"
SHOW_API_KEY_BOX = True if api_key_mode == "user" else False
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
query = request.form["query"]
# Get answer or error message
try:
if api_key_mode == "user":
api_key = request.form["api_key"].strip()
if api_key == "": # API key is not provided in user mode; show error
result = f"Please enter your OpenAI API Key!"
elif api_key != "": # If API key provided in user mode; use it
result = run_test(query, api_key=api_key, victim="Oppie")
elif api_key_mode == "system": # API key is not required in system mode; use system key
api_key = openai_api_key["OPENAI_API_KEY"]
result = run_test(query, api_key=api_key, victim="Oppie")
logger.info(f"User input: {query}") # Using logger
else:
raise NotImplementedError("Please set api_key_mode to either 'user' or 'system'.")
except Exception as e:
result = f"Ah, it seems something terrible has happened. Perhaps too many people are trying to ask me questions at the moment, or the test has gone wrong. Error: {e}"
return render_template("index.html", result=result, query=query, show_api_key_box=SHOW_API_KEY_BOX)
else:
return render_template("index.html", show_api_key_box=SHOW_API_KEY_BOX)
@app.route("/")
def home():
return render_template("index.html", show_api_key_box=SHOW_API_KEY_BOX)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True)
|
rikab/ChatXYZ
|
main.py
|
main.py
|
py
| 2,111 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "logging.StreamHandler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "chatXYZ.run_test",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "config.openai_api_key",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "chatXYZ.run_test",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 51,
"usage_type": "call"
}
] |
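A quick way to exercise the POST handler above without starting a server is Flask's built-in test client; this sketch assumes main.py (together with its config and chatXYZ imports) is importable from the working directory:

from main import app

with app.test_client() as client:
    resp = client.post("/", data={"query": "Who are you?", "api_key": ""})
    assert resp.status_code == 200  # index() always renders the template, with a result or an error string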
11735874338
|
import sys
import enum
from sqlalchemy import Column, DateTime, Integer, String, ForeignKey, Table
from sqlalchemy.orm import relationship, backref
from rhinventory.extensions import db
class SimpleAssetAttribute():
name: str
def __str__(self) -> str:
return f"{self.name}"
def asset_n_to_n_table(other_table: db.Model) -> Table:
other_name = other_table.__name__.lower()
return Table(
f"asset_{other_name}",
db.Model.metadata,
Column("asset_id", ForeignKey("assets.id")),
Column(f"{other_name}_id", ForeignKey(other_table.id)),
)
class Platform(db.Model, SimpleAssetAttribute):
__tablename__ = 'platforms'
id: int = Column(Integer, primary_key=True) # type: ignore
slug: str = Column(String, nullable=False) # type: ignore
name: str = Column(String, nullable=False) # type: ignore
last_used = Column(DateTime, nullable=True)
asset_platform_table = asset_n_to_n_table(Platform)
class AssetTag(db.Model, SimpleAssetAttribute):
__tablename__ = 'tags'
id: int = Column(Integer, primary_key=True) # type: ignore
name: str = Column(String, nullable=False) # type: ignore
description: str = Column(String, nullable=False) # type: ignore
last_used = Column(DateTime, nullable=True)
asset_tag_table = asset_n_to_n_table(AssetTag)
class Packaging(db.Model, SimpleAssetAttribute):
__tablename__ = 'packagings'
id: int = Column(Integer, primary_key=True) # type: ignore
name: str = Column(String, nullable=False) # type: ignore
last_used = Column(DateTime, nullable=True)
# An asset can have a single packaging multiple times so we need a middle table
class AssetPackaging(db.Model):
__tablename__ = 'asset_packaging'
id: int = Column(Integer, primary_key=True) # type: ignore
asset_id: int = Column(Integer, ForeignKey('assets.id')) # type: ignore
packaging_id: int = Column(Integer, ForeignKey(Packaging.id)) # type: ignore
#asset = relationship("Asset")
#packaging = relationship(Packaging)
class Medium(db.Model, SimpleAssetAttribute):
__tablename__ = 'media'
id: int = Column(Integer, primary_key=True) # type: ignore
name: str = Column(String, nullable=False) # type: ignore
last_used = Column(DateTime, nullable=True)
class AssetMedium(db.Model):
__tablename__ = 'asset_mediums'
id: int = Column(Integer, primary_key=True) # type: ignore
asset_id: int = Column(Integer, ForeignKey('assets.id')) # type: ignore
medium_id: int = Column(Integer, ForeignKey(Medium.id)) # type: ignore
#asset = relationship("Asset")
#medium = relationship(Medium)
class Company(db.Model, SimpleAssetAttribute):
__tablename__ = 'companies'
id: int = Column(Integer, primary_key=True) # type: ignore
name: str = Column(String, nullable=False) # type: ignore
last_used = Column(DateTime, nullable=True)
class CompanyAlias(db.Model):
__tablename__ = 'company_aliases'
id: int = Column(Integer, primary_key=True) # type: ignore
alias: str = Column(String, nullable=False) # type: ignore
company_id = Column(Integer, ForeignKey(Company.id), nullable=False)
company = relationship(Company, backref="aliases")
asset_company_table = asset_n_to_n_table(Company)
|
retroherna/rhinventory
|
rhinventory/models/asset_attributes.py
|
asset_attributes.py
|
py
| 3,448 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 27,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 44,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 45,
"usage_type": "argument"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 50,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 52,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 60,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 61,
"usage_type": "argument"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 65,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 67,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 74,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.DateTime",
"line_number": 76,
"usage_type": "argument"
},
{
"api_name": "rhinventory.extensions.db.Model",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "rhinventory.extensions.db",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 81,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 82,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.ForeignKey",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.relationship",
"line_number": 83,
"usage_type": "call"
}
] |
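asset_n_to_n_table above only builds the association tables; the Asset model that consumes them lives elsewhere in the project. A sketch of the usual wiring with relationship(secondary=...), assuming it sits in the same module as the definitions above; the Asset class shape shown here is an assumption, not the repo's actual model:

from sqlalchemy import Column, Integer
from sqlalchemy.orm import relationship
from rhinventory.extensions import db

class Asset(db.Model):  # assumed shape -- the real Asset model is defined elsewhere in the repo
    __tablename__ = "assets"
    id = Column(Integer, primary_key=True)
    # many-to-many via the association table generated by asset_n_to_n_table(Platform)
    platforms = relationship(Platform, secondary=asset_platform_table, backref="assets")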
27265911454
|
import time
import json
from scrape_linkedin.utils import AnyEC
from scrape_linkedin.Profile import Profile
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
class ProfileScraper:
"""
Scraper for Personal LinkedIn Profiles. See inherited Scraper class for
details about the constructor.
"""
MAIN_SELECTOR = '.core-rail'
ERROR_SELECTOR = '.profile-unavailable'
def __init__(self, driver):
self.timeout = 10
self.driver = driver
self.scroll_pause = 0.1
self.scroll_increment = 300
def scrape_by_email(self, email):
self.load_profile_page(
'https://www.linkedin.com/sales/gmail/profile/proxy/{}'.format(email))
return self.get_profile()
def scrape(self, url='', user=None):
self.load_profile_page(url, user)
return self.get_profile()
def load_profile_page(self, url='', user=None):
"""Load profile page and all async content
Params:
- url {str}: url of the profile to be loaded
Raises:
ValueError: If link doesn't match a typical profile url
"""
if user:
url = 'http://www.linkedin.com/in/' + user
if 'com/in/' not in url and 'sales/gmail/profile/proxy/' not in url:
raise ValueError(
"Url must look like... .com/in/NAME or... '.com/sales/gmail/profile/proxy/EMAIL")
self.driver.get(url)
# Wait for page to load dynamically via javascript
try:
myElem = WebDriverWait(self.driver, self.timeout).until(AnyEC(
EC.presence_of_element_located(
(By.CSS_SELECTOR, self.MAIN_SELECTOR)),
EC.presence_of_element_located(
(By.CSS_SELECTOR, self.ERROR_SELECTOR))
))
except TimeoutException as e:
raise ValueError(
"""Took too long to load profile. Common problems/solutions:
1. Invalid LI_AT value: ensure that yours is correct (they
update frequently)
2. Slow Internet: increase the time out parameter in the Scraper
constructor
3. Invalid e-mail address (or user does not allow e-mail scrapes) on scrape_by_email call
""")
# Check if we got the 'profile unavailable' page
try:
self.driver.find_element_by_css_selector(self.MAIN_SELECTOR)
except NoSuchElementException:
raise ValueError(
'Profile Unavailable: Profile link does not match any current Linkedin Profiles')
# Scroll to the bottom of the page incrementally to load any lazy-loaded content
self.scroll_to_bottom()
def get_profile(self):
try:
profile = self.driver.find_element_by_css_selector(
self.MAIN_SELECTOR).get_attribute("outerHTML")
except NoSuchElementException:
raise Exception(
"Could not find profile wrapper html. This sometimes happens for exceptionally long profiles. Try decreasing scroll-increment.")
contact_info = self.get_contact_info()
return Profile(profile + contact_info)
def get_contact_info(self):
try:
# Scroll to top to put clickable button in view
self.driver.execute_script("window.scrollTo(0, 0);")
button = self.driver.find_element_by_css_selector(
'a[data-control-name="contact_see_more"]')
button.click()
contact_info = self.wait_for_el('.pv-contact-info')
return contact_info.get_attribute('outerHTML')
except Exception as e:
print(e)
return ""
def scroll_to_bottom(self):
"""Scroll to the bottom of the page
Params:
- scroll_pause_time {float}: time to wait (s) between page scroll increments
- scroll_increment {int}: increment size of page scrolls (pixels)
"""
expandable_button_selectors = [
'button[aria-expanded="false"].pv-skills-section__additional-skills',
'button[aria-expanded="false"].pv-profile-section__see-more-inline',
'button[aria-expanded="false"].pv-top-card-section__summary-toggle-button',
'button[data-control-name="contact_see_more"]'
]
current_height = 0
while True:
for name in expandable_button_selectors:
try:
self.driver.find_element_by_css_selector(name).click()
except:
pass
# Use JQuery to click on invisible expandable 'see more...' elements
self.driver.execute_script(
'document.querySelectorAll(".lt-line-clamp__ellipsis:not(.lt-line-clamp__ellipsis--dummy) .lt-line-clamp__more").forEach(el => el.click())')
# Scroll down to bottom
new_height = self.driver.execute_script(
"return Math.min({}, document.body.scrollHeight)".format(current_height + self.scroll_increment))
if (new_height == current_height):
break
self.driver.execute_script(
"window.scrollTo(0, Math.min({}, document.body.scrollHeight));".format(new_height))
current_height = new_height
# Wait to load page
time.sleep(self.scroll_pause)
def wait(self, condition):
return WebDriverWait(self.driver, self.timeout).until(condition)
def wait_for_el(self, selector):
return self.wait(EC.presence_of_element_located((
By.CSS_SELECTOR, selector
)))
|
DumbMachine/linkedin
|
person.py
|
person.py
|
py
| 5,813 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "scrape_linkedin.utils.AnyEC",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "selenium.common.exceptions.TimeoutException",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "scrape_linkedin.Profile.Profile",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.expected_conditions",
"line_number": 138,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 139,
"usage_type": "name"
}
] |
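scroll_to_bottom above interleaves clicking expandable buttons with incremental scrolling. The core scroll loop in isolation, as a free function (driver, increment, and pause become parameters; this is a sketch, not the package's API):

import time

def scroll_to_bottom(driver, scroll_increment=300, scroll_pause=0.1):
    """Scroll down in fixed increments until the page height stops growing."""
    current_height = 0
    while True:
        # clamp the target to the document height, exactly as the class above does
        new_height = driver.execute_script(
            "return Math.min({}, document.body.scrollHeight)".format(current_height + scroll_increment))
        if new_height == current_height:
            break  # bottom reached: scrolling no longer advances
        driver.execute_script("window.scrollTo(0, {});".format(new_height))
        current_height = new_height
        time.sleep(scroll_pause)  # give lazy-loaded content time to appear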
34218162586
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 8 11:01:20 2022
@author: sonne
"""
#0. Imports
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
import matplotlib.animation as animation
import matplotlib.ticker as ticker
import tkinter as Tk #Interface
import numpy as np #numerical mathematics
import itertools #for iteration
import random #random numbers
from scipy.constants import k #Boltzmann constant
#1. Dictionaries for the noble gases
Masse = {"Helium" : 6.642e-27,
"Neon" : 3.351e-26,
"Argon" : 6.634e-26,
"Krypton" : 1.392e-25,
"Xenon": 2.180e-25}
Durchmesser = {"Helium" : 36.58, # 1.4e-10m
"Neon" : 44.22, # 1.58e-10m
"Argon" : 65.96, # 1.88e-10m
"Krypton" : 76.15, # 2.00e-10m
"Xenon": 87.07} # 2.18e-10m
Farbe = {"Helium" : "blue",
"Neon" : "darkblue",
"Argon" : "blueviolet",
"Krypton" : "purple",
"Xenon": "indigo"}
LennardJones_epsilon = {"Helium" : 14e-23,
"Neon" : 50e-23,
"Argon" : 167e-23,
"Krypton" : 225e-23,
"Xenon": 320e-23}
LennardJones_sigma = {"Helium" : 2.56e-10,
"Neon" : 2.74e-10,
"Argon" : 3.4e-10,
"Krypton" : 3.65e-10,
"Xenon": 3.98e-10}
#2. Create figures for the plots
#2.1 for the simulation
fig = Figure(figsize = (6,6))
ax = fig.add_subplot(111)
#2.2 for the acceleration arrow
arrow = Figure(figsize = (1.5,1.5)) #second plot window for the acceleration vector
arr = arrow.add_subplot(111)
arr.set_xlim(-1,1)
arr.set_ylim(-1,1)
arr.axes.xaxis.set_visible(False) #axes hidden for clarity
arr.axes.yaxis.set_visible(False)
Inaktiv_text = arr.text(0,0,'Aktiviere \n"Teilchen verfolgen"', ha="center", va = "center", fontsize = 8.) #text shown in the figure at start
Pfeil = patches.Arrow(0, 0, 0, 0) #create an arrow object of length 0
patch = arr.add_patch(Pfeil) #add the arrow to the plot
#3. Interface with Tkinter
class Interface():
def __init__(self, Teilchentracker_aus = True):
self.root = Tk.Toplevel()
self.Teilchentracker_aus = Teilchentracker_aus
#3.1 Configure the Tkinter window
self.root.title("C5 Molecular Dynamics") #window title
self.root.geometry("1400x800") #window size in pixels
self.root.config(bg = "white") #white background
self.root.columnconfigure(0, weight=3)
self.root.columnconfigure(1, weight=1)
self.root.columnconfigure(2, weight =1)
self.Ueberschrift = Tk.Label(self.root,text="Thermal motion", font = "Verdana 20 bold", \
bg = "white").grid(column=0, row=0)
#3.2 Create canvases for the simulation and for the acceleration arrow
self.canvas = FigureCanvasTkAgg(fig, master=self.root) #for the simulation
self.canvas.get_tk_widget().grid(column=0, row=1, rowspan = 9, sticky = Tk.N)
self.Label_Beschleunigungspfeil = Tk.Label(self.root, text = "Acceleration",\
font = "Verdana 10 bold", bg = "white").grid(column = 2, row = 1)
self.canvas_arrow = FigureCanvasTkAgg(arrow, master=self.root) #for the arrow
self.canvas_arrow.get_tk_widget().grid(column=2, row =2, rowspan = 2, sticky = Tk.N, pady = 10)
#3.3 Slider for changing the temperature
self.Label_Temperatur = Tk.Label(self.root, text = "Temperature in K", font = "Verdana 10 bold",\
bg = "white").grid(column = 1, row =1)
self.Slider_Temperatur = Tk.Scale(self.root, from_=1, to=2000, orient = "horizontal",\
bg = "white") #slider
self.Slider_Temperatur.grid(column = 1, row = 2) #place the slider
self.Slider_Temperatur.set(300) #initial value
self.Button_Temperatur = Tk.Button(self.root, text="Change temperature", bg= "lightgreen", \
compound = "left", width = 18, command= \
self.update_Temperatur).grid(column = 1, row = 3)
#button, calls the temperature update function
#3.4 Slider for changing the number of particles
self.Label_Teilchenzahl = Tk.Label(self.root, text = "Number of particles", \
font = "Verdana 10 bold", bg = "white").grid(column = 1, row = 4, sticky= Tk.S)
self.Slider_Teilchenzahl = Tk.Scale(self.root, from_=1, to=20,\
orient = "horizontal", bg = "white") #slider
self.Slider_Teilchenzahl.grid(column = 1, row = 5) #place the slider
self.Slider_Teilchenzahl.set(5) #initial value
self.Button_Teilchenzahl = Tk.Button(self.root, text="Change number of particles",\
bg = "lightgreen", compound = "left", width = 18,\
command=self.update_Teilchenzahl).grid(column = 1, row = 6)
#button, calls the particle-count update function
#3.5 Dropdown menu for changing the particle type
self.Label_Teilchenart = Tk.Label(self.root, text = "Gas type", font = "Verdana 10 bold",\
bg = "white").grid(column = 1, row = 7, sticky = Tk.S)
Edelgase = ["Helium","Neon", "Argon","Krypton","Xenon"] #list of options
Variable = Tk.StringVar() #widget value; holds a string, default ""
Variable.set(Edelgase[0]) #selects which list element the menu shows
self.dropdown = Tk.OptionMenu(self.root, Variable, *Edelgase, command= \
self.update_Teilchenart).grid(column = 1, row = 8) #create the dropdown menu widget
#3.6 Label with information about the current simulation
self.Infos = Tk.Label(self.root, text = "Informationen", bg = "white", font = \
"Verdana 10 bold").grid(column = 2, row = 7, sticky = Tk.S)
self.Label_Infos = Tk.Label(self.root, text = "Infos", justify = "left") #create the label
self.Label_Infos.grid(column = 2, row = 8) #place the label
#3.7 Particle tracker on/off switch
self.Label_Teilchentracker = Tk.Label(self.root, text = "Track particle", font = \
"Verdana 10 bold", bg = "white").grid(column = 2, row = 4, sticky = Tk.S)
self.Button_teilchen_verfolgen = Tk.Button(self.root, fg = "darkgreen", text="Teilchen verfolgen", bg = "white",\
height = 2, command=self.teilchen_verfolgen).grid(column = 2, row = 5, \
rowspan = 2, sticky = Tk.N, pady = 12)
#button, activates the particle tracker
#3.8 Stop button to end the program
self.Beenden = Tk.Button(self.root, text = "Interrupt", fg= "white", bg="maroon",\
command = self.stopp, width = 65).grid(column = 1, row = 9, columnspan = 2)
#button, calls the stop function
#Functions for the Tkinter controls:
def update_Temperatur(self):
box.Temperatur = self.Slider_Temperatur.get() #read the slider value
box.start_Animation() #refresh the initial conditions
def update_Teilchenzahl(self):
box.Teilchenzahl = self.Slider_Teilchenzahl.get() #read the slider value
box.particles = [Particle(i) for i in range(box.Teilchenzahl)] #create new particles
box.start_Animation() #refresh the initial conditions
def update_Teilchenart(self, Variable):
partikel.Teilchenart = str(Variable) #store the particle type as a string (for the info label)
partikel.m = Masse.get(Variable) #look up the mass in the dictionary and update it
partikel.R = Durchmesser.get(Variable) #look up the particle radius in the dictionary and update it
partikel.color = Farbe.get(Variable) #look up the color in the dictionary and update it
partikel.epsilon = LennardJones_epsilon.get(Variable) #look up the parameter in the dictionary and update it
partikel.sigma = LennardJones_sigma.get(Variable) #look up the parameter in the dictionary and update it
box.start_Animation() #refresh the initial conditions
def teilchen_verfolgen(self): #option to recolor one particle and track its acceleration vector
global patch, Inaktiv_text
if self.Teilchentracker_aus: #if not yet active
Inaktiv_text.remove() #remove the text from the plot
arrow.canvas.draw()
self.Button_teilchen_verfolgen = Tk.Button(self.root, foreground = "white",\
text="Teilchen entfolgen", bg = "darkgreen", height = 2,\
command=self.teilchen_verfolgen).grid(column = 2, row = 5,\
rowspan = 2, sticky = Tk.N, pady = 12)
#the button changes its appearance
self.Teilchentracker_aus = False
else: #if already active
Inaktiv_text = arr.text(0,0,'Aktiviere \n"Teilchen verfolgen"', ha="center", va = "center", fontsize = 8.)
#put the text back into the plot
arrow.canvas.draw()
self.Button_teilchen_verfolgen = Tk.Button(self.root, foreground = "darkgreen",\
text="Teilchen verfolgen", bg = "white", height = 2,\
command=self.teilchen_verfolgen).grid(column = 2, row = 5,\
rowspan = 2, sticky = Tk.N, pady = 12)
#the button changes its appearance
patch.remove() #remove the arrow
Pfeil = patches.Arrow(0, 0, 0, 0) #create a new arrow of length 0
patch = arr.add_patch(Pfeil) #add the arrow
arrow.canvas.draw() #show the arrow
self.Teilchentracker_aus = True
def stopp(self):
self.root.destroy() #close the Tkinter window
self.root.quit() #stop program execution
interface = Interface() #access the Interface() class as interface
#4. Acceleration arrow for the first particle
def pfeil(Beschleunigung):
global patch
if not interface.Teilchentracker_aus: #only when the particle tracker is active
Betrag_Beschleunigung = np.sqrt(Beschleunigung[0]**2 + Beschleunigung[1]**2)
if Betrag_Beschleunigung != 0:
patch.remove() #remove the arrow
Pfeil_x = Beschleunigung[0]/np.abs(Beschleunigung[0]) \
* np.log(np.abs(Beschleunigung[0]))/50 #logarithmically scaled acceleration,
#to cover the required orders of magnitude
Pfeil_y = Beschleunigung[1]/np.abs(Beschleunigung[1]) \
* np.log(np.abs(Beschleunigung[1]))/50 #logarithmically scaled acceleration
Pfeil = patches.FancyArrow(0, 0, Pfeil_x, Pfeil_y, color = "maroon", width = 0.05, overhang = 0.2,\
head_width = 0.25, head_length = 0.3) #create an arrow from the components of
#the acceleration
patch = arr.add_patch(Pfeil) #add the arrow
arrow.canvas.draw() #show the arrow
#5. Critical radius
kritischerRadius = 4e-9 # corresponds to 40% of the box size, to reduce computational cost
#6. Particles as a class:
class Particle(): #assigns each particle a radius (R), mass (m), color (color),
#and the parameters for the Lennard-Jones potential (sigma, epsilon)
def __init__(self, R = 36.58, m = 6.642e-27, color = "blue", epsilon = 14e-23, sigma = 2.56e-10, Teilchenart = "Helium"):
self.R, self.m, self.color, self.epsilon, self.sigma, self.Teilchenart = R, m, color, epsilon, sigma, Teilchenart
partikel = Particle() #access the Particle() class as partikel
# 7. Functions for the motion of the particles in the box
class Box(): #contains the functions for the motion of the particles in the box
def __init__(self, Teilchenzahl = 5, dt=4E-15, Temperatur = 300, Boxgroesse = 1e-8,\
Anfangsgeschwindigkeit = 1367.8, E_gesamt = 3.1e-20):
#default values for the number of particles, time step, temperature, box size, initial speed, total energy
self.dt, self.Teilchenzahl, self.Temperatur, self.Boxgroesse, self.Anfangsgeschwindigkeit, \
self.E_gesamt = dt, Teilchenzahl, Temperatur, Boxgroesse, Anfangsgeschwindigkeit, E_gesamt
self.particles = [Particle(i) for i in range(self.Teilchenzahl)] #create one instance of the Particle class per particle
#7.1 Compute and set the initial conditions for the simulation
def start_Animation(self):
self.scatter = ax.scatter([],[], s= partikel.R) #scatter plot with the particles as markers, sized by the particle radius
self.Anfangsgeschwindigkeit = self.mittlere_Geschwindigkeit(self.Temperatur) #compute the initial speed from the temperature
self.E_gesamt = self.gesamtenergie(self.Teilchenzahl, self.Anfangsgeschwindigkeit) #determine the total energy from the kinetic energy
Infos = "Edelgas: " + partikel.Teilchenart + \
"\nMasse: %10.3e kg \nGesamtenergie: %10.3e J \nMittlere Geschwindigkeit: %8.2f m/s" \
% (partikel.m, box.E_gesamt, box.Anfangsgeschwindigkeit) #text for the info label
interface.Label_Infos.configure(text = Infos) #refresh the label content
box.startpositionen() #call the start-positions function
for particle in self.particles:
angle = random.uniform(-1, 1) #random angle for the velocity direction
particle.v = np.array([(np.cos(angle * np.pi/2)), (np.sin(angle * np.pi/2))]) \
* self.Anfangsgeschwindigkeit #define the initial velocity as an array
particle.a = np.zeros(2) #the acceleration at time t=0 is zero
#7.1.1 Set the particles' initial speed to the mean speed
def mittlere_Geschwindigkeit(self, T):
return np.sqrt(3*k*T/partikel.m) #computed as the mean speed from the temperature
#7.1.2 Compute the total energy of all particles
def gesamtenergie(self, Teilchenzahl, v):
return Teilchenzahl * 0.5 * partikel.m * v**2 #sum of the kinetic energies
#7.1.3 Choose the particles' start positions at random
def startpositionen(self):
for particle in self.particles:
particle.r = 100*np.random.uniform(0, self.Boxgroesse/100, size=2) #choose the start position randomly
#inside the box
#redraw the random particle distribution if two particles overlap at the start of the animation
for particle, particle2 in itertools.combinations(self.particles, 2): #for each pair of particles
# compute the distance
x_diff = particle.r[0] - particle2.r[0]
y_diff = particle.r[1] - particle2.r[1]
Abstand = np.sqrt(x_diff**2 + y_diff**2)
if Abstand < 1.12*partikel.sigma: #if the distance lies within the repulsive-interaction range
box.startpositionen() #compute new start positions
#7.2 Determine the particle trajectories via the velocity Verlet algorithm
def zeitliche_Entwicklung(self, particles, Boxgroesse, dt, E_gesamt):
box.kollision_Box(particles, Boxgroesse) #account for elastic collisions with the walls
for particle in particles:
particle.r += dt * particle.v + 0.5 * dt**2 * particle.a #position update per the velocity Verlet algorithm (r += v*dt + a*dt**2/2)
particle.a_vorher = particle.a #store the acceleration value for the next time step
particle.a = np.zeros(2) #reset the acceleration to zero before re-evaluating the potential
box.beschleunigung(particles) #compute the acceleration from the potential
particle.v = (particle.v + dt/2 * (particle.a + particle.a_vorher)) \
* box.normierung(particles, E_gesamt) #velocity update per the velocity Verlet algorithm, then rescaled
pfeil(box.particles[0].a) #update the acceleration arrow
#7.2.1 Elastic collisions with the walls of the box
def kollision_Box(self, particles, Boxgroesse):
for particle in self.particles:
for i in range(0,2): #for the x and y coordinates
if particle.r[i] >= Boxgroesse:
particle.r[i] = Boxgroesse #prevents 'tunneling', where the velocity of a very fast
# particle would be mirrored twice before it returns into the box
particle.v[i] *=-1 #mirror the velocity
if particle.r[i] <= 0:
particle.r[i] = 0 #see above
particle.v[i] *=-1
#7.2.2 Determine the particles' distances and accelerations
def beschleunigung(self, particles):
for particle, particle2 in itertools.combinations(self.particles, 2): #iterate over all pairs of particles
#compute the distance
x_diff = particle.r[0] - particle2.r[0]
y_diff = particle.r[1] - particle2.r[1]
Abstand = np.sqrt(x_diff**2 + y_diff**2)
#compute the interaction from the potential:
if Abstand < kritischerRadius: #only evaluate the interaction within the critical radius
Wechselwirkung = self.lennardJones_Kraft(Abstand) #plug the distance into the Lennard-Jones force expression
particle.a[0] -= 1/(partikel.m) * Wechselwirkung * x_diff/Abstand
particle.a[1] -= 1/(partikel.m) * Wechselwirkung * y_diff/Abstand
particle2.a[0] += 1/(partikel.m) * Wechselwirkung * x_diff/Abstand
particle2.a[1] += 1/(partikel.m) * Wechselwirkung * y_diff/Abstand
#7.2.3 Lennard-Jones potential
def lennardJones_Kraft(self, Distanz): #force as the gradient of the Lennard-Jones potential as a function
#of the particle separation
return (-24 * partikel.epsilon) * (2 *(partikel.sigma**12 / Distanz**13) -(partikel.sigma**6 / Distanz**7))
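#Derivation check: V(r) = 4*epsilon*((sigma/r)**12 - (sigma/r)**6) gives
#dV/dr = -24*epsilon*(2*sigma**12/r**13 - sigma**6/r**7), which is exactly the
#value returned above; beschleunigung() applies the minus sign and the unit
#vector (x_diff/Abstand, y_diff/Abstand), so a = -dV/dr * r_hat / m as expected.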
#7.2.4 Rescale the velocities to conserve energy
def normierung(self, particles, E_gesamt):
Summe_v=0
for particle in particles:
Summe_v += particle.v**2 #add up all squared velocities
return np.sqrt(E_gesamt /(0.5*Particle().m*Summe_v)) #determine the new total energy and return the scaling factor
#7.3 Return the positions of the particles
def position(self, particles): #function to pass each particle's position to the animation
return [particle.r for particle in particles]
box = Box() #access the Box() class as box
#8. Start the animation and run the program
def particle_Farbe(particles):
for particle in box.particles:
particle.color = partikel.color #assign each particle the noble gas color from the Particle class
if not interface.Teilchentracker_aus: #if the particle tracker is activated
box.particles[0].color = "red" #the first particle is colored red
return [particle.color for particle in box.particles] #return the colors
def init(): #draw the box
ax.set_xlim (0, box.Boxgroesse) #set the box size
ax.set_ylim (0, box.Boxgroesse)
ax.xaxis.set_major_locator(ticker.FixedLocator(np.arange(0,12e-9, 2e-9))) #set the tick locations
ax.yaxis.set_major_locator(ticker.FixedLocator(np.arange(0,12e-9, 2e-9)))
ax.xaxis.set_ticklabels(np.arange(0,11,2)) #set the labels in nm
ax.yaxis.set_ticklabels(np.arange(0,11,2))
ax.set_xlabel("Boxbreite in nm") #axis label
return box.scatter,
def update(frame): #function for the animation (FuncAnimation)
box.zeitliche_Entwicklung(box.particles, box.Boxgroesse, box.dt, box.E_gesamt) #call the time-evolution function
box.scatter.set_offsets(np.array(box.position(box.particles))) #apply the markers' new positions
box.scatter.set_color(particle_Farbe(box.particles)) #change the particle colors if needed
return box.scatter,
box.start_Animation() #call the initial-conditions function
ani = animation.FuncAnimation(fig, update , frames=range(10000), init_func = init, blit=True,\
interval = 1/2000, repeat = True) #run the animation
Tk.mainloop() #start the Tkinter main loop
|
tappelnano/molecular_dynamics
|
2022_02_13_C5_Molekulardynamik.py
|
2022_02_13_C5_Molekulardynamik.py
|
py
| 22,402 |
python
|
de
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.figure.Figure",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "matplotlib.figure.Figure",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.Arrow",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "tkinter.Toplevel",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "tkinter.N",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tkinter.N",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "tkinter.Scale",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "tkinter.S",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Scale",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "tkinter.S",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "tkinter.StringVar",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "tkinter.OptionMenu",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "tkinter.S",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Label",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "tkinter.S",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "tkinter.N",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "tkinter.N",
"line_number": 189,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Button",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "tkinter.N",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.patches.Arrow",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "numpy.sqrt",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 223,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.FancyArrow",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "random.uniform",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "numpy.sin",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "scipy.constants.k",
"line_number": 278,
"usage_type": "name"
},
{
"api_name": "numpy.random.uniform",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "itertools.combinations",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 332,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 337,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.FixedLocator",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 384,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker.FixedLocator",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "matplotlib.ticker",
"line_number": 385,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "matplotlib.animation.FuncAnimation",
"line_number": 399,
"usage_type": "call"
},
{
"api_name": "matplotlib.animation",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "tkinter.mainloop",
"line_number": 401,
"usage_type": "call"
}
] |
72274308029
|
import datetime
import inspect
import json
import logging
from typing import Callable, Dict, List, Union
_JSON_INDENT = 4
_JSON_SEPERATORS = (",", ": ")
_DEPTH_RECURSION_DEFAULT = 1
_DEPTH_RECURSION_GET_LOGGER = 2
_DEPTH_RECURSION_JSON_LOGGER = 3
_LOGGING_LEVEL = logging.INFO if not __debug__ else logging.DEBUG
_FORMATTER_STR_DETAILED = (
"%(asctime)s (PID:%(process)d) %(levelname)s %(name)s: %(message)s"
)
# _FORMATTER_STR_SIMPLE = "%(name)s %(message)s"
_FORMATTER_STR = _FORMATTER_STR_DETAILED
def get_method_name(
module_name: str = None,
class_name: str = None,
depth_recursion: int = _DEPTH_RECURSION_DEFAULT,
) -> str:
"""Retrieves a method name with a module name and class name.
:param module_name: Module name
:type module_name: str
:param class_name: Class name
:type class_name: str
:param depth_recursion: Depth of recursive call for call stacks (>=1)
:type depth_recursion: int
:return: Method name
:rtype: str
"""
if depth_recursion < 1:
raise ValueError(f"depth_recursion is not natural number. - {depth_recursion}")
# Gets an appropriate frame stack where the logger is called.
f_stack = inspect.currentframe()
for _ in range(depth_recursion):
f_stack = f_stack.f_back
if f_stack is None:
raise ValueError("Reached the call stack limit.")
method_name = f_stack.f_code.co_name
if module_name is None and class_name is None:
return method_name
elif module_name is None:
return f"{class_name}.{method_name}"
elif class_name is None:
return f"{module_name}.{method_name}"
else:
return f"{module_name}.{class_name}.{method_name}"
def _logging_base_decorator(func_logging_decorator: Callable) -> Callable:
"""Decorator Function with Parameters.
:param func_logging_system: Function object for Decoration
:type func_logging_system: Function object
:return: Wrapper function's object
:rtype: Callable
"""
def wrapper(*args, **kwargs):
def wrapper_logging_decorator(func_get_logger):
return func_logging_decorator(func_get_logger, *args, **kwargs)
return wrapper_logging_decorator
return wrapper
@_logging_base_decorator
def _logging_decorator(
func_get_logger: Callable, level: int = _LOGGING_LEVEL, is_propagate: bool = False
) -> Callable:
"""Decorator Function for Python Logging.
:param func_get_logger: Function object for Decoration
:type func_get_logger: function
:param level: Logging Level
:type level: int
    :param is_propagate: Whether the logger propagates to ancestor loggers (True) or not (False)
    :type is_propagate: bool
    :return: Wrapper function's object
:rtype: Callable
"""
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(_FORMATTER_STR)
handler.setFormatter(formatter)
def wrapper(name):
logger = func_get_logger(name)
if handler is not None:
logger.addHandler(handler)
logger.setLevel(level)
logger.propagate = is_propagate
return logger
return wrapper
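# The two decorator levels above are equivalent to the manual call chain, e.g.:
#   configured = _logging_decorator(level=logging.DEBUG, is_propagate=False)
#   get_logger = configured(logging.getLogger)
# i.e. _logging_base_decorator is what lets _logging_decorator accept keyword arguments.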
@_logging_decorator()
def get_logger(name: str) -> logging.Logger:
"""Gets a logger with the name.
:param name: Name of the logger
:type name: str
    :return: Logger
:rtype: logging.Logger
"""
return logging.getLogger(name=name)
def get_default_logger() -> logging.Logger:
"""Gets a logger with the method name.
    :return: Logger
:rtype: logging.Logger
"""
return get_logger(name=get_method_name(depth_recursion=_DEPTH_RECURSION_GET_LOGGER))
def get_class_default_logger(
class_name: str, module_name: str = None
) -> logging.Logger:
"""Gets a logger with the class name.
:param class_name: Class name.
:type class_name: str
    :param module_name: (optional) Module name.
    :type module_name: str
    :return: Logger
:rtype: logging.Logger
"""
return get_logger(
name=get_method_name(
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_GET_LOGGER,
)
)
def _json_serialize(obj: object) -> str:
"""Serializes the given object
:param obj: obj
:type obj: object
:return iso-formatted obj
:rtype: str
"""
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError(f"Type {type(obj)} not serializable")
def _json_dumps(json_items: Union[List[object], Dict[object, object]]) -> str:
"""Dumps as a JSON format.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
    :return: JSON-formatted items.
:rtype: str
"""
return json.dumps(
json_items,
indent=_JSON_INDENT,
ensure_ascii=False,
sort_keys=True,
separators=_JSON_SEPERATORS,
default=_json_serialize,
)
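# Example (illustrative): date and datetime values survive serialization through the
# default=_json_serialize hook:
#   _json_dumps({"when": datetime.date(2022, 2, 13)})
#   # -> '{\n    "when": "2022-02-13"\n}'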
def json_logger(
level: int,
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
depth_recursion: int = 2,
msg: str = None,
) -> None:
"""Logs the given json string.
:param level: Logging level.
:type level: int
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param depth_recursion: Depth recursion.
:type depth_recursion: int
:param msg: Logging message.
:type msg: str
"""
    logger = get_logger(
        get_method_name(
            module_name=module_name,
            class_name=class_name,
            depth_recursion=depth_recursion,
        )
    )
    logger.log(level=level, msg=msg)
    logger.log(level=level, msg=_json_dumps(json_items))
def json_logger_debug(
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
msg: str = None,
) -> None:
"""Logs the given json string as DEBUG.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param msg: Logging message.
:type msg: str
"""
json_logger(
level=logging.DEBUG,
json_items=json_items,
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_JSON_LOGGER,
msg=msg,
)
def json_logger_info(
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
msg: str = None,
) -> None:
"""Logs the given json string as INFO.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param msg: Logging message.
:type msg: str
"""
json_logger(
level=logging.INFO,
json_items=json_items,
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_JSON_LOGGER,
msg=msg,
)
def json_logger_warning(
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
msg: str = None,
) -> None:
"""Logs the given json string as WARNING.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param msg: Logging message.
:type msg: str
"""
json_logger(
level=logging.WARNING,
json_items=json_items,
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_JSON_LOGGER,
msg=msg,
)
def json_logger_error(
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
msg: str = None,
) -> None:
"""Logs the given json string as ERROR.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param msg: Logging message.
:type msg: str
"""
json_logger(
level=logging.ERROR,
json_items=json_items,
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_JSON_LOGGER,
msg=msg,
)
def json_logger_critical(
json_items: Union[List[object], Dict[object, object]],
module_name: str = None,
class_name: str = None,
msg: str = None,
) -> None:
"""Logs the given json string as CRITICAL.
:param json_items: Items to be converted to a JSON format.
:type json_items: list or dict
:param module_name: Module name.
:type module_name: str
:param class_name: Class name.
:type class_name: str
:param msg: Logging message.
:type msg: str
"""
json_logger(
level=logging.CRITICAL,
json_items=json_items,
module_name=module_name,
class_name=class_name,
depth_recursion=_DEPTH_RECURSION_JSON_LOGGER,
msg=msg,
)
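# Usage sketch (illustrative):
#   logger = get_default_logger()
#   logger.info("pipeline started")
#   json_logger_info({"step": 1, "status": "ok"}, msg="progress")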
|
novus-inc/pylogger
|
pylogger/pylogger.py
|
pylogger.py
|
py
| 9,703 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.INFO",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "inspect.currentframe",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "logging.StreamHandler",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "logging.Logger",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "logging.Logger",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "logging.Logger",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "logging.DEBUG",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 272,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "logging.WARNING",
"line_number": 322,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 332,
"usage_type": "name"
},
{
"api_name": "logging.ERROR",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "typing.Union",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "logging.CRITICAL",
"line_number": 382,
"usage_type": "attribute"
}
] |
38058129584
|
from lib.processors import findFaceGetPulse
import networkx as nx
"""
Simple tool to visualize the design of the real-time image analysis
Everything needed to produce the graph already exists in an instance of the
assembly.
"""
#get the component/data dependency graph (depgraph) of the assembly
assembly = findFaceGetPulse()
graph = assembly._depgraph._graph
#prune a few unconnected nodes not related to the actual analysis
graph.remove_node("@xin")
graph.remove_node("@xout")
graph.remove_node("driver")
#plot the graph to disk as a PNG image
ag = nx.to_agraph(graph)
ag.layout('dot')
ag.draw('design.png')
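#note: networkx 2.x moved this helper and made pygraphviz an explicit dependency;
#the rough equivalent there is:
#   ag = nx.nx_agraph.to_agraph(graph)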
|
noahcse/webcam_pulse_detect
|
make_design_graph.py
|
make_design_graph.py
|
py
| 615 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "lib.processors.findFaceGetPulse",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "networkx.to_agraph",
"line_number": 21,
"usage_type": "call"
}
] |