id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
6492974
|
<filename>attack-defence-challenges/tastyriffs/service/greatest.py
#!/usr/bin/python
import sys
import json
import random
import subprocess
trigger = False
last_rockstar = None
class Rockstar:
def __init__(self, name, age, notes, rockstar_type, rockstar_file):
self.name = name
self.age = age
self.notes = notes
self.rockstar_type = rockstar_type
self.rockstar_file = rockstar_file
self.rockstar_desc = file("./profiles/" + self.rockstar_file).read()
self.savedata = {"profile": {"name": self.name,
"age": self.age,
"notes": self.notes,
"rockstar": self.rockstar_type,
"rockstar_file": self.rockstar_file,
}
}
def fields(self):
savedata_output = json.dumps(self.savedata).encode("base64")
return (self.name, self.age, self.notes, self.rockstar_type,
self.rockstar_desc, savedata_output)
def write(data, ending="\n"):
sys.stdout.write(data + ending)
sys.stdout.flush()
def readline():
return sys.stdin.readline()
def print_menu():
write("Welcome to the Rockstar Creation Service.")
write("1. Create a rockstar")
write("2. Random a rockstar")
write("3. Load a rockstar")
write("4. Quit")
def handle_input():
write("> ", "")
response = readline().strip()
if response.isdigit():
handlers[int(response)]()
else:
write("404")
def generate():
name = "Placeholder name"
age = 21
notes = "Placeholder notes"
rockstar_type = random.choice(["classicrocker", "goth", "hairrocker",
"punkrocker", "speedrocker"])
rockstar_file = rockstar_type + ".rk"
rockstar = Rockstar(name, age, notes, rockstar_type, rockstar_file)
output = """
Name: %s
Age: %d
Notes: "%s"
Rockstar Type: %s
Rockstar Description: %s
Save Data: %s""" % rockstar.fields()
write(output)
global last_rockstar
last_rockstar = rockstar
def create():
write("Sorry, but we disabled this feature due to abuse.")
def cleanup():
rockstar = last_rockstar
params = ["./bin/tastyriffs"]
params.extend(rockstar.fields())
result = subprocess.call([str(i) for i in params])
if result == 0:
return "Application works properly."
else:
return "Something happened."
def randomise():
rockstar = generate()
write("Congratulations on your new rockstar!")
write("This is your rockstar profile. Keep it safe!")
def loading():
write("Please enter your rockstar profile: ", "")
rockstar_response = readline().strip()
rockstar_data_raw = json.loads(rockstar_response.decode('base64'))
rockstar_data = rockstar_data_raw['profile']
rockstar = Rockstar(rockstar_data['name'], rockstar_data['age'],
rockstar_data['notes'], rockstar_data['rockstar'],
rockstar_data['rockstar_file'])
write("Successfully loaded your rockstar!")
output = """
Name: %s
Age: %d
Notes: "%s"
Rockstar Type: %s
Rockstar Description: %s
""" % rockstar.fields()[:-1]
write(output)
global last_rockstar
last_rockstar = rockstar
def secretive():
global trigger
trigger = True
def goodbye():
write("Cleaning it all up...")
if trigger:
write(cleanup())
write("Did you get your shell yet?")
write("Goodbye")
sys.exit()
handlers = {1: create, 2: randomise, 3: loading, 4: goodbye,
int(file("picpic").read()): secretive}
def main():
while True:
print_menu()
handle_input()
if __name__ == "__main__":
main()
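# Note: option 3 ("Load a rockstar") deserialises an attacker-controlled,
# base64-encoded JSON profile, and the hidden menu option whose number is read
# from the "picpic" file flips `trigger`, so goodbye() ends up feeding the
# loaded profile fields to the ./bin/tastyriffs subprocess via cleanup().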
|
StarcoderdataPython
|
125278
|
from pathlib import Path
import traceback
from datetime import datetime
from time import time
import fnmatch
from typing import List, Dict
from abc import ABCMeta, abstractmethod
from maggma.core import Store
from maggma.core.drone import Drone, RecordIdentifier, Document
from maggma.utils import Timeout
class DirectoryDrone(Drone, metaclass=ABCMeta):
"""
Base Drone class for parsing data on disk in which each Record is a subdirectory
of the Path used to instantiate the drone that contains one or more files.
For example,
<path passed to Drone.__init__()>
calculation1/
input.in
output.out
logfile.log
calculation2/
input.in
output.out
logfile.log
calculation3/
input.in
output.out
logfile.log
In this drone, the name of the subdirectory serves as the 'record_key' for
each item, and each item contains a list of Document objects which each
correspond to a single file contained in the subdirectory. So the example
data above would result in 3 unique RecordIdentifiers with keys 'calculation1',
'calculation2', and 'calculation3'.
"""
def __init__(
self,
path: Path,
target: Store,
track_files: List,
timeout: int = 0,
delete_orphans: bool = False,
store_process_time: bool = True,
retry_failed: bool = False,
**kwargs,
):
"""
Initialize the DirectoryDrone.
Args:
path: parent directory containing all files and subdirectories to process
target: target Store
track_files: List of files or fnmatch patterns to be tracked when computing
the state_hash.
delete_orphans: Whether to delete documents on target store
with key values not present in source store. Deletion happens
after all updates, during Builder.finalize.
timeout: maximum running time per item in seconds
store_process_time: If True, add "_process_time" key to
document for profiling purposes
retry_failed: If True, will retry building documents that
previously failed
"""
self.path = path
self.target = target
self.track_files = track_files
self.delete_orphans = delete_orphans
self.kwargs = kwargs
self.timeout = timeout
self.store_process_time = store_process_time
self.retry_failed = retry_failed
super().__init__(path, target=target, **kwargs)
def read(self, path: Path = None) -> List[RecordIdentifier]:
"""
Given a path to a data folder, read all the files and return a list of
RecordIdentifiers, one per subdirectory.
** Note: requires the user to implement the function computeRecordIdentifierKey
Args:
path: Path object that indicate a path to a data folder
Returns:
List of Record Identifiers
"""
if not path:
path = self.path
else:
path = Path(path)
record_id_list = []
# generate a list of subdirectories
for d in [d for d in path.iterdir() if d.is_dir()]:
doc_list = [
Document(path=f, name=f.name) for f in d.iterdir() if f.is_file() and any([fnmatch.fnmatch(f.name, fn) for fn in self.track_files])
]
record_id = RecordIdentifier(
last_updated=datetime.now(), documents=doc_list, record_key=d.name
)
record_id.state_hash = record_id.compute_state_hash()
record_id_list.append(record_id)
return record_id_list
@abstractmethod
def unary_function(self, item: RecordIdentifier) -> Dict:
"""
ufn: Unary function to process item
You do not need to provide values for
source.key and source.last_updated_field in the output.
Any uncaught exceptions will be caught by
process_item and logged to the "error" field
in the target document.
"""
pass
def process_item(self, item: RecordIdentifier):
"""
Generic process items to process a RecordIdentifier using
unary_function
"""
self.logger.debug("Processing: {}".format(item.record_key))
time_start = time()
try:
with Timeout(seconds=self.timeout):
processed = dict(self.unary_function(item))
processed.update({"state": "successful"})
except Exception as e:
self.logger.error(traceback.format_exc())
processed = {"error": str(e), "state": "failed"}
time_end = time()
out = {
self.target.key: item.record_key,
self.target.last_updated_field: item.last_updated,
"state_hash": item.state_hash,
}
if self.store_process_time:
out["_process_time"] = time_end - time_start
out.update(processed)
return out
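# A minimal usage sketch, not part of the original module (LogDrone, some_store
# and the paths are hypothetical): a subclass only has to implement
# unary_function to turn one RecordIdentifier into a dict.
#
# class LogDrone(DirectoryDrone):
#     def unary_function(self, item: RecordIdentifier) -> Dict:
#         doc = item.documents[0]
#         with open(doc.path) as f:
#             return {"n_lines": sum(1 for _ in f)}
#
# drone = LogDrone(path=Path("/data/calcs"), target=some_store, track_files=["*.log"])
# record_ids = drone.read()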
|
StarcoderdataPython
|
6688806
|
<reponame>brongulus/MetaBiLSTM
from collections import defaultdict
import torch
import torch.nn as nn
from torch.nn.functional import dropout
from tqdm import tqdm
class WordPretrainedEmbbedings(nn.Module):
def __init__(self, embeddings):
super().__init__()
self.emb_layer = nn.Embedding.from_pretrained(
embeddings=embeddings, freeze=True, padding_idx=0
)
def forward(self, x):
return self.emb_layer(x)
class WordTrainableEmbeddings(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.emb_layer = nn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
padding_idx=0,
)
def forward(self, x):
return self.emb_layer(x)
class WordEmbeddingLayer(nn.Module):
def __init__(self, dataset, pretrained_path):
super().__init__()
emb_tensor, token_to_idx, idx_to_token = self.read_embeddings(pretrained_path)
num_emb, emb_dim = emb_tensor.shape
self.emb_dim = emb_dim
self.token_to_idx = token_to_idx
self.idx_to_token = idx_to_token
new_words_count = self.update_token_to_idx_vocab(dataset)
emb_tensor = torch.cat(
[emb_tensor, *[torch.zeros(1, emb_dim) for _ in range(new_words_count)]]
)
self.pretrained_embs = WordPretrainedEmbbedings(emb_tensor)
self.trainable_embs = WordTrainableEmbeddings(
num_emb + new_words_count, emb_dim
)
def update_token_to_idx_vocab(self, dataset):
dataset_vocab = self.build_vocab(dataset)
emb_vocab = set(self.token_to_idx.keys())
intersect_vocab = set.difference(dataset_vocab, emb_vocab)
shift = len(self.token_to_idx)
spec_token_to_idx = {word: i + shift for i, word in enumerate(intersect_vocab)}
spec_idx_to_token = {i + shift: word for i, word in enumerate(intersect_vocab)}
self.token_to_idx = {**self.token_to_idx, **spec_token_to_idx}
self.idx_to_token = {**self.idx_to_token, **spec_idx_to_token}
return len(intersect_vocab)
@staticmethod
def build_vocab(dataset):
vocab = set()
for sentence in dataset:
for pair in sentence:
vocab.add(pair[0])
return vocab
def forward(self, x):
return self.trainable_embs(x) + self.pretrained_embs(x)
@staticmethod
def read_embeddings(path):
with open(path) as f:
embs = []
for line in f:
embs.append(line)
embs = embs[1:]
token_to_idx = {"<PAD>": 0, "<OOV>": 1}
idx_to_token = {0: "<PAD>", 1: "<OOV>"}
i = 2
list_embs = [torch.FloatTensor([0] * 300), torch.FloatTensor([0] * 300)]
multis = defaultdict(lambda: 1)
for line in tqdm(embs):
word, *vec = line.split(" ")
word, _ = word.split("_")
vec = torch.FloatTensor(list(map(float, vec)))
if word not in token_to_idx:
token_to_idx[word] = i
idx_to_token[i] = word
list_embs.append(vec)
i += 1
else:
multis[token_to_idx[word]] += 1
list_embs[token_to_idx[word]] += vec
for idx, divider in multis.items():
list_embs[idx] /= divider
return torch.stack(list_embs), token_to_idx, idx_to_token
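# Note on the expected file format (inferred from the parsing above): a
# word2vec-style text file whose first line is a header, followed by lines of
# the form "token_TAG v1 v2 ... v300"; vectors for tokens that share the same
# surface form (e.g. different tags) are averaged, and indices 0/1 are reserved
# for the <PAD>/<OOV> zero vectors.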
class WordBiLSTM(nn.Module):
def __init__(
self,
hidden_dim,
dataset,
output_proj_size,
device,
mlp_proj_size,
num_layers,
dropout,
pretrained_embs_path,
):
super().__init__()
self.device = device
self.emb_layer = WordEmbeddingLayer(dataset, pretrained_embs_path)
self.rnn = nn.LSTM(
input_size=self.emb_layer.emb_dim,
hidden_size=hidden_dim,
batch_first=True,
bidirectional=True,
num_layers=num_layers,
dropout=dropout,
)
self.mlp_proj_size = mlp_proj_size
self.mlp = nn.Linear(2 * hidden_dim, mlp_proj_size)
self.relu = nn.ReLU()
self.output_proj = nn.Linear(mlp_proj_size, output_proj_size)
def forward(self, x):
inds, lens = x
inds, lens = inds.to(self.device), lens.to(self.device)
embedded = self.emb_layer(inds)
packed = torch.nn.utils.rnn.pack_padded_sequence(
embedded, lengths=lens, batch_first=True, enforce_sorted=False
)
output, _ = self.rnn(packed, self.get_initial_state(inds))
output, lens = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
word_encodings = self.mlp(self.relu(output))
output = self.output_proj(self.relu(word_encodings))
return output, word_encodings, lens
def get_initial_state(self, inp):
shape = self.rnn.get_expected_hidden_size(inp, None)
return torch.zeros(shape).to(self.device), torch.zeros(shape).to(self.device)
class WordBiGRU(nn.Module):
def __init__(
self,
hidden_dim,
dataset,
output_proj_size,
device,
mlp_proj_size,
num_layers,
dropout,
pretrained_embs_path,
):
super().__init__()
self.device = device
self.emb_layer = WordEmbeddingLayer(dataset, pretrained_embs_path)
self.rnn = nn.GRU(
input_size=self.emb_layer.emb_dim,
hidden_size=hidden_dim,
batch_first=True,
bidirectional=True,
num_layers=num_layers,
dropout=dropout,
)
self.mlp_proj_size = mlp_proj_size
self.mlp = nn.Linear(2 * hidden_dim, mlp_proj_size)
self.relu = nn.ReLU()
self.output_proj = nn.Linear(mlp_proj_size, output_proj_size)
def forward(self, x):
inds, lens = x
inds, lens = inds.to(self.device), lens.to(self.device)
embedded = self.emb_layer(inds)
packed = torch.nn.utils.rnn.pack_padded_sequence(
embedded, lengths=lens, batch_first=True, enforce_sorted=False
)
output, _ = self.rnn(packed, self.get_initial_state(inds))
output, lens = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
word_encodings = self.mlp(self.relu(output))
output = self.output_proj(self.relu(word_encodings))
return output, word_encodings, lens
def get_initial_state(self, inp):
# nn.GRU takes a single hidden-state tensor, unlike nn.LSTM's (h_0, c_0) tuple
shape = self.rnn.get_expected_hidden_size(inp, None)
return torch.zeros(shape).to(self.device)
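# Shape notes (as implied by forward() above): x is an (indices, lengths) pair,
# indices being a LongTensor of shape (batch, max_len); both models return the
# final projection of shape (batch, max_len, output_proj_size), the intermediate
# word encodings of shape (batch, max_len, mlp_proj_size), and the per-sequence
# lengths recovered from pad_packed_sequence.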
|
StarcoderdataPython
|
3325108
|
#!/usr/bin/env python3
import sys
import numpy as np
import os, shutil, zipfile
import pandas as pd
from sklearn import ensemble
from keras.models import Model, load_model
from dataset import PhysionetDatasetCNNInfer
VITALS_COLUMNS = ['HR', 'O2Sat', 'Temp', 'SBP', 'MAP', 'DBP', 'Resp', 'EtCO2']
LAB_COLUMNS = ['BaseExcess', 'HCO3', 'FiO2', 'pH', 'PaCO2', 'SaO2', 'AST', 'BUN',
'Alkalinephos', 'Calcium', 'Chloride', 'Creatinine', 'Bilirubin_direct',
'Glucose', 'Lactate', 'Magnesium', 'Phosphate', 'Potassium',
'Bilirubin_total', 'TroponinI', 'Hct', 'Hgb', 'PTT', 'WBC',
'Fibrinogen', 'Platelets']
DEMOGRAPHIC_COLUMNS = ['Age', 'Gender']
HOSPITAL_COLUMNS = ['Unit1', 'Unit2', 'HospAdmTime', 'ICULOS']
def load_sepsis_model():
model_filename = "iter_0_ratio_1_2_random_simpler.h5"
return load_model(model_filename)
def get_sepsis_score(data, model):
window_size = 24 # TODO: Change to args.window_size?
threshold = 0.5
# avg_values_filename="avg_values.joblib" # avg values from train data
# min_max_scaler_filename="min_max_scaler.joblib" # min/max values from train data
# Maybe we should trim extremely high and extremely low values?
# df = pd.DataFrame(data)
# Assuming that columns will always be in this order
# the loading function only returns numbers, no column names
# df.columns = ['HR', 'O2Sat', 'Temp', 'SBP', 'MAP', 'DBP', 'Resp', 'EtCO2', 'BaseExcess', 'HCO3', 'FiO2', 'pH', 'PaCO2', 'SaO2', 'AST', 'BUN', 'Alkalinephos', 'Calcium', 'Chloride', 'Creatinine', 'Bilirubin_direct', 'Glucose', 'Lactate', 'Magnesium', 'Phosphate', 'Potassium', 'Bilirubin_total', 'TroponinI', 'Hct', 'Hgb', 'PTT', 'WBC', 'Fibrinogen', 'Platelets', 'Age', 'Gender', 'Unit1', 'Unit2', 'HospAdmTime', 'ICULOS']
data_obj = PhysionetDatasetCNNInfer(data)
data_obj.__preprocess__(method="measured")
data_obj.__setwindow__(window_size)
# Pre-process data
features = data_obj.__getitem__(data_obj.__len__() - 1)[0]
X_test = features.reshape(1, window_size, len(data_obj.features), 1)
# Simple evaluation?
y_pred = model.predict(X_test)
y_pred = y_pred.reshape(y_pred.shape[0],)[0]
if y_pred >= threshold:
label = 1
else:
label = 0
# scores = model.predict_proba(test_df_preprocessed)[:, 1]
# labels = (scores > threshold).astype(int)
# print(scores)
# print(labels)
# This will only be called for one row at a time, so driver.py
# only expects 1 score and 1 label
# return scores[0], labels[0]
return y_pred, label
|
StarcoderdataPython
|
6479059
|
<reponame>robertsj/poropy
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'exampleLoaderTemplate.ui'
#
# Created: Sat Dec 17 23:46:27 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(762, 302)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.exampleTree = QtGui.QTreeWidget(self.layoutWidget)
self.exampleTree.setObjectName(_fromUtf8("exampleTree"))
self.exampleTree.headerItem().setText(0, _fromUtf8("1"))
self.exampleTree.header().setVisible(False)
self.verticalLayout.addWidget(self.exampleTree)
self.loadBtn = QtGui.QPushButton(self.layoutWidget)
self.loadBtn.setObjectName(_fromUtf8("loadBtn"))
self.verticalLayout.addWidget(self.loadBtn)
self.codeView = QtGui.QTextBrowser(self.splitter)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
font.setPointSize(10)
self.codeView.setFont(font)
self.codeView.setObjectName(_fromUtf8("codeView"))
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.loadBtn.setText(QtGui.QApplication.translate("Form", "Load Example", None, QtGui.QApplication.UnicodeUTF8))
|
StarcoderdataPython
|
8196249
|
"""
================
RAG Thresholding
================
This example constructs a Region Adjacency Graph (RAG) and merges regions
which are similar in color. We construct a RAG and define edges as the
difference in mean color. We then join regions with similar mean color.
"""
from skimage import data, io, segmentation, color
from skimage.future import graph
from matplotlib import pyplot as plt
img = data.coffee()
labels1 = segmentation.slic(img, compactness=30, n_segments=400)
out1 = color.label2rgb(labels1, img, kind='avg')
g = graph.rag_mean_color(img, labels1)
labels2 = graph.cut_threshold(labels1, g, 29)
out2 = color.label2rgb(labels2, img, kind='avg')
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True,
figsize=(6, 8))
ax[0].imshow(out1)
ax[1].imshow(out2)
for a in ax:
a.axis('off')
plt.tight_layout()
|
StarcoderdataPython
|
4802500
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 Eotvos Lorand University, Budapest, Hungary
from utils.codegen import format_type, get_all_extern_call_infos
from utils.extern import extern_has_tuple_params
from compiler_common import generate_var_name
from more_itertools import unique_everseen
#[ #include "dpdk_lib.h"
#[ #include "util_debug.h"
callinfos = get_all_extern_call_infos(hlir)
# for mname_parts, parinfos, ret, partypenames in unique_everseen(callinfos):
# partype_suffix = ''.join(f',{partypename}' for partype, partypename in partypenames)
# params = ', '.join([f'{ctype} {parname}' for parname, pardir, partype, (partypename, ctype, argtype, argvalue), type_par_idx in parinfos] + ['SHORT_STDPARAMS'])
# call = f'SHORT_EXTERNCALL{len(partypenames)}'
# #[ $ret $call($mname_parts${partype_suffix})($params);
# #[
# def is_tuple_param(partype):
# return partype.urtype.node_type == 'Type_Struct'
# def to_buf_type(partype, is_const):
# if is_tuple_param(partype):
# return f'uint8_buffer_t'
# return format_type(partype, is_const=is_const)
# calls = set()
# impls = set()
# for mname_parts, parinfos, ret, partypenames in callinfos:
# short_parinfos = [(ctype, partype, parname, pardir) for parname, pardir, partype, (partypename, ctype, argtype, argvalue), type_par_idx in parinfos if argvalue != None]
# pars = [f'{ctype} {parname}' for ctype, partype, parname, pardir in short_parinfos] + ['SHORT_STDPARAMS']
# argtypes = [to_buf_type(partype, pardir not in ('out', 'inout')) for ctype, partype, parname, pardir in short_parinfos] + ['SHORT_STDPARAMS']
# mname_postfix = ''.join(f',{ptn}' for (ptype, ptn), (ctype, partype, parname, pardir) in zip(partypenames, short_parinfos))
# # TODO it can also be i8s if signed
# mname_arg_postfix = ''.join(f',u8s' if is_tuple_param(ptype) else f',{ptn}' for (ptype, ptn), (ctype, partype, parname, pardir) in zip(partypenames, short_parinfos))
# impls.add((mname_parts, len(partypenames), mname_arg_postfix, ', '.join(argtypes)))
# calls.add((mname_parts, len(partypenames), mname_postfix, ', '.join(pars)))
# for mname_parts, partypelen, mname_arg_postfix, argtypes_txt in sorted(unique_everseen(impls)):
# #[ void EXTERNIMPL${partypelen}(${mname_parts}${mname_arg_postfix})(${argtypes_txt});
# #[
# for mname_parts, partypelen, mname_postfix, pars_txt in sorted(unique_everseen(calls)):
# #[ void SHORT_EXTERNCALL${partypelen}(${mname_parts}${mname_postfix})(${pars_txt});
# #[
# TODO the following bit is a duplicate from the .c.py file
def is_tuple_param(partype):
return partype.urtype.node_type == 'Type_Struct'
def to_buf_param(partype, parname):
size = f'({"+".join(f"{fld.size}" for fld in partype.urtype.fields)}+7) / 8'
offset = '0'
offsets = []
lens = []
for fld in partype.urtype.fields:
lens.append(f'{fld.size}')
offsets.append(offset)
offset += f'+{fld.size}'
parnames = ", ".join(f'"{parname}"' for parname in partype.fields.map('name'))
components = [
('size', f'{size}'),
('buffer', f'(uint8_t*){parname}'),
('part_count', f'{len(partype.fields)}'),
('part_bit_offsets', f'{{{", ".join(offsets)}}}'),
('part_bit_sizes', f'{{{", ".join(lens)}}}'),
('name', f'"{partype.name}"'),
('part_names', f'{{{parnames}}}'),
]
return f'(uint8_buffer_t){{{", ".join(f".{component} = {value}" for component, value in components)}}}'
def to_buf(partype, parname):
if is_tuple_param(partype):
return to_buf_param(partype, parname)
return parname
def to_buf_type(partype, partypename, buftype='u8s'):
return buftype if is_tuple_param(partype) else partypename
calls = set()
for mname_parts, parinfos, ret, partypeinfos in callinfos:
partype_suffix = ''.join(f',{ptn}' for (ptype, ptn) in partypeinfos)
params = ', '.join([f'{ctype} {parname}' for parname, pardir, partype, (partypename, ctype, argtype, argvalue), type_par_idx in parinfos if ctype is not None] + ['SHORT_STDPARAMS'])
params_as_buf = ', '.join([f'{to_buf_type(partype, ctype, "uint8_buffer_t")} {parname}' for parname, pardir, partype, (partypename, ctype, argtype, argvalue), type_par_idx in parinfos if ctype is not None] + ['SHORT_STDPARAMS'])
arginfos = tuple((pardir, partype, argtype, argvalue) for parname, pardir, partype, (partypename, ctype, argtype, argvalue), type_par_idx in parinfos if argvalue != None)
refvars = tuple(argvalue if pardir in ('out', 'inout') else None for idx, (pardir, partype, argtype, argvalue) in enumerate(arginfos))
args = ', '.join([refvar if refvar is not None else argvalue for refvar, (pardir, partype, argtype, argvalue) in zip(refvars, arginfos)] + ['SHORT_STDPARAMS_IN'])
args_as_buf = ', '.join([refvar if refvar is not None else to_buf(partype, argvalue) for refvar, (pardir, partype, argtype, argvalue) in zip(refvars, arginfos)] + ['SHORT_STDPARAMS_IN'])
mname_postfix = ''.join(f',{ptn}' for (ptype, ptn) in partypeinfos)
mname_postfix_as_buf = ''.join(f',{to_buf_type(ptype, ptn)}' for (ptype, ptn) in partypeinfos)
calls.add((len(partypeinfos), mname_parts, partype_suffix, params, params_as_buf, ret, mname_postfix, mname_postfix_as_buf, args, args_as_buf, refvars, arginfos, parinfos))
for partypeinfolen, mname_parts, partype_suffix, params, params_as_buf, ret, mname_postfix, mname_postfix_as_buf, args, args_as_buf, refvars, arginfos, parinfos in sorted(unique_everseen(calls, key=lambda c: c[0:3])):
if len(mname_parts) == 1:
call = f'SHORT_EXTERNCALL{partypeinfolen + len(mname_parts)-1}'
else:
call = f'EXTERNCALL{partypeinfolen + len(mname_parts)-2}'
extern_type_name = f''
varname = generate_var_name('extern')
params = f'EXTERNTYPE({mname_parts[0]})* {varname}, ' + params
args_as_buf = f'{varname}, ' + args_as_buf
return_stmt = 'return ' if ret != 'void' else ''
#[ $ret $call(${",".join(mname_parts)}${partype_suffix})($params);
#[
for partypeinfolen, mname_parts, partype_suffix, params, params_as_buf, ret, mname_postfix, mname_postfix_as_buf, args, args_as_buf, refvars, arginfos, parinfos in sorted(unique_everseen(calls, key=lambda c: (c[0:2], c[4]))):
if len(mname_parts) == 1:
call = f'SHORT_EXTERNCALL{partypeinfolen + len(mname_parts)-1}'
else:
call = f'EXTERNCALL{partypeinfolen + len(mname_parts)-2}'
extern_type_name = f''
varname = generate_var_name('extern')
params_as_buf = f'EXTERNTYPE({mname_parts[0]})* {varname}, ' + params_as_buf
args_as_buf = f'{varname}, ' + args_as_buf
#[ $ret EXTERNIMPL${partypeinfolen + len(mname_parts)-1}(${",".join(mname_parts)}${mname_postfix_as_buf})(${params_as_buf});
#[
|
StarcoderdataPython
|
37014
|
from unittest import TestCase
from parameterized import parameterized
from tests.test_utils import mock_request_handler
from web.web_auth_utils import remove_webpack_suffixes, is_allowed_during_login
class WebpackSuffixesTest(TestCase):
def test_remove_webpack_suffixes_when_css(self):
normalized = remove_webpack_suffixes('js/chunk-login-vendors.59040343.css')
self.assertEqual('js/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_js(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js')
self.assertEqual('js/login.js', normalized)
def test_remove_webpack_suffixes_when_js_map(self):
normalized = remove_webpack_suffixes('js/login.be16f278.js.map')
self.assertEqual('js/login.js.map', normalized)
def test_remove_webpack_suffixes_when_favicon(self):
normalized = remove_webpack_suffixes('favicon.123.ico')
self.assertEqual('favicon.123.ico', normalized)
def test_remove_webpack_suffixes_when_no_suffixes(self):
normalized = remove_webpack_suffixes('css/chunk-login-vendors.css')
self.assertEqual('css/chunk-login-vendors.css', normalized)
def test_remove_webpack_suffixes_when_no_extension(self):
normalized = remove_webpack_suffixes('data/some_file')
self.assertEqual('data/some_file', normalized)
class LoginResourcesTest(TestCase):
@parameterized.expand([
('/favicon.ico'),
('login.html'),
('/js/login.be16f278.js'),
('/js/login.be16f278.js.map'),
('/js/chunk-login-vendors.18e22e7f.js'),
('/js/chunk-login-vendors.18e22e7f.js.map'),
('/img/titleBackground_login.a6c36d4c.jpg'),
('/css/login.8e74be0f.css'),
('/fonts/roboto-latin-400.60fa3c06.woff'),
('/fonts/roboto-latin-400.479970ff.woff2'),
('/fonts/roboto-latin-500.020c97dc.woff2'),
('/fonts/roboto-latin-500.87284894.woff')
])
def test_is_allowed_during_login_when_allowed(self, resource):
request_handler = mock_request_handler(method='GET')
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertTrue(allowed, 'Resource ' + resource + ' should be allowed, but was not')
def test_is_allowed_during_login_when_prohibited(self):
request_handler = mock_request_handler(method='GET')
resource = 'admin.html'
allowed = is_allowed_during_login(resource, 'login.html', request_handler)
self.assertFalse(allowed, 'Resource ' + resource + ' should NOT be allowed, but WAS')
|
StarcoderdataPython
|
3314013
|
import importlib
import ctypes
import random
from .Message import Message
# Emmulates the main AUX steering board
class AuxSteering(Message):
def __init__(self, addr_CAN, addr_telem, emulator=None):
super().__init__(addr_CAN, addr_telem)
self.emulator = emulator
self.cplusOn = 0
self.cminusOn = 0
self.hornOn = 0
self.regenOn = 0
self.hazardsOn = 0
self.leftOn = 0
self.rightOn = 0
self.headlightsOn = 0
def toCharArray(self):
output = [0]
output[0] |= self.hazardsOn << 0
output[0] |= self.headlightsOn << 1
output[0] |= self.leftOn << 2
output[0] |= self.rightOn << 3
output[0] |= self.cplusOn << 4
output[0] |= self.cminusOn << 5
output[0] |= self.hornOn << 6
output[0] |= self.regenOn << 7
return output
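# Bit layout of the single payload byte assembled above:
# bit 0: hazardsOn, bit 1: headlightsOn, bit 2: leftOn, bit 3: rightOn,
# bit 4: cplusOn, bit 5: cminusOn, bit 6: hornOn, bit 7: regenOn.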
def print(self):
print(self.toCharArray())
def sendCAN(self):
if self.emulator is not None:
self.emulator.sendCAN(self.toCharArray(), self.addr_CAN, False, False)
else:
raise NotImplementedError
|
StarcoderdataPython
|
13549
|
<reponame>ZJULiHongxin/two-hand-pose-est
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import platform
import numpy as np
import time
import os
import torch
import torch.backends.cudnn as cudnn
import _init_paths
from config import cfg
from config import update_config
from utils.utils import get_model_summary
from ptflops import get_model_complexity_info
from fp16_utils.fp16util import network_to_half
from core.loss import BoneLengthLoss, JointAngleLoss, JointsMSELoss
import dataset
from dataset.build import trans
from models import A2JPoseNet
from utils.misc import plot_performance
import matplotlib
if platform.system() == 'Linux':
matplotlib.use('Agg')
else:
matplotlib.use('Tkagg')
# python evaluate_2D.py --cfg ../experiments/InterHand/exp_test.yaml --model_path ../output/InterHand/exp_test/model_best.pth.tar --gpu 3 --batch_size 32
def parse_args():
parser = argparse.ArgumentParser(description='Please specify the mode [training/assessment/predicting]')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('opts',
help="Modify cfg options using the command-line",
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--gpu',
help='gpu id for multiprocessing training',
default=-1,
type=int)
parser.add_argument('--world-size',
default=1,
type=int,
help='number of nodes for distributed training')
parser.add_argument('--is_vis',
default=0,
type=int)
parser.add_argument('--batch_size',
default=32,
type=int)
parser.add_argument('--model_path', default='', type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
update_config(cfg, args)
cfg.defrost()
cfg.freeze()
file_path = './eval_results'
if not os.path.exists(file_path):
os.mkdir(file_path)
record_prefix = os.path.join(file_path, 'eval2D_results_')
if args.is_vis:
result_dir = record_prefix + cfg.EXP_NAME
mse2d_lst = np.loadtxt(os.path.join(result_dir, 'mse2d_each_joint.txt'))
PCK2d_lst = np.loadtxt(os.path.join(result_dir, 'PCK2d.txt'))
plot_performance(PCK2d_lst[1,:], PCK2d_lst[0,:], mse2d_lst)
exit()
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
model_path = args.model_path
is_vis = args.is_vis
# FP16 SETTING
if cfg.FP16.ENABLED:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
if cfg.FP16.STATIC_LOSS_SCALE != 1.0:
if not cfg.FP16.ENABLED:
print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
model = eval(cfg.MODEL.NAME)(cfg)
if cfg.FP16.ENABLED:
model = network_to_half(model)
if cfg.MODEL.SYNC_BN and not args.distributed:
print('Warning: Sync BatchNorm is only supported in distributed training.')
if args.gpu != -1:
device = torch.device('cuda:'+str(args.gpu))
torch.cuda.set_device(args.gpu)
else:
device = torch.device('cpu')
# load model state
if model_path:
print("Loading model:", model_path)
ckpt = torch.load(model_path, map_location='cpu')
if 'state_dict' not in ckpt.keys():
state_dict = ckpt
else:
state_dict = ckpt['state_dict']
print('Model epoch {}'.format(ckpt['epoch']))
for key in list(state_dict.keys()):
new_key = key.replace("module.", "")
state_dict[new_key] = state_dict.pop(key)
model.load_state_dict(state_dict, strict=True)
model.to(device)
model.eval()
# inference_dataset = eval('dataset.{}'.format(cfg.DATASET.TEST_DATASET[0].replace('_kpt','')))(
# cfg.DATA_DIR,
# cfg.DATASET.TEST_SET,
# transform=transform
# )
inference_dataset = eval('dataset.{}'.format(cfg.DATASET.DATASET_NAME))(
cfg,
transforms=trans,
mode='test'
)
batch_size = args.batch_size
if platform.system() == 'Linux':
main_workers = min(8, batch_size)
else:
batch_size = 4
main_workers = 0
data_loader = torch.utils.data.DataLoader(
inference_dataset,
batch_size=batch_size, #48
shuffle=False,
num_workers=main_workers, #8
pin_memory=False
)
print('\nEvaluation loader information:\n' + str(data_loader.dataset))
n_joints = cfg.DATASET.NUM_JOINTS
th2d_lst = np.array([i for i in range(1,50)])
PCK2d_lst = np.zeros((len(th2d_lst),))
# two hands
mse2d_lst = np.zeros((2*n_joints,))
visibility_lst = np.zeros((2*n_joints,))
print('Start evaluating... [Batch size: {}]\n'.format(data_loader.batch_size))
with torch.no_grad():
pose2d_mse_loss = JointsMSELoss().to(device)
infer_time = [0,0]
start_time = time.time()
for i, ret in enumerate(data_loader):
# imgs: b x 3 x H x W
# pose2d_gt: b x 42 x 3 [u,v,z]
# hand_type: b x 2 ([1,0] for right, [0,1] for left and [1,1] for interacting hands)
# pose_valid: b x 42
imgs, pose2d_gt = ret['imgs'].cuda(device, non_blocking=True), ret['pose2d_gt']
hand_type, pose_valid = ret['hand_type'], ret['joint_valid'].numpy()
s1 = time.time()
batch_size = imgs.shape[0]
# cls: b x w*h*n_anchors x 42
# pose_pred: B x 42 x 2
# reg: B x w*h*n_anchors x 42 x 2
pose2d_pred, surrounding_anchors_pred, cls_pred, reg, temperature = model(imgs)
if i+1 >= min(len(data_loader), 20):
infer_time[0] += 1
infer_time[1] += time.time() - s1
# rescale to the original image before DLT
# for k in range(21):
# print(pose2d_gt[0,k].tolist(), pose2d_pred[0,k].tolist())
# input()
# 2D errors
# import matplotlib.pyplot as plt
# imgs = cv2.resize(imgs[0].permute(1,2,0).cpu().numpy(), tuple(data_loader.dataset.orig_img_size))
# for k in range(21):
# print(pose2d_gt[0,k],pose2d_pred[0,k],visibility[0,k])
# for k in range(0,21,5):
# fig = plt.figure()
# ax1 = fig.add_subplot(131)
# ax2 = fig.add_subplot(132)
# ax3 = fig.add_subplot(133)
# ax1.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
# plot_hand(ax1, pose2d_gt[0,:,0:2], order='uv')
# ax2.imshow(cv2.cvtColor(imgs / imgs.max(), cv2.COLOR_BGR2RGB))
# plot_hand(ax2, pose2d_pred[0,:,0:2], order='uv')
# ax3.imshow(heatmaps_pred[0,k].cpu().numpy())
# plt.show()
mse_each_joint = np.linalg.norm(pose2d_pred[:,:,0:2].cpu().numpy() - pose2d_gt[:,:,0:2].numpy(), axis=2) * pose_valid # b x 42
mse2d_lst += mse_each_joint.sum(axis=0)
visibility_lst += pose_valid.sum(axis=0)
for th_idx in range(len(th2d_lst)):
PCK2d_lst[th_idx] += np.sum((mse_each_joint < th2d_lst[th_idx]) * pose_valid)
period = min(len(data_loader), 10)
if i % (len(data_loader)//period) == 0:
print("[Evaluation]{}% finished.".format(period * i // (len(data_loader)//period)))
#if i == 10:break
print('Evaluation spent {:.2f} s\tfps: {:.1f} {:.4f}'.format(time.time()-start_time, infer_time[0]/infer_time[1], infer_time[1]/infer_time[0]))
mse2d_lst /= visibility_lst
PCK2d_lst /= visibility_lst.sum()
result_dir = record_prefix+cfg.EXP_NAME
if not os.path.exists(result_dir):
os.mkdir(result_dir)
mse_file, pck_file = os.path.join(result_dir, 'mse2d_each_joint.txt'), os.path.join(result_dir, 'PCK2d.txt')
print('Saving results to ' + mse_file)
print('Saving results to ' + pck_file)
np.savetxt(mse_file, mse2d_lst, fmt='%.4f')
np.savetxt(pck_file, np.stack((th2d_lst, PCK2d_lst)))
plot_performance(PCK2d_lst, th2d_lst, mse2d_lst, hand_type='interacting')
main()
|
StarcoderdataPython
|
1925491
|
<reponame>ccampo133/cachet-client<filename>tests/test_subscribers.py
import types
from unittest import mock
from base import CachetTestcase
import cachetclient
from fakeapi import FakeHttpClient
@mock.patch('cachetclient.client.HttpClient', new=FakeHttpClient)
class SubscriberTests(CachetTestcase):
def test_create(self):
client = self.create_client()
sub = client.subscribers.create(email='<EMAIL>')
self.assertEqual(sub.id, 1)
self.assertEqual(sub.email, '<EMAIL>')
# Count subscribers
self.assertEqual(client.subscribers.count(), 1)
# Inspect subscribers
sub = next(client.subscribers.list())
self.assertEqual(sub.id, 1)
self.assertEqual(sub.email, '<EMAIL>')
self.assertTrue(sub.is_global)
self.assertIsNotNone(sub.verify_code)
self.assertIsNotNone(sub.verified_at)
self.assertIsNotNone(sub.created_at)
self.assertIsNotNone(sub.updated_at)
# Delete subscriber
sub.delete()
self.assertEqual(client.subscribers.count(), 0)
def test_list(self):
"""Create a bunch of subscribers and list them"""
client = self.create_client()
num_subs = 20 * 4 + 5
for i in range(num_subs):
client.subscribers.create(
email="<EMAIL>".<EMAIL>(str(i).zfill(3)),
verify=True,
)
# Ensure the count matches
self.assertEqual(client.subscribers.count(), num_subs)
# List should return a generator
self.assertIsInstance(client.subscribers.list(), types.GeneratorType)
# Request specific page
sub = next(client.subscribers.list(page=2, per_page=10))
self.assertEqual(sub.id, 11)
# Delete them all (We cannot delete while iterating)
subs = list(client.subscribers.list())
self.assertEqual(len(subs), num_subs)
self.assertEqual(len(set(subs)), num_subs)
for sub in subs:
sub.delete()
# We should have no subs left
self.assertEqual(client.subscribers.count(), 0)
|
StarcoderdataPython
|
11208959
|
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_max_inclusive_2_xsd.nistschema_sv_iv_atomic_integer_max_inclusive_2 import NistschemaSvIvAtomicIntegerMaxInclusive2
__all__ = [
"NistschemaSvIvAtomicIntegerMaxInclusive2",
]
|
StarcoderdataPython
|
1671273
|
from hierarc.Likelihood.SneLikelihood.sne_likelihood import SneLikelihood
import pytest
import numpy as np
class TestSnePantheon(object):
def setup(self):
np.random.seed(42)
# define redshifts
num = 30 # number of Sne
zcmb = np.linspace(start=0.01, stop=0.8, num=num)
zhel = zcmb
# define cosmology
from astropy.cosmology import FlatLambdaCDM
om_mean, om_sigma = 0.284, 0.012
cosmo_true = FlatLambdaCDM(H0=70, Om0=om_mean)
# define apparent magnitudes
m_apparent = 18
z_pivot = 0.1
# compute luminosity distances
angular_diameter_distances = cosmo_true.angular_diameter_distance(zcmb).value
lum_dists_true = (5 * np.log10((1 + zhel) * (1 + zcmb) * angular_diameter_distances))
angular_diameter_distance_pivot = cosmo_true.angular_diameter_distance(z_pivot).value
lum_dist_pivot = (5 * np.log10((1 + z_pivot) * (1 + z_pivot) * angular_diameter_distance_pivot))
# draw from scatter
sigma_m_z = 0.1
cov_mag = np.ones((num, num)) * 0.05 ** 2 + np.diag(
np.ones(num) * 0.1 ** 2) # full covariance matrix of systematics
cov_mag_measure = cov_mag + np.diag(np.ones(num) * sigma_m_z ** 2)
mags = m_apparent + lum_dists_true - lum_dist_pivot
mag_mean = np.random.multivariate_normal(mags, cov_mag_measure)
kwargs_sne_likelihood = {'mag_mean': mag_mean, 'cov_mag': cov_mag, 'zhel': zhel, 'zcmb': zcmb}
self.likelihood = SneLikelihood(sample_name='CUSTOM', **kwargs_sne_likelihood)
self.lum_dists_true = lum_dists_true
self.m_apparent_true = m_apparent
self.sigma_m_true = sigma_m_z
self.cosmo_true = cosmo_true
self.z_anchor = z_pivot
def test_log_likelihood(self):
logL = self.likelihood.log_likelihood(self.cosmo_true, apparent_m_z=self.m_apparent_true,
sigma_m_z=self.sigma_m_true, z_anchor=self.z_anchor)
logL_high = self.likelihood.log_likelihood(self.cosmo_true, apparent_m_z=self.m_apparent_true + 0.2,
sigma_m_z=self.sigma_m_true, z_anchor=self.z_anchor)
assert logL > logL_high
logL_low = self.likelihood.log_likelihood(self.cosmo_true, apparent_m_z=self.m_apparent_true - 0.2,
sigma_m_z=self.sigma_m_true, z_anchor=self.z_anchor)
assert logL > logL_low
if __name__ == '__main__':
pytest.main()
|
StarcoderdataPython
|
6477025
|
default_app_config = "apps.api.network.apps.NetworkConfig"
|
StarcoderdataPython
|
209660
|
<filename>scripts/custom.py
import numpy as np
import skfuzzy as fuzz
import skfuzzy.control as ctrl
import scipy.ndimage as img
def custom_process(height):
""" Custom function for experimental data analysis. """
return height
def fuzzy_custom(height, growth, canopy):
""" Perform fuzzy logic analysis on data. """
stress_data = np.empty_like(height)
# Create inputs/outputs to the fuzzy control system
h_var = ctrl.Antecedent(np.linspace(-0.01, 1.01, num=100), 'height')
g_var = ctrl.Antecedent(np.linspace(-0.01, 1.01, num=100), 'growth')
c_var = ctrl.Antecedent(np.linspace(-0.01, 1.01, num=100), 'canopy')
stress_var = ctrl.Consequent(np.linspace(0, 1, num=100), 'stress')
# Create membership functions for Antecedents and Consequents
g_var.automf(3)
c_var.automf(3)
h_var['poor'] = fuzz.trapmf(np.linspace(-0.01, 1.01, num=100),
[0, 0, 0.25, 0.5])
h_var['average'] = fuzz.trimf(np.linspace(-0.01, 1.01, num=100), [0.25,
0.5,
0.75])
h_var['good'] = fuzz.trapmf(np.linspace(-0.01, 1.01, num=100), [0.5, 0.75,
1, 1])
stress_var['low'] = fuzz.trapmf(np.linspace(-0.01, 1.01, num=100),
[0, 0, 0.25, 0.5])
stress_var['med'] = fuzz.trimf(np.linspace(-0.01, 1.01, num=100),
[0.25, 0.5, 0.75])
stress_var['high'] = fuzz.trapmf(np.linspace(-0.01, 1.01, num=100),
[0.5, 0.75, 1, 1])
# Create basic rule-set grouping poor performance with high stress
# Low height rules
# GROWTH
# H L M H
# E ______
# I L| L L M
# G M| L L L
# H H| L L L
# T
rule_l1 = ctrl.Rule((h_var['poor'] & g_var['good'] & c_var['good']),
stress_var['med'])
rule_l2 = ctrl.Rule((h_var['poor'] & g_var['poor']), stress_var['high'])
rule_l3 = ctrl.Rule((h_var['poor'] & g_var['average']), stress_var['high'])
rule_l4 = ctrl.Rule((h_var['poor'] & c_var['poor']), stress_var['high'])
rule_l5 = ctrl.Rule((h_var['poor'] & c_var['average']), stress_var['high'])
# Med height rules
# GROWTH
# H L M H
# E ______
# I L| M M H
# G M| M M M
# H H| L M M
# T
rule_m1 = ctrl.Rule((h_var['average'] & g_var['good'] & c_var['good']),
stress_var['low'])
rule_m2 = ctrl.Rule((h_var['average'] & g_var['poor'] & c_var['poor']),
stress_var['high'])
rule_m3 = ctrl.Rule((h_var['average'] & g_var['average']),
stress_var['med'])
rule_m4 = ctrl.Rule((h_var['average'] & c_var['average']),
stress_var['med'])
rule_m5 = ctrl.Rule((h_var['average'] & g_var['poor'] & c_var['good']),
stress_var['med'])
rule_m6 = ctrl.Rule((h_var['average'] & g_var['good'] & c_var['poor']),
stress_var['med'])
# High height rule
# GROWTH
# H L M H
# E ______
# I L| H H H
# G M| H H H
# H H| H H H
# T
rule_h1 = ctrl.Rule((h_var['good']), stress_var['low'])
# Create the fuzzy control system with the defined rule-set
stress_sys = ctrl.ControlSystem([rule_l1, rule_l2, rule_l3, rule_l4,
rule_l5, rule_m1, rule_m2, rule_m3,
rule_m4, rule_m5, rule_m6, rule_h1])
stress_sim = ctrl.ControlSystemSimulation(stress_sys)
for i in range(height.shape[2]):
# Split off layer from rest of data
height_layer = height[:, :, i]
growth_layer = growth[:, :, i]
canopy_layer = canopy[:, :, i]
# Normalize all of the data
height_layer = np.divide(height_layer, np.amax(height_layer))
growth_layer = np.divide(growth_layer, np.amax(growth_layer))
canopy_layer = np.divide(canopy_layer, np.amax(canopy_layer))
# Input each data map into the simulation
stress_sim.input['height'] = height_layer
stress_sim.input['growth'] = growth_layer
stress_sim.input['canopy'] = canopy_layer
# Run the simulation and extract the output
stress_sim.compute()
stress_layer = stress_sim.output['stress']
# Smooth out local minimums and maximums
stress_layer = img.grey_opening(stress_layer, structure=np.ones((3, 3)))
stress_data[:, :, i] = stress_layer
return stress_data
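# Hypothetical usage sketch (shapes and values assumed, not from the original
# script): height, growth and canopy are co-registered arrays of shape
# (rows, cols, layers), e.g. np.random.rand(64, 64, 5); fuzzy_custom returns a
# stress map of the same shape with defuzzified values in [0, 1].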
|
StarcoderdataPython
|
5053679
|
<filename>bigflow_python/python/bigflow/test/write_binary_test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
@Author: zhenggonglin
@Date: 2016-02-27
"""
import os
import unittest
import subprocess
import uuid
from bigflow import error
from bigflow import input
from bigflow import output
from bigflow import serde
from bigflow import transforms
from bigflow.test import test_base
from bigflow.util.log import logger
class TestCase(test_base.PipelineBasedTest):
def _compare_expect_data_and_output(self, expect_data, output_path):
data = self._pipeline.read(input.TextFile(output_path)).get()
self.assertItemsEqual(expect_data, data)
@test_base.run_mode(fs = ["local", "hdfs"])
def test_write_binary_set_by_user(self):
# Set record delimiter by user
raw_data = ["aaa", "bbb", "ccc"]
special_record_delimiter = chr(2)+chr(3)+chr(4)
record_delimiters = [
"\t",
"\r\n",
special_record_delimiter,
]
expect_data = [
["aaa\tbbb\tccc\t"],
["aaa", "bbb", "ccc"],
[special_record_delimiter.join(raw_data) + special_record_delimiter],
]
self.tmp_output_dirs = []
for record_delimiter in record_delimiters:
data = self._pipeline.parallelize(raw_data)
output_dir = self.generate_tmp_path()
self.tmp_output_dirs.append(output_dir)
self._pipeline.write(data,
output.TextFile(output_dir, record_delimiter=record_delimiter))
self._pipeline.run()
for idx, output_dir in enumerate(self.tmp_output_dirs):
self._compare_expect_data_and_output(expect_data[idx], output_dir)
@test_base.run_mode(fs = ["local", "hdfs"])
def test_write_binary_use_default(self):
# Use default record delimiter
raw_data = ["aaa", "bbb", "ccc"]
data = self._pipeline.parallelize(raw_data)
output_dir = self.generate_tmp_path()
self._pipeline.write(data, output.TextFile(output_dir))
self._pipeline.run()
self._compare_expect_data_and_output(raw_data, output_dir)
@test_base.run_mode(fs = ["local", "hdfs"])
def test_write_binary_none(self):
# Don't set record_delimiter, write binary.
chars = [chr(i) for i in xrange(1, 10)]
from random import shuffle
from random import randint
shuffle(chars)
raw_data = []
for cnt in xrange(100):
index1 = randint(0, len(chars)-1)
index2 = randint(0, len(chars)-1)
raw_data.append("".join(chars[min(index1, index2): max(index1, index2)]))
raw_data
data = self._pipeline.parallelize(raw_data)
output_dir = self.generate_tmp_path()
self._pipeline.write(data, output.TextFile(output_dir, record_delimiter=None))
self._pipeline.run()
expect_data = ["".join(raw_data)]
self._compare_expect_data_and_output(expect_data, output_dir)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
6430560
|
# -*- coding: utf-8 -*-
"""
device.py
=========
Devices are connected to the computer. They control sensors and actuators. A device has to be able to set and read
values.
Setting up a complex device such as a laser would require defining it as a device and its properties as sensors or
actuators, respectively.
.. warning::
If problems arise when adding new devices, it is important to check :meth:`initialize_driver`.
The parameters passed when initializing each device type were hardcoded.
.. todo::
Make the parameters passed when initializing device drivers flexible.
.. sectionauthor:: <NAME>
"""
import importlib
import logging
from .actuator import Actuator
from .sensor import Sensor
from .. import Q_
logger = logging.getLogger(__name__)
class Device:
"""
Device is responsible for the communication with real devices. Device takes only one argument, a dictionary of
properties, including the driver.
Device has two properties, one called _properties that stores the initial properties passed to the device and is
read-only. _params stores the parameters passed during execution; it doesn't store a history, just the latest one.
"""
def __init__(self, properties):
if 'name' in properties:
logger.debug('Loaded properties of {}'.format(properties['name']))
self._name = properties['name']
else:
logger.debug('Loaded properties of device without name')
self._name = 'nameless'
self._properties = properties
self.driver = None
self._params = {}
def add_driver(self, driver):
""" Adds the driver of the device. It has to be initialized()
:param driver: driver of any class.
:return: Null
"""
self.driver = driver
logger.debug('Added driver to {}'.format(self._name))
def initialize_driver(self):
""" Initializes the driver.
There are 4 types of possible connections:
- GPIB
- USB
- serial
- daq
The first 3 are based on Lantz and its initialization routine, while daq was inherited from previous code and
has a different initialization routine."""
if 'driver' in self._properties:
d = self._properties['driver'].split('/')
driver_class = getattr(importlib.import_module(d[0]), d[1])
if 'connection' in self._properties:
connection_type = self._properties['connection']['type']
logger.debug('Initializing {} connection'.format(connection_type))
try:
if connection_type == 'GPIB':
# Assume it is a lantz driver
self.driver = driver_class.via_gpib(self._properties['connection']['port'])
self.driver.initialize()
elif connection_type == 'USB':
# Assume it is a lantz driver
self.driver = driver_class.via_usb()
self.driver.initialize()
logger.warning('Connection {} was never tested.'.format(connection_type))
raise Warning('This was never tested!')
elif connection_type == 'serial':
# Assume it is a lantz driver
self.driver = driver_class.via_serial(self._properties['connection']['port'])
self.driver.initialize()
logger.warning('Connection {} was never tested.'.format(connection_type))
raise Warning('This was never tested!')
elif connection_type == 'daq':
self.driver = driver_class(self._properties['connection']['port'])
except:
logger.error('{} driver for {} not initialized'.format(connection_type, self._name))
raise Exception('Driver not initialized')
def apply_values(self, values):
""" Iterates over all values of a dictionary and sets the values of the driver to it. It is kept for legacy support
but it is very important to switch to apply_value, passing an actuator.
.. warning:: This method can misbehave with the new standards of sensors and actuators in place since version 0.1.
:param values: a dictionary of parameters and desired values for those parameters. The parameters should have units.
"""
if self.driver is None:
logger.error('Trying to apply values before initializing the driver')
raise Exception('Driver not yet initialized')
if isinstance(values, dict):
for k in values:
if not isinstance(values[k], Q_):
try:
# Tries to convert to proper units, if it fails it uses the value as is
value = Q_(values[k])
except:
logger.warning('Value {} could not be converted to Quantity'.format(values[k]))
value = values[k]
logger.info('Setting {} to {:~}'.format(k, value))
try:
setattr(self.driver, k, value)
except:
logger.error('Problem setting %s in %s' % (k, self))
self._params[k] = value
else:
logger.error('Drivers can only update dictionaries')
raise Exception('Drivers can only update dictionaries')
def apply_value(self, actuator, value):
""" Applies a given value to an actuator through the driver of the device. It is only a relay function left here
to keep the hierarchical structure of the program, i.e. actuators communicate with devices, devices communicate
with models and models with drivers.
:param actuator: instance of Actuator
:param value: A value to be set. Ideally a Quantity.
"""
if not isinstance(actuator, Actuator):
err_str = "Trying to update the value of {} and not of an Actuator".format(type(actuator))
logger.error(err_str)
raise Exception(err_str)
if not isinstance(value, Q_):
logger.info("Passing value {} to {} and that is not a Quantity".format(value, actuator.name))
self.driver.apply_value(actuator, value)
def read_value(self, sensor):
""" Reads a value from a sensor. This method is just a relay to a model, in order to keep the structure of the
program tidy.
"""
if not isinstance(sensor, Sensor):
err_str = "Trying to read the value of {} and not of a Sensor".format(type(sensor))
logger.error(err_str)
raise Exception(err_str)
return self.driver.read_value(sensor)
@property
def params(self):
return self._params
@property
def properties(self):
return self._properties
def __str__(self):
return self._name
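# A minimal, hypothetical properties dictionary for illustration (module path,
# class name and port are made up; the keys follow the checks in
# initialize_driver()):
#
# props = {
#     'name': 'NI card',
#     'driver': 'controllers.ni/NiDaq',
#     'connection': {'type': 'daq', 'port': 'Dev1'},
# }
# dev = Device(props)
# dev.initialize_driver()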
|
StarcoderdataPython
|
121026
|
<gh_stars>1-10
# proxy module
from __future__ import absolute_import
from codetools.blocks.ast_25.ast import *
|
StarcoderdataPython
|
1679304
|
import random
import time
b = []
for x in range(0,100):
b.append(int(random.random()*10000))
maximum = len(b) - 1
for i in range(0,maximum):
start_time = time.time()
for j in range(0,maximum):
if b[j] > b[j + 1]:
temp = b[j]
b[j] = b[j + 1]
b[j + 1] = temp
maximum -= 1
print b
print ("---%s seconds---" % (time.time() - start_time))
|
StarcoderdataPython
|
6703357
|
<reponame>dead-tech/pre-commit-cmake
from __future__ import annotations
import argparse
import os
import subprocess
from contextlib import contextmanager
from typing import Iterator
@contextmanager
def working_directory(path: str) -> Iterator[None]:
prev_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
def ext_cmd(*cmd: str) -> int:
result = subprocess.run(cmd, capture_output=True)
if result.returncode != 0:
print(result.stderr.decode())
return result.returncode
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'--build-dir', default='build/',
help='path to build dir',
)
parser.add_argument('--release', action='store_true', help='release build')
args = parser.parse_args()
build_dir = os.path.abspath(args.build_dir)
build_type = 'Debug'
if args.release:
build_type = 'Release'
if not os.path.isdir(build_dir):
os.mkdir(build_dir)
with working_directory(build_dir):
cmake_retval = ext_cmd(
'cmake', '..', f'-DCMAKE_BUILD_TYPE={build_type}',
)
make_retval = ext_cmd('make')
return cmake_retval | make_retval
if __name__ == '__main__':
raise SystemExit(main())
|
StarcoderdataPython
|
3338255
|
<filename>hw2_source_final.py
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import re
from bs4.element import NavigableString, Tag
import datetime
import urllib
import requests
import numpy as np
import scipy.stats as stats
import math
import matplotlib.pyplot as plt
# Function to scrape strings from html
def scrape_string(restaurant_attribute):
targets = []
for target in restaurant_attribute.children:
if isinstance(target, NavigableString):
targets.append(target)
if isinstance(target, Tag):
targets.extend(scrape_string(target))
return targets
def scrape_temperatures(fromMonth, fromDay, fromYear, toMonth, toDay, toYear):
url = 'http://www.georgiaweather.net/index.php?variable=HI&site=WATHORT'
values = {'fromMonth' : str(fromMonth),
'fromDay' : str(fromDay),
'fromYear' : str(fromYear),
'toMonth' : str(toMonth),
'toDay' : str(toDay),
'toYear': str(toYear)}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')
test = urllib.request.Request(url, data)
final = urlopen(test,data=data)
soup = BeautifulSoup(final, 'lxml')
# Returns the first table in html_content
table = soup.find_all("table")[1]
# Returns the column headings
column_headings = table.find_all("td", attrs={'class':'tdClassAlternate'})
column_heading_list = []
# Start at 1 b/c first element is not a column heading
for i in range(1,len(column_headings)):
column_heading_list.append(scrape_string(column_headings[i])[0])
# Returns number of columns in table
num_columns = len(column_heading_list)
# Initializes new dataframe
new_df = pd.DataFrame(columns=range(num_columns), index=[0])
new_df.columns = column_heading_list
# Returns all of the temperature data as well as 5 preceding values that are not needed
all_data = table.findAll('h5')
# Parses through table and puts stats into dataframe going across each row
column_marker = 0
row_marker = 0
for i in range(5,len(all_data)):
new_df.ix[row_marker,column_marker] = all_data[i].get_text()
column_marker += 1
if column_marker == num_columns:
row_marker += 1
column_marker = 0
return(new_df)
def paired_ttest(X,Y,alpha):
difference = np.array(X) - np.array(Y)
diff_mean = np.mean(difference)
diff_std = np.std(difference, ddof=1)
t_stat = diff_mean/(diff_std/math.sqrt(len(X)))
cv = stats.t.ppf(0.975, len(X)-1)
p_val = 1-stats.t.cdf(t_stat, len(X)-1)
print('T-statistic: ' + str(t_stat) + "\n" +
"P value: " + str(p_val) + "\n"
"Critical Value: " + str(cv))
y = scrape_temperatures('January', 1, 2016, 'February', 1, 2016)
x = scrape_temperatures('January', 1, 2017, 'February', 1, 2017)
max_temps_2017 = np.array(x.ix[:,1].astype(float))
max_temps_2016 = np.array(y.ix[:,1].astype(float))
paired_ttest(max_temps_2017,max_temps_2016,.05)
########################################################################
### Part 3 ###
# Problem 1 #
def results_by_sites(startYear,endYear):
for year in range(int(startYear),int(endYear)+1):
html_content = urlopen("http://www.cfbstats.com/{}/team/257/index.html".format(str(year)))
# Create html object
soup = BeautifulSoup(html_content, "lxml")
# Returns the first table in html_content
table = soup.find_all("table")[1]
# Returns column headings
column_headings = table.find_all("th")
# Creates a list containing the column names
col_names=[]
for th in column_headings:
col_names.append(th.get_text())
# Returns number of columns in table
num_of_columns = len(col_names)
# Initializes new dataframe
new_df = pd.DataFrame(columns=range(num_of_columns), index=[0])
new_df.columns = col_names
row_marker = 0
# Parses through table and puts stats into dataframe going across each row
for row in table.find_all('tr'):
column_marker = 0
columns = row.find_all('td')
for column in columns:
new_df.ix[row_marker,column_marker] = column.get_text()
column_marker += 1
if column_marker == num_of_columns:
row_marker += 1
if '@ : Away' in new_df.ix[len(new_df)-1,0]:
new_df = new_df.ix[:len(new_df)-2,:]
if year == int(startYear):
cont_table = np.zeros(shape=[2,3])
for row in range(len(new_df)):
if "+" in new_df.ix[row,'Opponent']:
if "W" in new_df.ix[row,'Result']:
cont_table[0,1]+=1
elif "L" in new_df.ix[row, "Result"]:
cont_table[1,1]+=1
elif "@" in new_df.ix[row,'Opponent']:
if "W" in new_df.ix[row,'Result']:
cont_table[0,2]+=1
elif "L" in new_df.ix[row, "Result"]:
cont_table[1,2]+=1
else:
if "W" in new_df.ix[row,'Result']:
cont_table[0,0]+=1
elif "L" in new_df.ix[row, "Result"]:
cont_table[1,0]+=1
test_statistic=0
for row in range(cont_table.shape[0]):
for col in range(cont_table.shape[1]):
test_statistic += (cont_table[row,col] - (np.sum(cont_table[row,:])*np.sum(cont_table[:,col])/np.sum(cont_table)))**2 / (np.sum(cont_table[row,:])*np.sum(cont_table[:,col])/np.sum(cont_table))
p_value = 1 - stats.chi2.cdf(test_statistic, (cont_table.shape[0]-1)*(cont_table.shape[1]-1))
return({'Contingency Table:':cont_table,'P-value:':p_value})
#a).
results = results_by_sites(2012,2016)
results
#b).
cont_table = results['Contingency Table:']
# Data to plot
labels = 'Home', 'Neutral', 'Away'
sizes = [cont_table[0,0],cont_table[0,1],cont_table[0,2]]
colors = ['lightskyblue', 'yellowgreen', 'lightcoral']
patches, percents, texts = plt.pie(sizes, colors=colors, shadow=True, startangle=90,autopct='%1.1f%%')
plt.legend(patches, labels, loc="best")
plt.title("Percentage of Wins by Location")
plt.axis('equal')
plt.show()
#c).
degrees_of_freedom = (cont_table.shape[0]-1)*(cont_table.shape[1]-1)
print('Degrees of Freedom: ' + str(degrees_of_freedom))
critical_value = stats.chi2.ppf(0.95, degrees_of_freedom)
print('Critical Value: ' + str(critical_value))
test_statistic=0
for row in range(cont_table.shape[0]):
for col in range(cont_table.shape[1]):
test_statistic += (cont_table[row,col] - (np.sum(cont_table[row,:])*np.sum(cont_table[:,col])/np.sum(cont_table)))**2 / (np.sum(cont_table[row,:])*np.sum(cont_table[:,col])/np.sum(cont_table))
p_value = 1 - stats.chi2.cdf(test_statistic, degrees_of_freedom)
print('P-Value: ' + str(p_value))
# Because the P-value is much larger than 0.05, I would not reject the null hypothesis that game results are independent of game sites.
|
StarcoderdataPython
|
1766854
|
# Exercise 1: A Good First Program
print "Hello World!"
|
StarcoderdataPython
|
8049169
|
from typing import Tuple, List, Union, Sequence, Dict, Callable, Any
from pathlib import Path
from spacy.vectors import Vectors
from spacy.strings import StringStore
from spacy.util import SimpleFrozenDict
import numpy
import srsly
from .util import registry, cosine_similarity
class Sense2Vec(object):
def __init__(
self,
shape: tuple = (1000, 128),
strings: StringStore = None,
senses: List[str] = [],
vectors_name: str = "sense2vec",
overrides: Dict[str, str] = SimpleFrozenDict(),
):
"""Initialize the Sense2Vec object.
shape (tuple): The vector shape.
strings (StringStore): Optional string store. Will be created if it
doesn't exist.
senses (list): Optional list of all available senses. Used in methods
that generate the best sense or other senses.
vectors_name (unicode): Optional name to assign to the Vectors object.
overrides (dict): Optional custom functions to use, mapped to names
registered via the registry, e.g. {"make_key": "custom_make_key"}.
RETURNS (Sense2Vec): The newly constructed object.
"""
self.vectors = Vectors(shape=shape, name=vectors_name)
self._row2key = None
self.strings = StringStore() if strings is None else strings
self.freqs: Dict[int, int] = {}
self.cache = None
self.cfg: Dict[str, Any] = {
"senses": senses,
"make_key": "default",
"split_key": "default",
}
self.cfg.update(overrides)
@property
def senses(self) -> Sequence[str]:
"""RETURNS (list): The available senses."""
return self.cfg.get("senses", [])
@property
def frequencies(self) -> List[Tuple[str, int]]:
"""RETURNS (list): The (key, freq) tuples by frequency, descending."""
freqs = [(self.strings[k], s) for k, s in self.freqs.items() if s is not None]
return sorted(freqs, key=lambda item: item[1], reverse=True)
def __len__(self) -> int:
"""RETURNS (int): The number of rows in the vectors table."""
return len(self.vectors)
def __contains__(self, key: Union[str, int]) -> bool:
"""Check if a key is in the vectors table.
key (unicode / int): The key to look up.
RETURNS (bool): Whether the key is in the table.
"""
key = self.ensure_int_key(key)
return key in self.vectors
def __getitem__(self, key: Union[str, int]) -> Union[numpy.ndarray, None]:
"""Retrieve a vector for a given key. Returns None if the key is not
in the table.
key (unicode / int): The key to look up.
RETURNS (numpy.ndarray): The vector.
"""
key = self.ensure_int_key(key)
if key in self.vectors:
return self.vectors[key]
return None
def __setitem__(self, key: Union[str, int], vector: numpy.ndarray):
"""Set a vector for a given key. Will raise an error if the key
doesn't exist.
key (unicode / int): The key.
vector (numpy.ndarray): The vector to set.
"""
key = self.ensure_int_key(key)
if key not in self.vectors:
raise ValueError(f"Can't find key {key} in table")
self.vectors[key] = vector
self._row2key = None
def __iter__(self):
"""YIELDS (tuple): String key and vector pairs in the table."""
yield from self.items()
def items(self):
"""YIELDS (tuple): String key and vector pairs in the table."""
for key, value in self.vectors.items():
yield self.strings[key], value
def keys(self):
"""YIELDS (unicode): The string keys in the table."""
for key in self.vectors.keys():
yield self.strings[key]
def values(self):
"""YIELDS (numpy.ndarray): The vectors in the table."""
yield from self.vectors.values()
@property
def row2key(self):
if not self._row2key:
self._row2key = {row: key for key, row in self.vectors.key2row.items()}
return self._row2key
@property
def make_key(self) -> Callable:
"""Get the function to make keys."""
return registry.make_key.get(self.cfg["make_key"])
@property
def split_key(self) -> Callable:
"""Get the function to split keys."""
return registry.split_key.get(self.cfg["split_key"])
def add(self, key: Union[str, int], vector: numpy.ndarray, freq: int = None):
"""Add a new vector to the table.
key (unicode / int): The key to add.
vector (numpy.ndarray): The vector to add.
freq (int): Optional frequency count.
"""
if not isinstance(key, int):
key = self.strings.add(key)
self.vectors.add(key, vector=vector)
if freq is not None:
self.set_freq(key, freq)
self._row2key = None
def get_freq(self, key: Union[str, int], default=None) -> Union[int, None]:
"""Get the frequency count for a given key.
key (unicode / int): They key to look up.
default: Default value to return if no frequency is found.
RETURNS (int): The frequency count.
"""
key = self.ensure_int_key(key)
return self.freqs.get(key, default)
def set_freq(self, key: Union[str, int], freq: int):
"""Set a frequency count for a given key.
key (unicode / int): The key to set the count for.
freq (int): The frequency count.
"""
if not isinstance(freq, int):
raise ValueError(f"Invalid frequency count: {repr(freq)} for '{key}'")
key = self.ensure_int_key(key)
self.freqs[key] = freq
def ensure_int_key(self, key: Union[str, int]) -> int:
"""Ensure that a key is an int by looking it up in the string store.
key (unicode / int): The key.
RETURNS (int): The integer key.
"""
return key if isinstance(key, int) else self.strings.add(key)
def similarity(
self,
keys_a: Union[Sequence[Union[str, int]], str, int],
keys_b: Union[Sequence[Union[str, int]], str, int],
) -> float:
"""Make a semantic similarity estimate of two keys or two sets of keys.
The default estimate is cosine similarity using an average of vectors.
keys_a (unicode / int / iterable): The string or integer key(s).
keys_b (unicode / int / iterable): The other string or integer key(s).
RETURNS (float): The similarity score.
"""
if isinstance(keys_a, (str, int)):
keys_a = [keys_a]
if isinstance(keys_b, (str, int)):
keys_b = [keys_b]
average_a = numpy.vstack([self[key] for key in keys_a]).mean(axis=0)
average_b = numpy.vstack([self[key] for key in keys_b]).mean(axis=0)
return cosine_similarity(average_a, average_b)
def most_similar(
self,
keys: Union[Sequence[Union[str, int]], str, int],
n: int = 10,
batch_size: int = 16,
) -> List[Tuple[str, float]]:
"""Get the most similar entries in the table. If more than one key is
provided, the average of the vectors is used.
keys (unicode / int / iterable): The string or integer key(s) to compare to.
n (int): The number of similar keys to return.
batch_size (int): The batch size to use.
RETURNS (list): The (key, score) tuples of the most similar vectors.
"""
if isinstance(keys, (str, int)):
keys = [keys]
for key in keys:
if key not in self:
raise ValueError(f"Can't find key {key} in table")
if self.cache and self.cache["indices"].shape[1] >= n:
n = min(len(self.vectors), n)
key = self.ensure_int_key(key)
key_row = self.vectors.find(key=key)
if key_row < self.cache["indices"].shape[0]:
rows = self.cache["indices"][key_row, :n]
scores = self.cache["scores"][key_row, :n]
entries = zip(rows, scores)
entries = [
(self.strings[self.row2key[r]], score)
for r, score in entries
if r in self.row2key
]
return entries
# Always ask for more because we'll always get the keys themselves
n = min(len(self.vectors), n + len(keys))
rows = numpy.asarray(self.vectors.find(keys=keys))
vecs = self.vectors.data[rows]
average = vecs.mean(axis=0, keepdims=True)
result_keys, _, scores = self.vectors.most_similar(
average, n=n, batch_size=batch_size
)
result = list(zip(result_keys.flatten(), scores.flatten()))
result = [(self.strings[key], score) for key, score in result if key]
result = [(key, score) for key, score in result if key not in keys]
return result
def get_other_senses(
self, key: Union[str, int], ignore_case: bool = True
) -> List[str]:
"""Find other entries for the same word with a different sense, e.g.
"duck|VERB" for "duck|NOUN".
key (unicode / int): The key to check.
ignore_case (bool): Check for uppercase, lowercase and titlecase.
RETURNS (list): The string keys of other entries with different senses.
"""
result = []
key = key if isinstance(key, str) else self.strings[key]
word, orig_sense = self.split_key(key)
versions = [word, word.upper(), word.title()] if ignore_case else [word]
for text in versions:
for sense in self.senses:
new_key = self.make_key(text, sense)
if sense != orig_sense and new_key in self:
result.append(new_key)
return result
def get_best_sense(
self, word: str, senses: Sequence[str] = tuple(), ignore_case: bool = True
) -> Union[str, None]:
"""Find the best-matching sense for a given word based on the available
senses and frequency counts. Returns None if no match is found.
word (unicode): The word to check.
senses (list): Optional list of senses to limit the search to. If not
set / empty, all senses in the vectors are used.
ignore_case (bool): Check for uppercase, lowercase and titlecase.
RETURNS (unicode): The best-matching key or None.
"""
sense_options = senses or self.senses
if not sense_options:
return None
versions = [word, word.upper(), word.title()] if ignore_case else [word]
freqs = []
for text in versions:
for sense in sense_options:
key = self.make_key(text, sense)
if key in self:
freq = self.get_freq(key, -1)
freqs.append((freq, key))
return max(freqs)[1] if freqs else None
def to_bytes(self, exclude: Sequence[str] = tuple()) -> bytes:
"""Serialize a Sense2Vec object to a bytestring.
exclude (list): Names of serialization fields to exclude.
RETURNS (bytes): The serialized Sense2Vec object.
"""
vectors_bytes = self.vectors.to_bytes()
freqs = list(self.freqs.items())
data = {"vectors": vectors_bytes, "cfg": self.cfg, "freqs": freqs}
if "strings" not in exclude:
data["strings"] = self.strings.to_bytes()
if "cache" not in exclude:
data["cache"] = self.cache
return srsly.msgpack_dumps(data)
def from_bytes(self, bytes_data: bytes, exclude: Sequence[str] = tuple()):
"""Load a Sense2Vec object from a bytestring.
bytes_data (bytes): The data to load.
exclude (list): Names of serialization fields to exclude.
RETURNS (Sense2Vec): The loaded object.
"""
data = srsly.msgpack_loads(bytes_data)
self.vectors = Vectors().from_bytes(data["vectors"])
self.freqs = dict(data.get("freqs", []))
self.cfg.update(data.get("cfg", {}))
if "strings" not in exclude and "strings" in data:
self.strings = StringStore().from_bytes(data["strings"])
if "cache" not in exclude and "cache" in data:
self.cache = data.get("cache", {})
self._row2key = None
return self
def to_disk(self, path: Union[Path, str], exclude: Sequence[str] = tuple()):
"""Serialize a Sense2Vec object to a directory.
path (unicode / Path): The path.
exclude (list): Names of serialization fields to exclude.
"""
path = Path(path)
self.vectors.to_disk(path)
srsly.write_json(path / "cfg", self.cfg)
srsly.write_json(path / "freqs.json", list(self.freqs.items()))
if "strings" not in exclude:
self.strings.to_disk(path / "strings.json")
if "cache" not in exclude and self.cache:
srsly.write_msgpack(path / "cache", self.cache)
def from_disk(self, path: Union[Path, str], exclude: Sequence[str] = tuple()):
"""Load a Sense2Vec object from a directory.
path (unicode / Path): The path to load from.
exclude (list): Names of serialization fields to exclude.
RETURNS (Sense2Vec): The loaded object.
"""
path = Path(path)
strings_path = path / "strings.json"
freqs_path = path / "freqs.json"
cache_path = path / "cache"
self.vectors = Vectors().from_disk(path)
self.cfg.update(srsly.read_json(path / "cfg"))
if freqs_path.exists():
self.freqs = dict(srsly.read_json(freqs_path))
if "strings" not in exclude and strings_path.exists():
self.strings = StringStore().from_disk(strings_path)
if "cache" not in exclude and cache_path.exists():
self.cache = srsly.read_msgpack(cache_path)
self._row2key = None
return self
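# --- Hedged usage sketch (not part of the original module) ---
# Assumes the default registered make_key/split_key functions join and split
# entries as "word|SENSE", and that vector length matches shape[1]; numpy is
# already imported at the top of this module.
#
# s2v = Sense2Vec(shape=(10, 4), senses=["NOUN", "VERB"])
# s2v.add("duck|NOUN", numpy.asarray([1, 2, 3, 4], dtype="float32"), freq=123)
# s2v.add("duck|VERB", numpy.asarray([4, 3, 2, 1], dtype="float32"), freq=45)
# assert "duck|NOUN" in s2v
# print(s2v.get_other_senses("duck|NOUN"))   # expected: ["duck|VERB"]
# print(s2v.get_best_sense("duck"))          # expected: "duck|NOUN" (higher freq)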
|
StarcoderdataPython
|
8089799
|
import paho.mqtt.client as mqtt
import json
import time
import os
from random import *
# Host name of the local mosquitto broker is read from the environment variable MqttBrokerAddress
mosquitto_host = os.environ.get("MqttBrokerAddress", "localhost")
# Port of the mosquitto broker.
mosquitto_port = 1883
# Connects to the mosquitto broker
def connect(clientName):
client = mqtt.Client(clientName)
    try:
        client.connect(mosquitto_host, mosquitto_port)
    except Exception:
        raise ConnectionError("Failed to connect to {}:{}. Check ENV variable MqttBrokerAddress".format(mosquitto_host, mosquitto_port))
return client
# Reacts to a published message
def on_message(client, userdata, msg):
print(f"Received Message topic {msg.topic} -> {msg.payload.decode()}")
calculateScore(msg)
# Calculates a basic score based on the message
def calculateScore(msg):
# ... code to calculate score goes here...
score = randint(1, 100)
# .....
print(f"Posting updated score {score}")
# Sends the calculated score
scoreMsg = {"score":score}
scoreMsgString = json.dumps(scoreMsg)
scoreTopic = "samplemonitormodule/public/scoreupdate"
print(f"Publishing updated score {scoreTopic} -> {scoreMsgString}")
client.publish(scoreTopic, scoreMsgString)
# Starts the application, connects to the mosquitto module
print ("Starting Sample Monitoring Module... ")
client = connect("sample-monitoring-module")
# Registers on_message as a callback that will be invoked when telemetry is received
client.on_message=on_message
client.loop_start()
# Subscribes to the telemetry message from the telemetry and command daemon
client.subscribe("vehicleabstractionmodule/public/telemetry")
# Create loop
while True:
time.sleep(5)
client.loop_stop()
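# --- Hedged local-test sketch (not part of the original module) ---
# A minimal publisher that feeds the telemetry topic this module subscribes to,
# useful for exercising on_message/calculateScore against a local broker.
# Assumes a mosquitto broker is reachable at MqttBrokerAddress.
#
# test_client = mqtt.Client("telemetry-test-publisher")
# test_client.connect(mosquitto_host, mosquitto_port)
# test_client.publish("vehicleabstractionmodule/public/telemetry",
#                     json.dumps({"speed": 42, "rpm": 1800}))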
|
StarcoderdataPython
|
6648657
|
<reponame>thomson131/tiny_python_projects
#!/usr/bin/env python3
"""
Author : james <<EMAIL>>
Date : 2022-02-21
Purpose: Create a picnic list
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Create a picnic list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('items',
metavar='items',
nargs='+',
help='One or more items to bring to the picnic')
parser.add_argument('-s',
'--sorted',
help='Should the program sort the list of items alphabetically?',
action='store_true')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Print the list"""
args = get_args()
sort_arg = args.sorted
items_arg = args.items
if sort_arg == True:
items_arg = sorted(items_arg)
else:
pass
if len(items_arg) > 2:
first_items = ', '.join(items_arg[0:-1]) + ','
last_item = ' and ' + items_arg[-1]
formatted_items = first_items + last_item
elif len(items_arg) == 2:
formatted_items = ' and '.join(items_arg)
else:
formatted_items = items_arg[0]
print("You are bringing {}.".format(formatted_items))
# --------------------------------------------------
if __name__ == '__main__':
main()
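# --- Hedged usage example (command line, not part of the original script) ---
# $ ./picnic.py -s chips soda "potato salad"
# You are bringing chips, potato salad, and soda.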
|
StarcoderdataPython
|
5010653
|
from text_utils.pronunciation.main import eng_to_arpa, ger_to_ipa
result = ger_to_ipa(
eng_sentence="This is a test",
consider_annotations=False,
)
print(result)
|
StarcoderdataPython
|
6470386
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 08:16:36 2019
@author: john.onwuemeka; <NAME>
"""
import numpy as np
def get_good_snr_freq_range(snrthres,signal1,signal2,snr1,snr2,freqsignal1,freqsignal2,noise1,noise2):
"""
Function to determine useable frequency range of spectra based on the
signal-to-noise ratio [SNR]
Inputs:
---------
snrthres: user-defined SNR (defaults to 2)
signal1: signal spectrum of event 1
signal2: signal spectrum of event 2 (if available)
snr1: SNR of event 1
snr2: SNR of event 2 (if available)
freqsignal1: frequency bins of signal 1
freqsignal2: frequency bins of signal 2
noise1: noise spectrum of event 1
noise2: noise spectrum of event 2
Returns:
----------
    datas: signal1 windowed over the frequency range where the SNR threshold is met or surpassed
    datae: signal2 (if available) windowed over the frequency range where the SNR threshold is met or surpassed
    fnm: frequency range of signal1 where the SNR threshold is met or surpassed
    fne: frequency range of signal2 where the SNR threshold is met or surpassed
    noisem: event1 noise windowed over the frequency range where the SNR threshold is met or surpassed
    noisee: event2 noise windowed over the frequency range where the SNR threshold is met or surpassed
    Note: fnm and fne are exactly the same when analysing spectral ratios; fne is
    None when analysing a single spectrum
"""
datas = None; datae = None; fnm = None; fne = None;
noisem = None; noisee = None;
quit_calc = 'N'
try:
try:
spm_low = np.where(snr1 >= snrthres )[0][0]
except:
spm_low = 0
try:
half_up = snr1[slice(spm_low,len(snr1)-1)]
spm_high = np.where(half_up < snrthres )[0][0] + spm_low
except:
spm_high = len(snr1) - 1
except:
quit_calc = 'Y'
pass
if signal2 is not None:
try:
spe_low = np.where(snr2 >= snrthres)[0][0]
except:
spe_low = 0
try:
half_up = snr2[slice(spe_low,len(snr2)-1)]
spe_high = np.where(half_up < snrthres )[0][0] + spe_low
except:
spe_high = len(snr2) - 1
low_end = max(spe_low,spm_low)
high_end = min(spe_high,spm_high)
fnm = freqsignal1[slice(low_end,high_end)] # change to sp later
fne = freqsignal2[slice(low_end,high_end)]
datas = signal1[slice(low_end,high_end)] # change to sp later
datae = signal2[slice(low_end,high_end)]
noisem = noise1[slice(low_end,high_end)]
noisee = noise2[slice(low_end,high_end)]
else:
if quit_calc == 'N':
fnm = freqsignal1[slice(spm_low,spm_high)] # change to sp later
datas = signal1[slice(spm_low,spm_high)] # change to sp later
noisem = noise1[slice(spm_low,spm_high)]
return datas,datae,fnm,fne,noisem,noisee
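# --- Hedged usage sketch with synthetic spectra (not from the original workflow) ---
# Single-spectrum case: signal2/snr2/noise2 are None, so only datas/fnm/noisem are filled.
if __name__ == '__main__':
    freqs = np.linspace(0.1, 50.0, 500)
    sig = 1.0 / freqs                      # toy falling source spectrum
    noise = np.full_like(freqs, 0.01)      # flat toy noise floor
    snr = sig / noise
    d1, d2, f1, f2, n1, n2 = get_good_snr_freq_range(2.0, sig, None, snr, None,
                                                     freqs, None, noise, None)
    print('usable band: %.2f - %.2f Hz' % (f1[0], f1[-1]))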
|
StarcoderdataPython
|
5165014
|
ipBadLapErr = -23000 # bad network configuration
ipBadCnfgErr = -23001 # bad IP configuration error
ipNoCnfgErr = -23002 # missing IP or LAP configuration error
ipLoadErr = -23003 # error in MacTCP load
ipBadAddr = -23004 # error in getting address
connectionClosing = -23005 # connection is closing
invalidLength = -23006
connectionExists = -23007 # request conflicts with existing connection
connectionDoesntExist = -23008 # connection does not exist
insufficientResources = -23009 # insufficient resources to perform request
invalidStreamPtr = -23010
streamAlreadyOpen = -23011
connectionTerminated = -23012
invalidBufPtr = -23013
invalidRDS = -23014
invalidWDS = -23014
openFailed = -23015
commandTimeout = -23016
duplicateSocket = -23017
# Error codes from internal IP functions
ipDontFragErr = -23032 # Packet too large to send w/o fragmenting
ipDestDeadErr = -23033 # destination not responding
icmpEchoTimeoutErr = -23035 # ICMP echo timed-out
ipNoFragMemErr = -23036 # no memory to send fragmented pkt
ipRouteErr = -23037 # can't route packet off-net
nameSyntaxErr = -23041
cacheFault = -23042
noResultProc = -23043
noNameServer = -23044
authNameErr = -23045
noAnsErr = -23046
dnrErr = -23047
outOfMemory = -23048
|
StarcoderdataPython
|
3263010
|
<filename>day02/passwords.py
def validate(line):
split = line.split(':')
left, psswd = split[0].strip(), split[1].strip()
split = left.split()
bounds, letter = split[0].strip(), split[1].strip()
split = bounds.split('-')
lower, upper = int(split[0]), int(split[1])
return len(list(filter(lambda c: c == letter, psswd))) in range(lower, upper+1)
with open('in.txt') as f:
lines = f.readlines()
stripped = map(lambda s: s.strip(), lines)
non_empty = filter(lambda s: s, stripped)
valid = sum(map(validate, non_empty))
print(valid)
|
StarcoderdataPython
|
12836394
|
from os import system , name
def Run(Input):
if name == "nt":
# Windows Machine
system('dir')
else:
# Linux/Unix Machine
system('ls')
|
StarcoderdataPython
|
8097042
|
<gh_stars>1-10
from gwk.records.excel import save_as_uigf
from gwk.records.models import migrate
def main(uid):
with open(f'./ggr_{uid}.json', 'r', encoding='UTF-8') as f:
old = migrate(f)
with open(f'./records_{uid}.json', 'w', encoding='UTF-8') as f:
old.dump(f)
save_as_uigf(old, f'./records_{uid}.xlsx')
if __name__ == '__main__':
print('请输入已导出JSON的祈愿uid:', end='')
main(input())
|
StarcoderdataPython
|
71927
|
#!/usr/bin/env python
import os,sys
curdir = os.path.abspath(".")
for f in [f for f in os.listdir(curdir) if f.endswith(".cxd") and not f.endswith("_bg.cxd")]:
fout = f[:-4] + ".out"
cmd = "sbatch --output=%s/%s cxd_to_h5.sh %s/%s" % (curdir, fout, curdir, f)
for arg in sys.argv[1:]:
cmd += " " + arg
print(cmd)
os.system(cmd)
|
StarcoderdataPython
|
4911437
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.utils.translation import ugettext_noop as _
# this is just here to mark some strings from settings for translation
# in a safer way.
# there is almost certainly a smarter way to do this.
_("Monitor Workers")
_("Inspect Data")
_("Raw Data")
_("Manage Deployments")
_("CommTrack") # To mark this string for translation to CommCare Supply
_("CommCare Supply")
|
StarcoderdataPython
|
3406569
|
<reponame>Greeser/gate-decorator-pruning
"""
* Copyright (C) 2019 <NAME>
* If you are using this code in your research, please cite the paper:
* Gate Decorator: Global Filter Pruning Method for Accelerating Deep Convolutional Neural Networks, in NeurIPS 2019.
"""
import torch
import torchvision
from torchvision import transforms
import os
from config import cfg
def _get_loaders(root):
train_dir = os.path.join(root, 'train')
val_dir = os.path.join(root, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = torchvision.datasets.ImageFolder(
train_dir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_dataset = torchvision.datasets.ImageFolder(
val_dir,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg.data.batch_size,
shuffle=cfg.data.shuffle,
num_workers=cfg.data.num_workers,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=cfg.data.test_batch_size,
shuffle=False,
num_workers=cfg.data.num_workers,
pin_memory=True
)
return train_loader, val_loader
def get_imagenet():
return _get_loaders('./data/imagenet12')
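# --- Hedged usage sketch (assumes ./data/imagenet12/{train,val} exist and cfg.data is populated) ---
#
# train_loader, val_loader = get_imagenet()
# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)   # e.g. torch.Size([N, 3, 224, 224]) torch.Size([N])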
|
StarcoderdataPython
|
1956850
|
<gh_stars>1-10
def aumentar(x):
    s = x + 1
    # print(f'Someone gave you a coin: your coins went from {x} up to {s}')
    return s
def diminuir(x):
    s = x - 1
    # print(f'You gave away a coin: your coins went from {x} down to {s}')
    return s
def dobro(x):
    s = x * 2
    # print(f'You won a raffle: your coins doubled from {x} to {s}')
    return s
def metade(x):
    s = x / 2
    # print(f'You lost a bet: your coins were halved from {x} to {s}')
    return s
def moeda(msg, p=False):
    # Returns the value as a formatted string so it can be embedded in f-strings;
    # p=True prefixes the value with the currency symbol.
    if p:
        return f'R${msg}'
    return f'{msg}'
def resumo(x):
    print("-" * 30)
    print("VALUE SUMMARY")
    print(f"Analysed price: {moeda(x)}")
    print(f"Increased value: {moeda(aumentar(x))}")
    print(f"Decreased value: {moeda(diminuir(x))}")
    print(f"Double of the value: {moeda(dobro(x))}")
    print(f"Half of the value: {moeda(metade(x))}")
    return x
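# --- Hedged usage sketch ---
if __name__ == '__main__':
    resumo(7.5)   # prints the summary block, e.g. "Double of the value: 15.0"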
|
StarcoderdataPython
|
8176042
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 20 09:58:28 2020
@author: TheBeast
"""
# =============================================================================
# Clear Variables
# =============================================================================
# Clear variables before running
from IPython import get_ipython
get_ipython().magic('reset -sf')
# =============================================================================
# Import Libraries
# =============================================================================
import numpy as np
import os
import requests
from scipy import signal
import plotly.express as px
from plotly.offline import plot
from matplotlib import rcParams
from matplotlib import pyplot as plt
rcParams['figure.figsize'] = [20, 4]
rcParams['font.size'] =15
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
rcParams['figure.autolayout'] = True
# =============================================================================
# Import the Data
# =============================================================================
fname = []
for j in range(4):
# fname.append('steinmetz_part%d.npz'%j)
if j==0:
fname.append('steinmetz_lfp.npz')
else:
fname.append('steinmetz_part%d.npz'%j)
url = ["https://osf.io/kx3v9/download"]#["https://osf.io/agvxh/download"]
url.append("https://osf.io/agvxh/download")
url.append("https://osf.io/uv3mw/download")
url.append("https://osf.io/ehmw2/download")
# url.append("https://osf.io/kx3v9/download")
for j in range(len(url)):
if not os.path.isfile(fname[j]):
try:
r = requests.get(url[j])
except requests.ConnectionError:
print("!!! Failed to download data !!!")
else:
if r.status_code != requests.codes.ok:
print("!!! Failed to download data !!!")
else:
with open(fname[j], "wb") as fid:
fid.write(r.content)
# =============================================================================
# Data Parameters
# =============================================================================
fs=100
# =============================================================================
# Sorting Parts of Data
# =============================================================================
#Here we pull out the data and store it in 'alldata'
alldat_lfp = np.array([])
alldat = np.array([])
alldat_lfp = np.hstack((alldat, np.load('steinmetz_lfp.npz', allow_pickle=True)['dat']))
for j in range(1,len(fname)):
if j !=0: #CHANGE BACK TO 0
alldat = np.hstack((alldat, np.load('steinmetz_part%d.npz'%j, allow_pickle=True)['dat']))
# Get all the mouse names
all_mouse_names =[]
for ii in range(0,np.size(alldat)):
all_mouse_names.append(alldat[ii]['mouse_name'])
#Gets all the unique mouse names
all_mouse_names_unique= list(set(all_mouse_names))
## _____________________Define Brain Regions _________________________________
# groupings of brain regions
regions = ["Vis Ctx", "Thal", "Hippo", "Motor", "other ctx", "midbrain", "basal ganglia", "cortical subplate", "other"]
brain_groups = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"], # visual cortex
["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL", "PT", "RT", "SPF", "TH", "VAL", "VPL", "VPM"], # thalamus
["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"], # hippocampal
["MD","MG","MOp","MOs","MRN"], #Motor areas
["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB", "ORBm", "PIR", "PL", "SSp", "SSs", "RSP"," TT"], # non-visual cortex
["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm", "SCig", "SCsg", "ZI"], # midbrain
["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr", "SI"], # basal ganglia
["BLA", "BMA", "EP", "EPd", "MEA"] # cortical subplate
]
# Making a trial time variable from -50 to 200
# AA=np.linspace(0, 250, 1)
# BB=[50]*len(AA)
Trial_t=np.linspace(-50, 200, num=250)#np.subtract(AA,BB)
# =============================================================================
# Select Mouse and Pull Out Info
# =============================================================================
# select just one of the recordings here. 11 is nice because it has some neurons in vis ctx.
file_num=11 #This is one mouse session that we will look at
dat = alldat[file_num]
dat.update(alldat_lfp[file_num])
print(dat.keys())
# _____________________________________________________________________________
dt = dat['bin_size'] # binning at 10 ms
NT = dat['spks'].shape[-1]
response = dat['response'] # right - nogo - left (-1, 0, 1)
vis_right = dat['contrast_right'] # 0 - low - high
vis_left = dat['contrast_left'] # 0 - low - high
#___________________________Brain Area Spikes only_______________________________
nareas = 4 # only the top 4 regions are in this particular mouse
NN = len(dat['brain_area']) # number of neurons
barea = nareas * np.ones(NN, ) # last one is "other"
#Loop over 4 brain areas
for j in range(nareas):
barea[np.isin(dat['brain_area'], brain_groups[j])] = j # assign a number to each region
#___________________________Brain Area LFP only_______________________________
nareas = 4 # only the top 4 regions are in this particular mouse
NN = len(dat['brain_area_lfp']) # number of neurons
barea_lfp = nareas * np.ones(NN, ) # last one is "other"
#Loop over 4 brain areas
for j in range(nareas):
barea_lfp[np.isin(dat['brain_area_lfp'], brain_groups[j])] = j # assign a number to each region
##________________ Now we pull out features __________________________________
# plt.plot(1/dt * dat['spks'][1][:,:].mean(axis=1))
plt.show()
#Right more than left
Look_area=3.
y_RL=dat['lfp'][barea_lfp==Look_area,:,vis_right>vis_left].mean(axis=(0,1))
#Left more than right
y_LR=dat['lfp'][barea_lfp==Look_area][vis_left>vis_right,:].mean(axis=(0,1))
fig = px.line(x=Trial_t, y=[y_RL,y_LR])
# fig = px.line(x=Trial_t, y=np.ndarray.tolist(y_RL))
fig.show()
plot(fig, auto_open=True)
Pxx_den=[]
for ii in range(0,len(alldat[10]['lfp'][0])):
y=alldat[10]['pupil'][0][ii]
f, Pxx = signal.welch(y[50:], fs, nperseg=1024)
Pxx_den.append(Pxx)
fig = px.line(x=f, y=Pxx_den)
fig.show()
plot(fig, auto_open=True)
from bokeh.layouts import gridplot
from bokeh.plotting import figure, output_file, show
p1 = figure( title="Stock Closing Prices")
p1.xaxis.axis_label = 'Date'
p1.yaxis.axis_label = 'Price'
p1.line(Trial_t,y_LR, color='#A6CEE3', legend_label='AAPL')
p1.legend.location = "top_left"
output_file("stocks.html", title="stocks.py example")
# show(gridplot([[p1]], plot_width=400, plot_height=400)) # open a browser
show(p1) # open a browser
### Junk
for j in range(nareas):
ax = plt.subplot(1,nareas,j+1)
plt.plot(1/dt * dat['spks'][barea==j][:,np.logical_and(vis_left==0, vis_right>0)].mean(axis=(0,1)))
plt.plot(1/dt * dat['spks'][barea==j][:,np.logical_and(vis_left>0 , vis_right==0)].mean(axis=(0,1)))
plt.plot(1/dt * dat['spks'][barea==j][:,np.logical_and(vis_left==0 , vis_right==0)].mean(axis=(0,1)))
plt.plot(1/dt * dat['spks'][barea==j][:,np.logical_and(vis_left>0, vis_right>0)].mean(axis=(0,1)))
plt.text(.25, .92, 'n=%d'%np.sum(barea==j), transform=ax.transAxes)
if j==0:
plt.legend(['right only', 'left only', 'neither', 'both'], fontsize=12)
ax.set(xlabel = 'binned time', ylabel = 'mean firing rate (Hz)', title = regions[j])
|
StarcoderdataPython
|
3241213
|
from sys import argv
import json
import os
import requests
from base64 import b64encode
ENDPOINT_URL = 'https://vision.googleapis.com/v1/images:annotate'
def get_food_name(b64_text: bytes) -> list:
api_key = os.environ['VISION_API']
img_requests = []
text = b64_text
img_requests.append({
'image': {'content': text},
'features': [{
'type': 'LABEL_DETECTION',
'maxResults': 10
}]
})
response = requests.post(ENDPOINT_URL,
data=json.dumps({"requests": img_requests}).encode(),
params={'key': api_key},
headers={'Content-Type': 'application/json'})
resps = []
for resp in response.json()['responses'][0]['labelAnnotations']:
resps.append(resp['description'])
return resps
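# --- Hedged usage sketch (assumes the VISION_API env var is set and a local image path is given) ---
#
# if __name__ == '__main__':
#     with open(argv[1] if len(argv) > 1 else 'food.jpg', 'rb') as img:
#         labels = get_food_name(b64encode(img.read()).decode())
#     print(labels)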
|
StarcoderdataPython
|
1837815
|
'''
Created on 9.11.2016
@author: <NAME>
'''
import numpy as np
import xevacam.xevadll as xdll
from contextlib import contextmanager
import threading
import queue
import sys
import time
import struct
import xevacam.utils as utils
from xevacam.utils import kbinterrupt_decorate
'''
class ExceptionThread(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(self, *args, **kwargs)
self.exc_queue = queue.Queue()
'''
class XevaCam(object):
def __init__(self, calibration=''):
'''
Constructor
@param calibration: Bytes string path to the calibration file (.xca)
'''
self.handle = 0
self.calibration = calibration.encode('utf-8') # Path to .xca file
# Involve threading
self._enabled = False
self.enabled_lock = threading.Lock()
self.handlers = [] # For streams, objects with write() method
# Exception queue for checking if an exception occurred inside thread
self.exc_queue = queue.Queue()
self._capture_thread = threading.Thread(name='capture_thread',
target=self.capture_frame_stream)
# args=(self.handlers))
self._record_time = 0 # Used for measuring the overall recording time
self._times = [] # Used for saving time stamps for each frame
@contextmanager
def opened(self, camera_path='cam://0', sw_correction=True):
'''
Context manager for open(). Opens connection to the camera and closes it in controlled fashion when
exiting the context.
@param camera_path: String path to the camera. Default is 'cam://0'
@param sw_correction: Use previously defined calibration file (.xca)
'''
try:
yield self.open(camera_path, sw_correction)
finally:
self.close()
def open(self, camera_path='cam://0', sw_correction=True):
'''
        Opens a connection to the camera.
@param camera_path: String path to the camera. Default is 'cam://0'
@param sw_correction: Use previously defined calibration file (.xca)
'''
self.handle = \
xdll.XDLL.open_camera(camera_path.encode('utf-8'), 0, 0)
print('XCHANDLE:', self.handle)
if self.handle == 0:
raise Exception('Handle is NULL')
if not xdll.XDLL.is_initialised(self.handle):
raise Exception('Initialization failed.')
if self.calibration:
if sw_correction:
flag = xdll.XDLL.XLC_StartSoftwareCorrection
else:
flag = 0
error = xdll.XDLL.load_calibration(self.handle,
self.calibration,
flag)
if error != xdll.XDLL.I_OK:
msg = 'Could\'t load' + \
'calibration file ' + \
str(self.calibration) + \
xdll.error2str(error)
raise Exception(msg)
return self
def close(self):
'''
Stops capturing, closes capture thread, closes connection.
'''
try:
if xdll.XDLL.is_capturing(self.handle):
print('Stop capturing')
error = xdll.XDLL.stop_capture(self.handle)
if error != xdll.XDLL.I_OK:
xdll.print_error(error)
raise Exception('Could not stop capturing')
self.enabled = False
self._capture_thread.join(1)
if self._capture_thread.isAlive():
raise Exception('Thread didn\'t stop.')
except:
print('Something went wrong closing the camera.')
raise
finally:
if xdll.XDLL.is_initialised(self.handle):
print('Closing connection.')
xdll.XDLL.close_camera(self.handle)
@property
def enabled(self):
'''
Is capture thread enabled.
@return: True/False
'''
with self.enabled_lock:
val = self._enabled
return val
@enabled.setter
def enabled(self, value):
'''
Signals capture thread to shut down when set to False.
Otherwise always True.
'''
with self.enabled_lock:
self._enabled = value
def is_alive(self):
return self._capture_thread.isAlive()
def get_frame_size(self):
'''
Asks the camera what is the frame size in bytes.
@return: c_ulong
'''
frame_size = xdll.XDLL.get_frame_size(self.handle) # Size in bytes
return frame_size
def get_frame_dims(self):
'''
Returns frame dimensions in tuple(height, width).
@return: tuple (c_ulong, c_ulong)
'''
frame_width = xdll.XDLL.get_frame_width(self.handle)
frame_height = xdll.XDLL.get_frame_height(self.handle)
print('width:', frame_width, 'height:', frame_height)
return frame_height, frame_width
def get_frame_type(self):
'''
Returns enumeration of camera's frame type.
@return: c_ulong
'''
return xdll.XDLL.get_frame_type(self.handle)
def get_pixel_dtype(self):
'''
Returns numpy dtype of the camera's configured data type for frame
@return: Numpy dtype (np.uint8, np.uint16 or np.uint32)
'''
bytes_in_pixel = self.get_pixel_size()
conversions = (None, np.uint8, np.uint16, None, np.uint32)
try:
pixel_dtype = conversions[bytes_in_pixel]
except:
raise Exception('Unsupported pixel size %s' % str(bytes_in_pixel))
        if pixel_dtype is None:
raise Exception('Unsupported pixel size %s' % str(bytes_in_pixel))
return pixel_dtype
def get_pixel_size(self):
'''
Returns a frame pixel's size in bytes.
@return: int
'''
frame_t = xdll.XDLL.get_frame_type(self.handle)
return xdll.XDLL.pixel_sizes[frame_t]
def get_frame(self, buffer, frame_t, size, flag=0):
'''
Reads a frame from camera. Raises an exception on errors.
@param buffer: bytes buffer (output) to which a frame is read from
the camera.
@param frame_t: frame type enumeration. Use get_frame_type() to find
the native type.
@param size: frame size in bytes. Use get_frame_dims()
@param flag: Type of execution. 0 is non-blocking, xdll.XGF_Blocking
is blocking.
@return: True if got frame, otherwise False
'''
# frame_buffer = \
# np.zeros((frame_size / pixel_size,),
# dtype=np.int16)
# frame_buffer = bytes(frame_size)
error = xdll.XDLL.get_frame(self.handle,
frame_t,
flag,
# frame_buffer.ctypes.data,
buffer,
size)
# ctypes.cast(buffer, ctypes.POINTER(ctypes.c))
if error not in (xdll.XDLL.I_OK, xdll.XDLL.E_NO_FRAME):
raise Exception(
'Error while getting frame: %s' % xdll.error2str(error))
# frame_buffer = np.reshape(frame_buffer, frame_dims)
return error == xdll.XDLL.I_OK # , frame_buffer
def set_handler(self, handler, incl_ctrl_frames=False):
'''
Adds a new output to which frames are written.
@param handler: a file-like object, a stream or object with write()
and read() methods.
'''
self.handlers.append((handler, incl_ctrl_frames))
def clear_handlers(self):
name = 'clear_handlers'
if not self.is_alive():
self.handlers.clear()
print(name, 'Cleared handlers')
else:
raise Exception('Can\'t clear handlers when thread is alive')
def check_thread_exceptions(self):
name = 'check_thread_exceptions'
try:
exc = self.exc_queue.get(block=False)
except queue.Empty:
pass # No exceptions
else:
exc_type, exc_obj, exc_trace = exc
print(name, '%s: %s' % (str(exc_type), str(exc_trace)))
raise exc
@kbinterrupt_decorate
def start_recording(self):
'''
Starts recording frames to handlers.
'''
self.enabled = True
self._capture_thread = threading.Thread(name='capture_thread',
target=self.capture_frame_stream)
self._capture_thread.start()
@kbinterrupt_decorate
def wait_recording(self, seconds):
'''
Blocks execution and checks there are no exceptions.
@param seconds: Time how long the function blocks the execution.
'''
# self.record_time
start = time.time()
while True:
self.check_thread_exceptions() # Raises exception
end = time.time()
t = end-start
if end-start >= seconds:
break
self._record_time += t
# time.sleep(seconds)
@kbinterrupt_decorate
def stop_recording(self):
'''
Stops capturing frames after the latest one is done capturing.
@return: Metadata tuple array
'''
start = time.time()
self.enabled = False
self._capture_thread.join(5)
if self._capture_thread.isAlive():
raise Exception('Thread didn\'t stop.')
end = time.time()
self._record_time += end-start
error = xdll.XDLL.stop_capture(self.handle)
if error != xdll.XDLL.I_OK:
xdll.print_error(error)
raise Exception(
'Could not stop capturing. %s' % xdll.error2str(error))
self.check_thread_exceptions() # Raises exception
# Return ENVI metadata about the recording
frame_dims = self.get_frame_dims()
frame_type = self.get_frame_type()
meta = (('samples', frame_dims[1]),
('bands', self.frames_count),
('lines', frame_dims[0]),
('data type',
utils.datatype2envitype(
'u' + str(xdll.XDLL.pixel_sizes[frame_type]))),
('interleave', 'bil'),
('byte order', 1),
('description', 'Capture time = %d\nFrame time stamps = %s' % (self._record_time, str(self._times))))
return meta
def capture_frame_stream(self):
'''
Thread function for continuous camera capturing.
Keeps running until 'enabled' property is set to False.
'''
name = 'capture_frame_stream'
try:
error = xdll.XDLL.start_capture(self.handle)
if error != xdll.XDLL.I_OK:
xdll.print_error(error)
raise Exception(
'%s Starting capture failed! %s' % (name, xdll.error2str(error)))
if xdll.XDLL.is_capturing(self.handle) == 0:
for i in range(5):
if xdll.XDLL.is_capturing(self.handle) == 0:
print(name, 'Camera is not capturing. Retry number %d' % i)
time.sleep(0.1)
else:
break
if xdll.XDLL.is_capturing(self.handle) == 0:
raise Exception('Camera is not capturing.')
elif xdll.XDLL.is_capturing(self.handle):
self.frames_count = 0
size = self.get_frame_size()
dims = self.get_frame_dims()
frame_t = self.get_frame_type()
# pixel_size = self.get_pixel_size()
print(name, 'Size:', size, 'Dims:', dims, 'Frame type:', frame_t)
frame_buffer = bytes(size)
# ctrl_frame_buffer = bytearray(4) # 32 bits
start_time = utils.get_time()
while self._enabled:
# frame_buffer = \
# np.zeros((size / pixel_size,),
# dtype=np.int16)
# buffer = memoryview(frame_buffer)
while True:
ok = self.get_frame(frame_buffer,
frame_t=frame_t,
size=size,
flag=0) # Non-blocking
# xdll.XGF_Blocking
if ok:
curr_time = utils.get_time() - start_time
self._times.append(curr_time)
ctrl_frame_buffer = struct.pack('I', curr_time) # 4 bytes
for h, incl_ctrl_frame in self.handlers:
# print(name,
# 'Writing to %s' % str(h.__class__.__name__))
if incl_ctrl_frame:
h.write(ctrl_frame_buffer)
wrote_bytes = h.write(frame_buffer)
# print(name,
# 'Wrote to %s:' % str(h.__class__.__name__),
# wrote_bytes,
# 'bytes')
break
# else:
# print(name, 'Missed frame', i)
self.frames_count += 1
else:
raise Exception('Camera is not capturing.')
except Exception as e:
self.exc_queue.put(sys.exc_info())
            print(name, '%s: %s' % (type(e).__name__, e))
print(name, 'Thread closed')
def capture_single_frame(self):
'''
TODO: Not tested
'''
name = 'capture_single_frame'
frame = None
error = xdll.XDLL.start_capture(self.handle)
if error != xdll.XDLL.I_OK:
xdll.print_error(error)
raise Exception(
'%s Starting capture failed! %s' % (name, xdll.error2str(error)))
if xdll.XDLL.is_capturing(self.handle) == 0:
for i in range(5):
if xdll.XDLL.is_capturing(self.handle) == 0:
print(name, 'Camera is not capturing. Retry number %d' % i)
time.sleep(0.1)
else:
break
if xdll.XDLL.is_capturing(self.handle) == 0:
raise Exception('Camera is not capturing.')
elif xdll.XDLL.is_capturing(self.handle):
size = self.get_frame_size()
dims = self.get_frame_dims()
frame_t = self.get_frame_type()
# pixel_size = self.get_pixel_size()
print(name, 'Size:', size, 'Dims:', dims, 'Frame type:', frame_t)
frame_buffer = bytes(size)
while True:
                ok = self.get_frame(frame_buffer,
                                    frame_t=frame_t,
                                    size=size,
                                    flag=0)  # Non-blocking
# xdll.XGF_Blocking
if ok:
frame = frame_buffer
break
# else:
# print(name, 'Missed frame', i)
else:
raise Exception('Camera is not capturing.')
print(name, 'Finished')
return frame, size, dims, frame_t
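# --- Hedged usage sketch (requires the Xeneth SDK and an attached camera; paths/timings are assumptions) ---
#
# import io
# if __name__ == '__main__':
#     buf = io.BytesIO()
#     cam = XevaCam(calibration='calibration.xca')
#     with cam.opened('cam://0'):
#         cam.set_handler(buf)
#         cam.start_recording()
#         cam.wait_recording(2)
#         meta = cam.stop_recording()
#     print(meta)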
"""
class XevaImage(object):
def __init__(self, byte_stream, dims, dtype):
import io
if not isinstance(byte_stream, io.BytesIO):
raise Exception('')
self.stream = byte_stream
@contextmanager
def open(self, mode='r'):
try:
yield self
finally:
pass
def read_numpy_array(self, target_order=None):
''' Reads BytesIO stream Numpy 3D ndarray '''
self.stream.read()
# TODO:
# map(lambda x: x, self.lines)
# trans = self._get_permutation_tuple(interleave_order, target_order)
# data = np.transpose(data, trans)
# return data
return 0
"""
|
StarcoderdataPython
|
12836694
|
<filename>language/python/string_handle.py
import json
def basic():
len('aaaa')
str(1)
try:
a = 'aaa' + 2
except TypeError as e:
print('Type Error: {0}'.format(e))
def dict_to_str():
print('dict to str')
d1 = {'a': 1, 'b': 'string'}
d1_str = str(d1)
print(d1_str)
# This isn't secure because using eval function.
d2 = eval(d1_str)
if d1 == d2:
print('eval function')
def dict_to_str2():
print('dict to str 2')
d1 = {'a': 1, 'b': 'string'}
d1_str = json.dumps(d1)
print(d1_str)
d2 = json.loads(d1_str)
if d1 == d2:
print('json function')
def split():
str1 = 'Thu,1,10,except'
print('string split example: {0}'.format(str1))
# ',' : seperator
elements = str1.split(',')
for el in elements:
print(el)
def join():
list1 = ['1', 'in', 'out']
print('string join example: {0}'.format(':'.join(list1)))
def index():
str1 = '--; select * from ...'
print('string find and index example: {0}'.format(str1))
# find function will return index
if str1.find('--;') >= 0:
print('find it --;')
# index: 3 to end
print(str1[3:])
# index: end 3 character
print(str1[-3:])
def formating():
# python3: format
name = 'Roll'
age = 20
print('{0}: {1}'.format(name, age))
# python 3.6: f-string
name2 = 'Kell'
age2 = 40
print(f'{name2}: {age2}')
# python 3.6: f-string
name3 = '<NAME>'
print(f'{name3.split()}')
if __name__ == '__main__':
print('This is ' + 'string' + ' example')
basic()
dict_to_str()
dict_to_str2()
split()
join()
index()
formating()
|
StarcoderdataPython
|
3300816
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.task.task import Task
from pants.util.dirutil import safe_rmtree
class Clean(Task):
"""Delete all build products, creating a clean workspace."""
def execute(self):
safe_rmtree(self.get_options().pants_workdir)
|
StarcoderdataPython
|
204714
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom Logging IO
"""
from logIO.setupLogging import *
|
StarcoderdataPython
|
3285539
|
"""
Utility functions for the backends
"""
from datetime import datetime, timedelta
import logging
import pytz
from django.core.exceptions import ObjectDoesNotExist
from requests.exceptions import HTTPError
from social_django.utils import load_strategy
from backends.exceptions import InvalidCredentialStored
from backends.edxorg import EdxOrgOAuth2
log = logging.getLogger(__name__)
def _send_refresh_request(user_social):
"""
Private function that refresh an user access token
"""
strategy = load_strategy()
try:
user_social.refresh_token(strategy)
except HTTPError as exc:
if exc.response.status_code in (400, 401):
raise InvalidCredentialStored(
message="Received a {} status code from the OAUTH server".format(
exc.response.status_code
),
http_status_code=exc.response.status_code,
)
raise
def refresh_user_token(user_social):
"""
Utility function to refresh the access token if is (almost) expired
Args:
user_social (UserSocialAuth): a user social auth instance
"""
try:
last_update = datetime.fromtimestamp(
user_social.extra_data.get("updated_at"), tz=pytz.UTC
)
expires_in = timedelta(seconds=user_social.extra_data.get("expires_in"))
except TypeError:
_send_refresh_request(user_social)
return
# small error margin of 5 minutes to be safe
error_margin = timedelta(minutes=5)
if datetime.now(tz=pytz.UTC) - last_update >= expires_in - error_margin:
_send_refresh_request(user_social)
def get_social_username(user):
"""
Get social auth edX username for a user, or else return None.
Args:
user (django.contrib.auth.models.User):
A Django user
"""
if user.is_anonymous:
return None
try:
return user.social_auth.get(provider=EdxOrgOAuth2.name).uid
except ObjectDoesNotExist:
return None
except Exception as ex: # pylint: disable=broad-except
log.error("Unexpected error retrieving social auth username: %s", ex)
return None
|
StarcoderdataPython
|
5141187
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 15:21:26 2018
@author: <NAME>
"""
import numpy as np
import time
from threading import Thread
from traits.api import HasTraits, Float, Enum, Array, Instance, Int, String, Bool, Button, List, Tuple, Dict, Directory, HTML
from traitsui.api import Handler, Tabbed, View, Item, VGroup, HGroup, CodeEditor, HTMLEditor, RangeEditor, ButtonEditor, ListStrEditor, InstanceEditor
from chaco.api import GridContainer,ArrayPlotData, ArrayDataSource, add_default_grids, PlotAxis, Legend, OverlayPlotContainer, LinearMapper, Plot, jet,LinePlot, DataRange1D
from chaco.tools.api import LineSegmentTool, PanTool, ZoomTool, BroadcasterTool, LegendTool, LegendHighlighter
from chaco.scales.api import CalendarScaleSystem
from chaco.scales_tick_generator import ScalesTickGenerator
from enable.api import ComponentEditor, Component
import os
from scipy import ndimage, misc
from ftplib import FTP_TLS
import sys
import traceback
class UploadThread(Thread):
def run(self):
self.master.status = "establishing connection to server..."
try:
ftps = FTP_TLS(self.master.ftp_url,self.master.ftp_user,self.master.ftp_pw)
ftps.cwd(self.master.ftp_dir)
picnames = np.array(ftps.nlst())[2:]
picnumbers = map(int,[name[0:-4] for name in picnames])
maxnumber = max(picnumbers)
self.master.status = "connection successful"
        except Exception:
            traceback.print_exc()
            self.master.status = "could not establish connection"
            self.master.notuploading = True
            return
html_pics = ''
pic_1 = '''<div class="responsive">
<div class="gallery">
<img src="/pictures/'''
pic_2 = '''.jpg" width="600" height="400">
<div class="desc"></div>
</div>
</div>'''
picnumber = maxnumber + 1
if not os.path.exists(self.master.dirpath+'/smallpics'):
os.makedirs(self.master.dirpath+'/smallpics')
for filename in os.listdir(self.master.dirpath):
#print filename
#os.rename(os.dirpath.join(dirpath,filename), os.dirpath.join(dirpath,str(picnumber)+'.jpg'))
if filename[-4:] != ".jpg" and filename[-4:] != ".png":
continue
picpath = self.master.dirpath + '/' + filename#+ str(picnumber) + '.jpg'
pic = ndimage.imread(picpath)
fac = 1328./max(pic.shape)
smallpic = misc.imresize(pic,fac)
newpath = self.master.dirpath + '/smallpics/' + str(picnumber) + '.jpg'
misc.imsave(newpath, smallpic)
html_pics = html_pics + pic_1 + str(picnumber) + pic_2
#upload pic
self.master.status = "uploading picture " + newpath
            fopen = open(newpath, 'rb')
storcommand = "STOR " + str(picnumber) + '.jpg'
ftps.storbinary(storcommand, fopen)
fopen.close()
picnumber = picnumber + 1
html_intro = self.master.html_intro_1 + self.master.category + self.master.html_intro_2
full_html = html_intro + self.master.html_text + html_pics + self.master.html_end
html_name = self.master.title + ".php"
html_path = self.master.codedir + '/' + self.master.date + "_" + html_name
fopen = open(html_path, "w")
fopen.write(full_html)
fopen.close()
#upload
try:
self.master.status = "uploading html " + html_path
            fopen = open(html_path, 'rb')
storcommand = "STOR " + self.master.date + '_' + html_name
ftps.cwd('..')
ftps.storbinary(storcommand, fopen)
fopen.close()
ftps.quit()
self.master.status = "uploading succesful"
self.master.notuploading = True
except:
traceback.print_exc()
self.master.notuploading = True
class MainWindow(HasTraits):
title = String()
date = String()
category = Enum(['nus','travel','pics','food'])
dirpath = Directory()
codedir = Directory()
html_text = String('')
status = String('no connection')
ftp_url = String('files.000webhost.com')
ftp_user = String('maxinsingapore')
ftp_dir = String('public_html/pictures')
ftp_pw = String()
upload_btn = Button('Upload')
html_preview = HTML()
preview_btn = Button('HTML preview')
uploadthread = Instance(UploadThread)
notuploading = Bool(True)
html_intro_1 = '''<!DOCTYPE html><html><head><link href="main.css" rel="stylesheet"/>
<title>Max in Singapore</title>
</head>
<body>
<?php require("ground.php"); ?>
<div class = "title">
<a href="'''
    html_intro_2 = '''.php"><figure><p>back</p></figure></a>
</div>
<div class="center">'''
html_end = ''' </div>
</div>
</div>
</body>
</html>'''
traits_view = View(HGroup('ftp_url','ftp_user','ftp_pw','ftp_dir'),
HGroup('title','date','category'),
HGroup(Item('html_text',editor=CodeEditor()),Item('html_preview',editor=HTMLEditor())),
'preview_btn',
Item('dirpath',label='Photo Directory'),
Item('codedir',label='Code Directory'), Item('status',style='readonly'),
Item('upload_btn',enabled_when='notuploading'))
def _preview_btn_fired(self):
html_intro = self.html_intro_1 + self.category + self.html_intro_2
self.html_preview = html_intro + self.html_text + self.html_end
def _upload_btn_fired(self):
if self.dirpath != '' and self.codedir !='':
self.notuploading = False
self.uploadthread = UploadThread()
self.uploadthread.wants_abort=False
self.uploadthread.master=self
self.uploadthread.start()
else:
self.status = "choose directories"
if __name__== '__main__':
s=MainWindow()
s.configure_traits()
|
StarcoderdataPython
|
9721761
|
<filename>deepspeed/runtime/bf16_optimizer.py
import torch
import torch.distributed as dist
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.ops.op_builder import UtilsBuilder
from packaging import version as pkg_version
from deepspeed.git_version_info import version
from deepspeed.runtime.utils import (get_global_norm_of_tensors,
clip_tensors_by_global_norm,
get_grad_norm,
clip_gradients,
align_dense_tensors,
all_gather_dp_groups,
bwc_tensor_model_parallel_rank,
is_model_parallel_parameter,
see_memory_usage)
from deepspeed.checkpoint.constants import (DS_VERSION,
PARTITION_COUNT,
BASE_OPTIMIZER_STATE,
SINGLE_PARTITION_OF_FP32_GROUPS,
CLIP_GRAD,
GROUPS_PADDING)
import types
from dataclasses import dataclass
@dataclass
class fragment_address:
numel: int
start: int
@dataclass
class tensor_fragment:
lp_fragment: torch.Tensor
lp_fragment_address: fragment_address
hp_fragment: torch.Tensor
hp_fragment_address: fragment_address
optim_fragment: {}
def update_hp(self):
self.hp_fragment.data.copy_(self.lp_fragment.data)
def update_lp(self):
self.lp_fragment.data.copy_(self.hp_fragment.data)
def get_optim_state_fragment(self, key):
if key in self.optim_fragment:
return self.optim_fragment[key]
else:
raise ValueError(f'{key} not found in optimizer state fragment')
def get_full_hp_param(self, optim_state_key=None):
reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten()
if self._hp_mapping is not None:
lp_frag_address = self._hp_mapping.lp_fragment_address
reduce_fragment = torch.narrow(reduce_buffer,
0,
lp_frag_address.start,
lp_frag_address.numel)
if optim_state_key is None:
hp_fragment = self._hp_mapping.hp_fragment
else:
hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key)
reduce_fragment.data.copy_(hp_fragment.data)
torch.distributed.all_reduce(reduce_buffer, group=self._dp_group)
return reduce_buffer.reshape_as(self)
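# Hedged note (assumption, suggested by the `types` import above): get_full_hp_param
# appears intended to be bound onto individual low-precision parameters, e.g.
#   lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param)
# so each bf16 tensor can reduce its fp32 shard back into a full-size view.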
class BF16_Optimizer:
def __init__(self,
init_optimizer,
mpu=None,
clip_grad=0.0,
norm_type=2,
allgather_bucket_size=5000000000,
dp_process_group=None,
timers=None):
super().__init__()
see_memory_usage('begin bf16_optimizer', force=True)
self.timers = timers
self.optimizer = init_optimizer
self.clip_grad = clip_grad
self.norm_type = norm_type
self.mpu = mpu
self.allgather_bucket_size = int(allgather_bucket_size)
self.dp_process_group = dp_process_group
self.dp_rank = dist.get_rank(group=self.dp_process_group)
self.real_dp_process_group = [
dp_process_group for i in range(len(self.optimizer.param_groups))
]
dp_world_size = dist.get_world_size(group=self.dp_process_group)
self.partition_count = [
dp_world_size for i in range(len(self.optimizer.param_groups))
]
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
#align nccl all-gather send buffers to 4-bye boundary
self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2
# Build BF16/FP32 groups
self.bf16_groups = []
self.bf16_groups_flat = []
self.bf16_partitioned_groups = []
self.fp32_groups_flat_partition = []
# Maintain different fp32 gradients views for convenience
self.fp32_groups_gradients = []
self.fp32_groups_gradients_flat = []
self.fp32_groups_actual_gradients_flat = []
self.fp32_groups_gradient_flat_partition = []
self.fp32_groups_has_gradients = []
self.step_count = 0
self.groups_padding = []
for i, param_group in enumerate(self.optimizer.param_groups):
see_memory_usage(f'before initializing group {i}', force=True)
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# grab the original list
self.bf16_groups.append(param_group['params'])
# create flat bf16 params
self.bf16_groups_flat.append(
self._flatten_dense_tensors_aligned(
self.bf16_groups[i],
self.nccl_start_alignment_factor * dp_world_size))
# Make bf16 params point to flat tensor storage
self._update_storage_to_flattened_tensor(
tensor_list=self.bf16_groups[i],
flat_tensor=self.bf16_groups_flat[i])
# divide flat weights into equal sized partitions
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
bf16_dp_partitions = [
self.bf16_groups_flat[i].narrow(0,
dp_index * partition_size,
partition_size)
for dp_index in range(dp_world_size)
]
self.bf16_partitioned_groups.append(bf16_dp_partitions)
# create fp32 params partition
self.fp32_groups_flat_partition.append(
bf16_dp_partitions[partition_id].clone().float().detach())
self.fp32_groups_flat_partition[i].requires_grad = True
num_elem_list = [t.numel() for t in self.bf16_groups[i]]
# create fp32 gradients
self.fp32_groups_gradients_flat.append(
torch.zeros_like(self.bf16_groups_flat[i],
dtype=torch.float32))
# track individual fp32 gradients for entire model
fp32_gradients = self._split_flat_tensor(
flat_tensor=self.fp32_groups_gradients_flat[i],
num_elem_list=num_elem_list)
self.fp32_groups_gradients.append(fp32_gradients)
# flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding)
length_without_padding = sum(num_elem_list)
self.fp32_groups_actual_gradients_flat.append(
torch.narrow(self.fp32_groups_gradients_flat[i],
0,
0,
length_without_padding))
# flat tensor corresponding to gradient partition
self.fp32_groups_gradient_flat_partition.append(
torch.narrow(self.fp32_groups_gradients_flat[i],
0,
partition_id * partition_size,
partition_size))
# track fp32 gradient updates
self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i]))
# Record padding required for alignment
if partition_id == dist.get_world_size(
group=self.real_dp_process_group[i]) - 1:
padding = self.bf16_groups_flat[i].numel() - length_without_padding
else:
padding = 0
self.groups_padding.append(padding)
# update optimizer param groups to reference fp32 params partition
param_group['params'] = [self.fp32_groups_flat_partition[i]]
see_memory_usage(f'after initializing group {i}', force=True)
see_memory_usage('before initialize_optimizer', force=True)
self.initialize_optimizer_states()
see_memory_usage('end initialize_optimizer', force=True)
# Need optimizer states initialized before linking lp to optimizer state
self._link_all_hp_params()
see_memory_usage('end bf16_optimizer', force=True)
def _link_all_hp_params(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
for i, param_group in enumerate(self.optimizer.param_groups):
# Link bf16 and fp32 params in partition
# TODO: Make this configurable
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
self._link_hp_params(self.bf16_groups[i],
self.fp32_groups_flat_partition[i],
partition_id * partition_size,
partition_size,
self.real_dp_process_group[i])
def _init_lp_to_hp_mapping(self,
lp_param_list,
partition_start,
partition_size,
dp_group):
current_offset = 0
param_and_offset_list = []
partition_end = partition_start + partition_size
for lp_param in lp_param_list:
lp_param._hp_mapping = None
lp_param._dp_group = dp_group
lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param)
# lp_param overlaps with partition if both are true
# 1) current_offset < partition_end,
# 2) current_offset + lp_param.numel() >= partition_start
lp_param_end = current_offset + lp_param.numel()
if current_offset < partition_end and lp_param_end > partition_start:
param_and_offset_list.append((lp_param, current_offset))
current_offset += lp_param.numel()
return param_and_offset_list
def _link_hp_params(self,
lp_param_list,
flat_hp_partition,
partition_start,
partition_size,
dp_group):
local_lp_param_and_offset = self._init_lp_to_hp_mapping(
lp_param_list,
partition_start,
partition_size,
dp_group)
hp_end = partition_start + partition_size
for lp_param, lp_start in local_lp_param_and_offset:
lp_end = lp_param.numel() + lp_start
hp_start = partition_start
fragment_start = max(lp_start, hp_start)
fragment_end = min(lp_end, hp_end)
# print(
# f'{self.dp_rank=} {lp_start=} {lp_end-lp_start=} {hp_start=} {hp_end-hp_start=} {fragment_start=} {fragment_end-fragment_start=}'
# )
assert fragment_start < fragment_end, \
f'fragment start {fragment_start} should be < fragment_end {fragment_end}'
fragment_numel = fragment_end - fragment_start
hp_frag_address = fragment_address(start=fragment_start - hp_start,
numel=fragment_numel)
hp_fragment_tensor = flat_hp_partition.narrow(0,
hp_frag_address.start,
hp_frag_address.numel)
optim_fragment = {
key: value.narrow(0,
hp_frag_address.start,
hp_frag_address.numel)
for key,
value in self.optimizer.state[flat_hp_partition].items()
if torch.is_tensor(value)
}
lp_frag_address = fragment_address(start=fragment_start - lp_start,
numel=fragment_numel)
lp_fragment_tensor = lp_param.flatten().narrow(0,
lp_frag_address.start,
lp_frag_address.numel)
lp_param._hp_mapping = tensor_fragment(lp_fragment=lp_fragment_tensor,
lp_fragment_address=lp_frag_address,
hp_fragment=hp_fragment_tensor,
hp_fragment_address=hp_frag_address,
optim_fragment=optim_fragment)
def initialize_optimizer_states(self):
"""Take an optimizer step with zero-valued gradients to allocate internal
optimizer state.
This helps prevent memory fragmentation by allocating optimizer state at the
beginning of training instead of after activations have been allocated.
"""
for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, self.fp32_groups_gradient_flat_partition):
param_partition.grad = grad_partition
self.optimizer.step()
self.clear_hp_grads()
def _split_flat_tensor(self, flat_tensor, num_elem_list):
assert sum(num_elem_list) <= flat_tensor.numel()
tensor_list = []
offset = 0
for num_elem in num_elem_list:
dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem)
tensor_list.append(dense_tensor)
offset += num_elem
return tensor_list
def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor):
updated_params = self.unflatten(flat_tensor, tensor_list)
for p, q in zip(tensor_list, updated_params):
p.data = q.data
def _flatten_dense_tensors_aligned(self, tensor_list, alignment):
return self.flatten(align_dense_tensors(tensor_list, alignment))
@torch.no_grad()
def step(self, closure=None):
if closure is not None:
raise NotImplementedError(f'{self.__class__} does not support closure.')
all_groups_norm = get_global_norm_of_tensors(
input_tensors=self.get_grads_for_norm(),
mpu=self.mpu,
norm_type=self.norm_type)
self._global_grad_norm = all_groups_norm
assert all_groups_norm > 0.
if self.clip_grad > 0.:
clip_tensors_by_global_norm(
input_tensors=self.get_grads_for_norm(for_clipping=True),
max_norm=self.clip_grad,
global_norm=all_groups_norm,
mpu=self.mpu)
self.optimizer.step()
self.update_lp_params()
all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
self.clear_hp_grads()
self.step_count += 1
def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):
"""Perform a backward pass and copy the low-precision gradients to the
high-precision copy.
We copy/accumulate to the high-precision grads now to prevent accumulating in the
bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1)
The low-precision grads are deallocated during this procedure.
"""
self.clear_lp_grads()
loss.backward(**bwd_kwargs)
if update_hp_grads:
self.update_hp_grads(clear_lp_grads=clear_lp_grads)
@torch.no_grad()
def update_hp_grads(self, clear_lp_grads=False):
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if lp.grad is None:
continue
hp_grad = self.fp32_groups_gradients[i][j]
assert hp_grad is not None, \
f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{i}][{j}]'
hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape))
lp._hp_grad = hp_grad
self.fp32_groups_has_gradients[i][j] = True
# clear gradients
if clear_lp_grads:
lp.grad = None
@torch.no_grad()
def get_grads_for_reduction(self):
return self.fp32_groups_gradients_flat
@torch.no_grad()
def get_grads_for_norm(self, for_clipping=False):
grads = []
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if not for_clipping:
if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated:
continue
if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)):
continue
if not self.fp32_groups_has_gradients[i][j]:
continue
grads.append(self.fp32_groups_gradients[i][j])
return grads
@torch.no_grad()
def update_lp_params(self):
for i, (bf16_partitions, fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bf16_partitions[partition_id].data.copy_(fp32_partition.data)
def clear_hp_grads(self):
for flat_gradients in self.fp32_groups_gradients_flat:
flat_gradients.zero_()
for i, group in enumerate(self.fp32_groups_gradients):
self.fp32_groups_has_gradients[i] = [False] * len(group)
def clear_lp_grads(self):
for group in self.bf16_groups:
for param in group:
param.grad = None
def state_dict(self):
state_dict = {}
state_dict[CLIP_GRAD] = self.clip_grad
state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition
state_dict[GROUPS_PADDING] = self.groups_padding
state_dict[PARTITION_COUNT] = self.partition_count
state_dict[DS_VERSION] = version
return state_dict
    # Restore base optimizer fp32 weights from bfloat16 weights
def _restore_from_bit16_weights(self):
for i, group in enumerate(self.bf16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition):
fp32_partition.data.copy_(bf16_partitions[partition_id].data)
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False):
dp_rank = dist.get_rank(group=self.dp_process_group)
current_rank_sd = state_dict_list[dp_rank]
ckpt_version = current_rank_sd.get(DS_VERSION, False)
assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
ckpt_version = pkg_version.parse(ckpt_version)
self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
if load_optimizer_states:
self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
if load_from_fp32_weights:
for current, saved in zip(self.fp32_groups_flat_partition, current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
src_tensor = _get_padded_tensor(saved, current.numel())
current.data.copy_(src_tensor.data)
self._link_all_hp_params()
@property
def param_groups(self):
"""Forward the wrapped optimizer's parameters."""
return self.optimizer.param_groups
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
return padded_tensor
'''
Logic for lp_param to hp_param mapping
lp lp0 lp1 lp2 lp3 lp4 <------- indices/names
lp [ ][ ][ ][ ][ ] <-------- tensors
flat_lp [ ] <-------- flat lp params
flat_hp [ ] <------------------ flat hp partition on current rank
full_hp [ ] <------- full flat hp params
lp2
full numel = 16
lp_frag
numel = 12
frag_start = 3
frag_end = 15
hp_frag
numel = 12
frag_start = 0
frag_end = 11
hp_frag.copy_(lp_frag)
lp3:
full numel = 4
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 12
end = 15
lp4:
full numel = 12
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 16
end = 19
Visual depiction of above
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ { ( } ) ]
lx hx ly hy
ly-hx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { ) } ]
hx lx hy ly
hy-lx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { } ) ]
hx lx ly hy
ly-lx
lp -> (lx, hy)
flat_hp -> (hx, hy)
'''
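# Illustrative sketch (hypothetical helper, not used by the optimizer): the same overlap
# arithmetic that _link_hp_params applies, shown on plain integers. Given where an lp param
# starts in the flat lp tensor and where this rank's hp partition starts, it returns the
# hp-relative fragment address, or None when the param does not overlap the local partition.
def _fragment_overlap_demo(lp_start, lp_numel, partition_start, partition_size):
    lp_end = lp_start + lp_numel
    hp_end = partition_start + partition_size
    fragment_start = max(lp_start, partition_start)
    fragment_end = min(lp_end, hp_end)
    if fragment_start >= fragment_end:
        return None  # no overlap with the local hp partition
    return fragment_address(start=fragment_start - partition_start,
                            numel=fragment_end - fragment_start)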
|
StarcoderdataPython
|
3258416
|
import logging
from bentoml.utils.log import configure_logging
def test_configure_logging_default():
configure_logging()
bentoml_logger = logging.getLogger("bentoml")
assert bentoml_logger.level == logging.INFO
assert bentoml_logger.propagate is False
assert len(bentoml_logger.handlers) == 2
assert bentoml_logger.handlers[0].name == "console"
assert bentoml_logger.handlers[1].name == "local"
prediction_logger = logging.getLogger("bentoml.prediction")
assert prediction_logger.level == logging.INFO
assert prediction_logger.propagate is False
assert len(prediction_logger.handlers) == 2
assert prediction_logger.handlers[0].name == "console"
assert prediction_logger.handlers[1].name == "prediction"
feedback_logger = logging.getLogger("bentoml.feedback")
assert feedback_logger.level == logging.INFO
assert feedback_logger.propagate is False
assert len(feedback_logger.handlers) == 2
assert feedback_logger.handlers[0].name == "console"
assert feedback_logger.handlers[1].name == "feedback"
def test_configure_logging_custom_level():
configure_logging(logging_level=logging.ERROR)
bentoml_logger = logging.getLogger("bentoml")
assert bentoml_logger.level == logging.ERROR
assert bentoml_logger.propagate is False
assert len(bentoml_logger.handlers) == 2
assert bentoml_logger.handlers[0].name == "console"
assert bentoml_logger.handlers[1].name == "local"
prediction_logger = logging.getLogger("bentoml.prediction")
assert prediction_logger.level == logging.INFO
assert prediction_logger.propagate is False
assert len(prediction_logger.handlers) == 2
assert prediction_logger.handlers[0].name == "console"
assert prediction_logger.handlers[1].name == "prediction"
feedback_logger = logging.getLogger("bentoml.feedback")
assert feedback_logger.level == logging.INFO
assert feedback_logger.propagate is False
assert len(feedback_logger.handlers) == 2
assert feedback_logger.handlers[0].name == "console"
assert feedback_logger.handlers[1].name == "feedback"
def test_configure_logging_console_disabled():
configure_logging(console_logging_enabled=False)
bentoml_logger = logging.getLogger("bentoml")
assert bentoml_logger.level == logging.INFO
assert bentoml_logger.propagate is False
assert len(bentoml_logger.handlers) == 1
assert bentoml_logger.handlers[0].name == "local"
prediction_logger = logging.getLogger("bentoml.prediction")
assert prediction_logger.level == logging.INFO
assert prediction_logger.propagate is False
assert len(prediction_logger.handlers) == 1
assert prediction_logger.handlers[0].name == "prediction"
feedback_logger = logging.getLogger("bentoml.feedback")
assert feedback_logger.level == logging.INFO
assert feedback_logger.propagate is False
assert len(feedback_logger.handlers) == 1
assert feedback_logger.handlers[0].name == "feedback"
def test_configure_logging_file_disabled():
configure_logging(file_logging_enabled=False)
bentoml_logger = logging.getLogger("bentoml")
assert bentoml_logger.level == logging.INFO
assert bentoml_logger.propagate is False
assert len(bentoml_logger.handlers) == 1
assert bentoml_logger.handlers[0].name == "console"
prediction_logger = logging.getLogger("bentoml.prediction")
assert prediction_logger.level == logging.INFO
assert prediction_logger.propagate is False
assert len(prediction_logger.handlers) == 1
assert prediction_logger.handlers[0].name == "console"
feedback_logger = logging.getLogger("bentoml.feedback")
assert feedback_logger.level == logging.INFO
assert feedback_logger.propagate is False
assert len(feedback_logger.handlers) == 1
assert feedback_logger.handlers[0].name == "console"
def test_configure_logging_advanced():
advanced_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"test_formatter": {"format": "[%(asctime)s] %(levelname)s - %(message)s"}
},
"handlers": {
"test_handler": {
"level": "WARN",
"formatter": "test_formatter",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
}
},
"loggers": {
"test_logger": {
"handlers": ["test_handler"],
"level": "WARN",
"propagate": False,
}
},
}
configure_logging(advanced_enabled=True, advanced_config=advanced_config)
bentoml_logger = logging.getLogger("test_logger")
assert bentoml_logger.level == logging.WARN
assert bentoml_logger.propagate is False
assert len(bentoml_logger.handlers) == 1
assert bentoml_logger.handlers[0].name == "test_handler"
|
StarcoderdataPython
|
11323150
|
<filename>setup.py
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="housie",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description="All the core logic for playing/simulating the popular game 'Housie' "
"(also known as 'Bingo' or 'Tambola')",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/aaronalphonso/housie",
packages=['housie', 'housie.models'],
package_dir={'': 'src'},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
keywords='python housie core tambola bingo ticket generator board game',
project_urls={
'Source': "https://github.com/aaronalphonso/housie"
},
)
|
StarcoderdataPython
|
9660931
|
<filename>Wettbewerbe/migrations/0014_auto_20170827_1229.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-27 12:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Wettbewerbe', '0013_auto_20170827_1228'),
]
operations = [
migrations.AlterField(
model_name='teilnahme',
name='person',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Wettbewerbe.Person'),
),
migrations.AlterField(
model_name='teilnahme',
name='veranstaltung',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Wettbewerbe.Veranstaltung'),
),
]
|
StarcoderdataPython
|
5023061
|
from fancy_python_library import get_fancy
print('- This is so very fancy!')
print('- {}!'.format(get_fancy()))
|
StarcoderdataPython
|
4959103
|
<reponame>wis-software/office-manager
from django.db import models
from django.utils.translation import ugettext_lazy as _
__all__ = [
'Publisher'
]
class Publisher(models.Model):
title = models.CharField(_('name'), max_length=1024)
description = models.TextField(_('description'), default='', blank=True)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
def __str__(self):
return self.title
class Meta:
verbose_name = _('publisher')
verbose_name_plural = _('publishers')
|
StarcoderdataPython
|
9643039
|
from pyradox.datatype import Color, Time, Tree
from pyradox.filetype import csv, json, table, txt, yml
from pyradox.filetype.txt import parse, parse_file, parse_dir, parse_merge
from pyradox.filetype.yml import get_localisation
from pyradox.config import get_language, get_game_from_path, get_game_directory
from pyradox.worldmap import ProvinceMap
import pyradox.format
import pyradox.image
|
StarcoderdataPython
|
1682031
|
<reponame>shivachoudhary/demo1
#!/usr/bin/python
# Usage :: creating small modules so they can be reused as many times as needed
def nseries(a):
sum=0
for value in range(1,a+1):
sum=sum+value
return sum
def sub(a,b):
if(a>b):
return a-b
else:
return b-a
if __name__=='__main__':
b=int(raw_input("enter a value ::"))
print "heyyy u r entering into the main Function:::"
print "N Summation series sum is {}".format(nseries(b))
a=int(raw_input("enter a value ::"))
my_num=[b,a]
print " diff of two numbers {}".format(sub(*my_num))
|
StarcoderdataPython
|
163383
|
<filename>darkarmour.py
#!/usr/bin/env python3
import os
import sys
import random
import string
import argparse
from lib import banner
from lib import compile
from lib import auxiliary
from lib import encryption
class DarkArmour(object):
def __init__(self):
super(DarkArmour, self).__init__()
self.version = 0.3
self.enc_algos = ["xor"]
self.compile_binary = compile.Binary()
def show_banner(self):
banner.show_banner(self.version)
return
def _do_encrypt(self):
print(f"[i] Begining encryption via {self.crypt_type.upper()}")
keys_used = {}
for loop in range(self.loops):
sys.stdout.write(f"[i] Generating and encrypting with key ({loop}) \r")
if self.crypt_type == "xor":
crypt = encryption.XOR()
if loop == 0:
                    data, length, key = crypt.crypt_file(True, crypt.key, infile=self.in_file)
else:
                    data, length, key = crypt.crypt_file(True, crypt.key, infile=None, data=data, data_length=length)
keys_used[str(loop)] = key
if loop != self.loops - 1:
                    data = auxiliary.clean_hex_output(data)
        return data, length, keys_used
def _do_jmp(self):
        data, length, keys_used = self._do_encrypt()
keys = []
for i in keys_used: keys.append(hex(int(i)))
sys.stdout.write(f"[+] Encrypted with keys ({', '.join(keys)}) \n")
print(f"[i] Preparing and writing {length} bytes to pe image")
        pe_image = auxiliary.prepare_pe_image(length, data)
auxiliary.write_pe_image(pe_image)
print(f"[i] Writing header file")
auxiliary.write_header_file(keys_used, jmp=True)
print(f"[i] Creating decryption routine with recursion depth {self.loops}")
file_clean = auxiliary.write_decrypt("src/jmp_loader/main.c", self.loops)
sys.stdout.write(f"[i] Compiling into PE {self.out_file}...\r")
self.compile_binary.compile("src/jmp_loader/main.c", self.out_file)
auxiliary.clean_up("src/jmp_loader/main.c", file_clean)
print(f"[+] Wrote {auxiliary.get_size(self.out_file)} bytes to {self.out_file}")
def _do_runpe(self):
pass
def _parse_args(self, args):
if args['outfile'] is None:
self.out_file = auxiliary.gen_rand_filename() + ".exe"
print(f"[i] No out filename supplied, contents shall be stored in: {self.out_file}")
else: self.out_file = args['outfile']
if args['upx'] is not False: self.upx = True
else: self.upx = False
if args['jmp'] is not False: self.jmp = True
else: self.jmp = False
        if args['runpe'] is not False: self.runpe = True
else: self.runpe = False
        if args['shellcode'] is not None: self.shellcode = args['shellcode']
        if args['file'] is not None: self.in_file = args['file']
self.crypt_type = args['encrypt']
self.key = args['key']
self.loops = int(args['loop'])
def _do_crypt(self, clean=False):
print(f"[i] Started armouring {self.in_file} ({auxiliary.get_size(self.in_file)} bytes)")
        if clean: file_to_clean = self.in_file
if self.jmp:
print(f"[i] Configuring to use JMP loader")
self._do_jmp()
if self.runpe:
self._do_runpe()
def run(self, args):
self._parse_args(args)
self._do_crypt()
if __name__ == '__main__':
darkarmour = DarkArmour()
darkarmour.show_banner()
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--file", required=False, help="file to crypt, assumed as binary if not told otherwise")
ap.add_argument("-e", "--encrypt", required=True, help=f"encryption algorithm to use ({', '.join(darkarmour.enc_algos)})")
ap.add_argument("-S", "--shellcode", required=False, help="file contating the shellcode, needs to be in the 'msfvenom -f raw' style format")
ap.add_argument("-b", "--binary", required=False, action='store_true', help="provide if file is a binary exe")
ap.add_argument("-d", "--dll", required=False, action='store_true', help="use reflective dll injection to execute the binary inside another process")
ap.add_argument("-u", "--upx", required=False, action='store_true', help="pack the executable with upx")
ap.add_argument("-j", "--jmp", required=False, action='store_true', help="use jmp based pe loader")
ap.add_argument("-r", "--runpe", required=False, action='store_true', help="use runpe to load pe")
ap.add_argument("-s", "--source", required=False, action='store_true', help="provide if the file is c source code")
ap.add_argument("-k", "--key", required=False, help="key to encrypt with, randomly generated if not supplied")
ap.add_argument("-l", "--loop", required=False, default=1, help="number of levels of encryption")
ap.add_argument("-o", "--outfile", required=False, help="name of outfile, if not provided then random filename is assigned")
darkarmour.run(vars(ap.parse_args()))
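# Illustrative invocation (hypothetical file names; flag names taken from the argparse
# definitions above): XOR-encrypt a binary three times and build it into a JMP-loader PE:
#   python3 darkarmour.py -f payload.exe -e xor -l 3 -j -o crypted.exe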
|
StarcoderdataPython
|
5049513
|
from collections import namedtuple
import time
import logging
import numpy as np
import pickle
from es_distributed import tf_util
from es_distributed.policies import policies
from es_distributed.config import Result
from .common import SharedNoiseTable, RunningStat
from . import algo
log = logging.getLogger(__name__)
GAMooTask = namedtuple('GAMooTask', [
'params', 'population', 'ob_mean', 'ob_std', 'timestep_limit',
'num_eps'
])
def rollout_and_update_ob_stat(policy, env, num_eps, timestep_limit, rs, task_ob_stat, calc_obstat_prob):
save_obs = policy.needs_ob_stat and calc_obstat_prob != 0 and rs.rand() < calc_obstat_prob
rewards = []
lens = []
for _ in range(num_eps):
if save_obs:
rollout_rews, rollout_len, obs = policy.rollout(
env, timestep_limit=timestep_limit,
save_obs=True, random_stream=rs)
task_ob_stat.increment(obs.sum(axis=0),
np.square(obs).sum(axis=0), len(obs))
else:
rollout_rews, rollout_len = policy.rollout(
env, timestep_limit=timestep_limit,
save_obs=False, random_stream=rs)
rewards += [rollout_rews.sum()]
lens += [rollout_len]
return rewards, lens
class GAMoo(algo.Algo):
@property
def name(self):
return "ga_moo"
def __init__(self):
self.population = []
self.max_avg = 0
self.max_std = 0
self.ob_count = 0
self.population_score = np.array([])
self.rs = np.random.RandomState()
def save(self, file, *args, **kwargs):
super(GAMoo, self).save(file)
file.attrs['algo.population'] = np.void(pickle.dumps(self.population, protocol=-1))
file.attrs['algo.max_avg'] = self.max_avg
file.attrs['algo.max_std'] = self.max_std
def load(self, config, file):
super(GAMoo, self).load(config, file)
        # don't need to get theta as it is collected from the policy each time
self.population = pickle.loads(file.attrs['algo.population']) if 'algo.population' in file.attrs else []
        self.max_avg = file.attrs['algo.max_avg'] if 'algo.max_avg' in file.attrs else 0
        self.max_std = file.attrs['algo.max_std'] if 'algo.max_std' in file.attrs else 0
def setup_job(self, config, noise=None, snapshot=None):
super(GAMoo, self).setup_job(config, noise, snapshot)
self.population_size = config.algo['population_size']
self.num_elites = config.algo['num_elites']
self.num_eps = config.task.episodes_target
def setup_iteration(self, master_client, timestep_limit):
self.theta = self.policy.get_trainable_flat()
assert self.theta.dtype == np.float32
log.debug("declaring task")
return master_client.declare_task(GAMooTask(
params=self.theta,
population=self.population,
ob_mean=self.ob_stat.mean if self.policy.needs_ob_stat else None,
ob_std=self.ob_stat.std if self.policy.needs_ob_stat else None,
timestep_limit=timestep_limit,
num_eps=self.num_eps
))
def process_result(self, result):
if self.policy.needs_ob_stat and result.ob_count > 0:
self.ob_stat.increment(
result.ob_sum, result.ob_sumsq, result.ob_count
)
self.ob_count += result.ob_count
def fitness(self, avg, std):
avg = avg / self.max_avg if self.max_avg > 0 else 0
std = std / self.max_std if self.max_std > 0 else 0
return (1 - avg) + std
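    # Illustrative example (hypothetical numbers): with max_avg=100 and max_std=10, a candidate
    # with mean return 80 and std 2 scores (1 - 0.8) + 0.2 = 0.4; lower is better, which is why
    # process_iteration below sorts ascending and keeps the smallest fitness values.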
def process_iteration(self, config, iteration_results):
#import pdb; pdb.set_trace()
noise_inds_n = list(self.population[:self.num_elites])
returns_n2 = list(self.population_score[:self.num_elites])
action_rews = []
fitness_n2 = []
self.max_avg = np.max(
[self.max_avg] +
[np.mean(r.returns_n2) for r in iteration_results.results])
self.max_std = np.max(
[self.max_std] +
[np.std(r.returns_n2) for r in iteration_results.results])
if iteration_results.eval_returns:
elite_avg = np.mean(iteration_results.eval_returns)
elite_std = np.std(iteration_results.eval_returns)
fitness_n2.append(self.fitness(elite_avg, elite_std))
for r in iteration_results.results:
avg = np.mean(r.returns_n2)
std = np.std(r.returns_n2)
fitness_n2.append(self.fitness(avg, std))
noise_inds_n.extend(r.noise_inds_n)
returns_n2.extend(r.returns_n2)
action_rews.append(r.action_mean)
noise_inds_n = np.array(noise_inds_n)
returns_n2 = np.array(returns_n2)
lengths_n2 = np.array([r.lengths_n2 for r in iteration_results.results])
fitness_n2 = np.array(fitness_n2)
idx = [x[0] for x in sorted(
enumerate(fitness_n2),
key=lambda y: y[1])][:self.population_size]
'''
idx = np.argpartition(
fitness_n2,
(-self.population_size, -1)
)[-1:-self.population_size-1:-1][::-1]
'''
self.population = noise_inds_n[idx]
self.population_score = fitness_n2[idx]
assert len(self.population) == self.population_size, "%d != %d" % (len(self.population), self.population_size)
assert np.min(fitness_n2) == self.population_score[0], "%d != %d" % (np.min(fitness_n2), self.population_score[0])
self.policy.set_trainable_flat(
self.noise.get(self.population[0][0], self.policy.num_params)
)
self.policy.reinitialize()
v = self.policy.get_trainable_flat()
for seed in self.population[0][1:]:
v += config.algo['noise_stdev'] * self.noise.get(seed, self.policy.num_params)
self.policy.set_trainable_flat(v)
log.info("Max Avg: %.3f, Max Std: %.3f", self.max_avg, self.max_std)
return returns_n2, lengths_n2, action_rews
def run_episode(self, config, task_id, task_data):
if self.policy.needs_ob_stat:
self.policy.set_ob_stat(task_data.ob_mean, task_data.ob_std)
if self.rs.rand() < config.algo['eval_prob']:
self.policy.set_trainable_flat(task_data.params)
eval_rews, eval_length = self.policy.rollout(self.env)
eval_return = eval_rews.sum()
log.debug("Eval job, Reward: %f, TS: %f, Action Reward: %f",
eval_return, eval_length, eval_return / eval_length)
return Result(
worker_id=1,
noise_inds_n=None,
returns_n2=None,
signreturns_n2=None,
lengths_n2=None,
action_mean=None,
eval_return=eval_return,
eval_length=eval_length,
ob_sum=None,
ob_sumsq=None,
ob_count=None
)
task_tstart = time.time()
noise_inds, returns, signreturns, lengths = [], [], [], []
task_ob_stat = RunningStat(self.env.observation_space.shape, eps=0.) # eps=0 because we're incrementing only
while not noise_inds or time.time() - task_tstart < .2:
if len(task_data.population) > 0:
seeds = list(
task_data.population[self.rs.randint(len(task_data.population))]) + [self.noise.sample_index(self.rs, self.policy.num_params)]
else:
seeds = [self.noise.sample_index(self.rs, self.policy.num_params)]
v = self.noise.get(seeds[0], self.policy.num_params)
self.policy.set_trainable_flat(v)
self.policy.reinitialize()
v = self.policy.get_trainable_flat()
for seed in seeds[1:]:
v += config.algo['noise_stdev'] * self.noise.get(seed, self.policy.num_params)
self.policy.set_trainable_flat(v)
rews_pos, len_pos, = rollout_and_update_ob_stat(
self.policy, self.env, task_data.num_eps,
task_data.timestep_limit, self.rs, task_ob_stat,
config.algo['calc_obstat_prob'])
noise_inds.append(seeds)
returns.extend(rews_pos)
signreturns.extend(np.sign(rews_pos))
lengths.extend(len_pos)
#import pdb; pdb.set_trace()
log.debug("Result: {} (avg), timesteps: {}".format(np.mean(returns), sum(lengths)))
return Result(
worker_id=1,
noise_inds_n=noise_inds,
returns_n2=np.array(returns, dtype=np.float32),
signreturns_n2=np.array(signreturns, dtype=np.float32),
lengths_n2=np.array(lengths, dtype=np.int32),
action_mean=np.mean(returns),
eval_return=None,
eval_length=None,
ob_sum=None if task_ob_stat.count == 0 else task_ob_stat.sum,
ob_sumsq=None if task_ob_stat.count == 0 else task_ob_stat.sumsq,
ob_count=task_ob_stat.count
)
|
StarcoderdataPython
|
1803055
|
<reponame>iahsanujunda/federated
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for federated training of EMNIST classifiers with targeted attack and corrsponding defenses."""
import collections
import os
from absl import app
from absl import flags
import numpy as np
from scipy import io
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.research.targeted_attack import aggregate_fn
from tensorflow_federated.python.research.targeted_attack import attacked_fedavg
FLAGS = flags.FLAGS
# training parameters
flags.DEFINE_string('root_output_dir', '/tmp/emnist_grids/',
'Root directory for writing experiment output.')
flags.DEFINE_integer('random_seed', 0, 'Random seed.')
flags.DEFINE_integer('evaluate_per_rounds', 1, 'Evaluate the model every this many rounds.')
flags.DEFINE_boolean('only_digits', True, 'True: 10 classes, False 62 classes.')
# server parameters
flags.DEFINE_integer('num_clients_per_round', 5, 'Number of clients per round.')
flags.DEFINE_integer('num_rounds', 300, 'Number of rounds.')
flags.DEFINE_float('server_learning_rate', 1., 'Server learning rate.')
flags.DEFINE_float('server_momentum', 0., 'Server momentum.')
# client parameters
flags.DEFINE_integer('num_epochs', 5, 'Number of epochs in the client.')
flags.DEFINE_integer('batch_size', 20, 'Training batch size.')
flags.DEFINE_float('client_learning_rate', 0.1, 'Client learning rate.')
flags.DEFINE_float('client_momentum', 0., 'Client momentum.')
# attack parameters
flags.DEFINE_integer('attack_freq', 1, 'Attacking frequency of the attacker.')
flags.DEFINE_integer('task_num', 30,
'The number of attack tasks we want to insert.')
flags.DEFINE_integer(
'client_round_num', 5,
'Number of local rounds used to compute the malicious update.')
# defense parameters
flags.DEFINE_float('drop_prob', 0.5, 'Dropping probability of each layer')
flags.DEFINE_float('norm_bound', 0.33,
'The maximum norm for malicious update before boosting.')
flags.DEFINE_float('l2_norm_clip', 1.0, 'The clipped l2 norm')
flags.DEFINE_float('mul_factor', 0.,
'The multiplication factor to ensure privacy')
keys = [
'random_seed', 'num_clients_per_round', 'num_rounds',
'server_learning_rate', 'num_epochs', 'batch_size', 'client_learning_rate',
'client_round_num', 'attack_freq', 'task_num', 'drop_prob', 'norm_bound',
'l2_norm_clip', 'mul_factor'
]
use_nchw_format = False
data_format = 'channels_first' if use_nchw_format else 'channels_last'
data_shape = [1, 28, 28] if use_nchw_format else [28, 28, 1]
def preprocess(dataset):
"""Preprocess dataset."""
def element_fn(element):
return collections.OrderedDict([
('x', tf.reshape(element['pixels'], data_shape)),
('y', tf.reshape(element['label'], [-1])),
])
return dataset.repeat(FLAGS.num_epochs).map(element_fn).batch(
FLAGS.batch_size)
def load_malicious_dataset(num_tasks):
"""Load malicious dataset consisting of malicious target samples."""
url_malicious_dataset = 'https://storage.googleapis.com/tff-experiments-public/targeted_attack/emnist_malicious/emnist_target.mat'
filename = 'emnist_target.mat'
path = tf.keras.utils.get_file(filename, url_malicious_dataset)
emnist_target_data = io.loadmat(path)
emnist_target_x = emnist_target_data['target_train_x'][0]
emnist_target_y = emnist_target_data['target_train_y'][0]
target_x = np.concatenate(emnist_target_x[-num_tasks:], axis=0)
target_y = np.concatenate(emnist_target_y[-num_tasks:], axis=0)
dict_malicious = collections.OrderedDict([('x', target_x), ('y', target_y)])
dataset_malicious = tf.data.Dataset.from_tensors(dict_malicious)
return dataset_malicious, target_x, target_y
def load_test_data():
"""Load test data for faster evaluation."""
url_test_data = 'https://storage.googleapis.com/tff-experiments-public/targeted_attack/emnist_test_data/emnist_test_data.mat'
filename = 'emnist_test_data.mat'
path = tf.keras.utils.get_file(filename, url_test_data)
emnist_test_data = io.loadmat(path)
test_image = emnist_test_data['test_x']
test_label = emnist_test_data['test_y']
return test_image, test_label
def make_federated_data_with_malicious(client_data,
dataset_malicious,
client_ids,
with_attack=1):
"""Make federated dataset with potential attackers."""
benign_dataset = [
preprocess(client_data.create_tf_dataset_for_client(x))
for x in client_ids
]
malicious_dataset = [dataset_malicious for x in client_ids]
if with_attack:
client_type_list = \
[tf.cast(0, tf.bool)] * (len(client_ids)-1) + [tf.cast(1, tf.bool)]
else:
client_type_list = [tf.cast(0, tf.bool)] * len(client_ids)
return benign_dataset, malicious_dataset, client_type_list
def sample_clients_with_malicious(client_data,
client_ids,
dataset_malicious,
num_clients=3,
with_attack=1):
"""Sample client and make federated dataset."""
sampled_clients = np.random.choice(client_ids, num_clients)
federated_train_data, federated_malicious_data, client_type_list = \
make_federated_data_with_malicious(client_data, dataset_malicious,
sampled_clients, with_attack)
return federated_train_data, federated_malicious_data, client_type_list
def create_keras_model():
"""Build compiled keras model."""
num_classes = 10 if FLAGS.only_digits else 62
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
input_shape=data_shape,
data_format=data_format),
tf.keras.layers.Conv2D(
64, kernel_size=(3, 3), activation='relu', data_format=data_format),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(num_classes, activation='softmax')
])
return model
def evaluate(state, x, y, target_x, target_y, batch_size=100):
"""Evaluate the model on both main task and target task."""
keras_model = create_keras_model()
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
state.model.assign_weights_to(keras_model)
test_metrics = keras_model.evaluate(x, y, batch_size=batch_size)
test_metrics_target = keras_model.evaluate(
target_x, target_y, batch_size=batch_size)
return test_metrics, test_metrics_target
def write_print(file_handle, line):
print(line)
file_handle.write(line + '\n')
def log_tfboard(name, value, step):
tf.summary.scalar(name, value, step=step)
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
print('Skipping creation of directory {}, directory already exists'.format(
path))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
config = tf.compat.v1.ConfigProto()
config.graph_options.rewrite_options.layout_optimizer = 2
tf.compat.v1.enable_eager_execution(config)
np.random.seed(FLAGS.random_seed)
flag_dict = FLAGS.flag_values_dict()
configs = '-'.join(
['{}={}'.format(k, flag_dict[k]) for k in keys if k != 'root_output_dir'])
file_name = 'log' + configs
create_if_not_exists(FLAGS.root_output_dir)
file_handle = open(os.path.join(FLAGS.root_output_dir, file_name), 'w')
global_step = tf.Variable(1, name='global_step', dtype=tf.int64)
file_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.root_output_dir))
file_writer.set_as_default()
write_print(file_handle, '=======configurations========')
write_print(file_handle, configs)
write_print(file_handle, '=======configurations========')
# prepare dataset.
write_print(file_handle, 'Loading Dataset!')
emnist_train, _ = tff.simulation.datasets.emnist.load_data(
only_digits=FLAGS.only_digits)
# prepare test set
write_print(file_handle, 'Loading Test Set!')
test_image, test_label = load_test_data()
# load malicious dataset
write_print(file_handle, 'Loading malicious dataset!')
dataset_malicious, target_x, target_y = load_malicious_dataset(FLAGS.task_num)
# prepare model_fn.
example_dataset = preprocess(
emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[0]))
input_spec = example_dataset.element_spec
def model_fn():
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
# define server optimizer
nesterov = True if FLAGS.server_momentum != 0 else False
def server_optimizer_fn():
return tf.keras.optimizers.SGD(
learning_rate=FLAGS.server_learning_rate,
momentum=FLAGS.server_momentum,
nesterov=nesterov)
  # build iterative process
write_print(file_handle, 'Building Iterative Process!')
client_update_function = attacked_fedavg.ClientProjectBoost(
boost_factor=float(FLAGS.num_clients_per_round),
norm_bound=FLAGS.norm_bound,
round_num=FLAGS.client_round_num)
aggregation_function = aggregate_fn.build_dp_aggregate(
l2_norm=FLAGS.l2_norm_clip,
mul_factor=FLAGS.mul_factor,
num_clients=FLAGS.num_clients_per_round)
iterative_process = attacked_fedavg.build_federated_averaging_process_attacked(
model_fn=model_fn,
stateful_delta_aggregate_fn=aggregation_function,
client_update_tf=client_update_function,
server_optimizer_fn=server_optimizer_fn)
state = iterative_process.initialize()
# training loop
for cur_round in range(FLAGS.num_rounds):
if cur_round % FLAGS.attack_freq == FLAGS.attack_freq // 2:
with_attack = 1
write_print(file_handle, 'Attacker appears!')
else:
with_attack = 0
# sample clients and make federated dataset
federated_train_data, federated_malicious_data, client_type_list = \
sample_clients_with_malicious(
emnist_train, client_ids=emnist_train.client_ids,
dataset_malicious=dataset_malicious,
num_clients=FLAGS.num_clients_per_round, with_attack=with_attack)
# one round of attacked federated averaging
write_print(file_handle, 'Round starts!')
state, train_metrics = iterative_process.next(state, federated_train_data,
federated_malicious_data,
client_type_list)
write_print(
file_handle,
'Training round {:2d}, train_metrics={}'.format(cur_round,
train_metrics))
log_tfboard('train_acc', train_metrics['sparse_categorical_accuracy'],
global_step)
log_tfboard('train_loss', train_metrics['loss'], global_step)
# evaluate current model on test data and malicious data
if cur_round % FLAGS.evaluate_per_rounds == 0:
test_metrics, test_metrics_target = evaluate(state, test_image,
test_label, target_x,
target_y)
write_print(
file_handle,
'Evaluation round {:2d}, <sparse_categorical_accuracy={},loss={}>'
.format(cur_round, test_metrics[1], test_metrics[0]))
write_print(
file_handle,
'Evaluation round {:2d}, <sparse_categorical_accuracy={},loss={}>'
.format(cur_round, test_metrics_target[1], test_metrics_target[0]))
log_tfboard('test_acc', test_metrics[1], global_step)
log_tfboard('test_loss', test_metrics[0], global_step)
log_tfboard('test_acc_target', test_metrics_target[1], global_step)
log_tfboard('test_loss_target', test_metrics_target[0], global_step)
global_step.assign_add(1)
if __name__ == '__main__':
app.run(main)
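# Illustrative invocation (hypothetical script path; the flags are defined at the top of this
# file), e.g.:
#   python emnist_targeted_attack.py --num_rounds=100 --num_clients_per_round=5 --attack_freq=10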
|
StarcoderdataPython
|
6608080
|
"""Module for Financial Transactions
"""
import datetime
class Transaction:
def __init__(
self,
ID,
amount: float,
inflow: bool,
time: datetime.datetime,
description: str = '',
currency_code: str = 'USD',
) -> None:
self.ID = ID
self.amount = amount
self.inflow = inflow
self.time = time
self.description = description
self.currency_code = currency_code
@property
def amount(self) -> float:
return self._amount
@amount.setter
def amount(self, amount) -> None:
assert type(amount) == float or type(amount) == int
assert amount > 0
self._amount = amount
@property
def inflow(self) -> bool:
return self._inflow
@inflow.setter
def inflow(self, inflow) -> None:
assert type(inflow) == bool
self._inflow = inflow
@property
def time(self) -> datetime.datetime:
return self._time
@time.setter
def time(self, time) -> None:
assert type(time) == datetime.datetime
self._time = time
self._date = time.date()
@property
def date(self) -> datetime.date:
return self._date
@property
def currency_code(self) -> str:
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code) -> None:
assert type(currency_code) == str
assert currency_code in [
'USD',
'IRR',
'AUD',
'EUR',
]
self._currency_code = currency_code
def __str__(self) -> str:
return f"""{'+' if self.inflow else '-'}{self.amount:,} - {self.description}"""
|
StarcoderdataPython
|
1684925
|
import numpy as np
import math
import random
from network.convolution.ConvolutionWrapper import ConvolutionWrapper
class LSTMWrapper(ConvolutionWrapper):
def __init__(self, agent, history_size=10):
super(LSTMWrapper, self).__init__(agent)
self.history_size = history_size
def request_action(self):
#get old state
self.state_old = self.get_state()
reward_old = self.get_reward()
self.total_moves += 1
# print(f"state = {state_old}")
#perform random actions based on agent.epsilon, or choose the action
if random.randint(0, 500) > self.player.max_survival_time or random.randint(0, 10) == 0:
self.last_move = random.randint(0, 3)
self.random_moves += 1
# print("random move")
else:
# predict action based on the old state
states = []
states_index = 0
states.append(self.state_old)
while states_index > -len(self.memory) and states_index > -self.history_size - 1 and self.memory[states_index][-1] != True:
states.append(self.memory[states_index][0])
states_index -= 1
prediction = self.model.predict(states)
self.last_move = np.argmax(prediction)
#perform new move and get new state
self.player.do_action(int(self.last_move))
def replay_new(self):
# print(f'random moves : {100 * float(self.random_moves) / self.total_moves}')
self.random_moves = 0
self.total_moves = 0
# minibatch = [a for a in self.memory if a[2] != 0]
minibatch = range(len(self.memory))
if len(minibatch) > 1000:
minibatch = random.sample(range(len(minibatch)), 1000)
for index in minibatch:
state, action, reward, next_state, done = self.memory[index]
states = []
states_index = 0
while states_index + index >= 0 and states_index > -self.history_size - 1 and self.memory[states_index + index][-1] != True:
states.append(self.memory[states_index + index][0])
states_index -= 1
if len(states) != 0:
target = reward
target_f = self.model.predict(states)
target_f[action] = target
self.model.fit(states, target_f)
def train_short_memory(self):
state, action, reward, next_state, done = self.memory[-1]
states = []
states_index = 0
while states_index > -len(self.memory) and states_index > -self.history_size - 1 and self.memory[states_index][-1] != True:
states.append(self.memory[states_index][0])
states_index -= 1
if len(states) != 0:
target = reward
target_f = self.model.predict(states)
target_f[action] = target
self.model.fit(states, target_f)
|
StarcoderdataPython
|
297679
|
<reponame>Barroso03/iteracion
# Part 1
def mcd_euclides(x, y):
    while y != 0:
        xux = y
        y = x % y
        x = xux
    return x
# Part 2
def mcd_sumas_y_restas(x, y):
    while x != y:
        if x > y:
            x -= y
        else:
            y -= x
    return x
# Helper that runs both parts
def inicio():
    numero1 = 2
    numero2 = 3
    print("Part 1:", mcd_euclides(numero1, numero2))
    print("Part 2:", mcd_sumas_y_restas(numero1, numero2))
|
StarcoderdataPython
|
3588248
|
import code.book_plots as bp
import code.gh_internal as gh
import matplotlib.pyplot as plt
import numpy as np
import math
import time
from pylab import *
from drawnow import drawnow, figure
from filterpy.discrete_bayes import normalize
from filterpy.discrete_bayes import predict
from filterpy.discrete_bayes import update
from scipy.ndimage import measurements
import filterpy.stats as stats
from filterpy.stats import gaussian, multivariate_gaussian
from numpy.random import randn,seed
from code.DogSimulation import DogSimulation
from code import kf_internal
from filterpy.kalman import predict, update
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from code.mkf_internal import plot_track
class Chapter06_25(object):
def __init__(self):
pass
def pos_vel_filter(self, x, P, R, Q=0., dt=1.0):
""" Returns a KalmanFilter which implements a
constant velocity model for a state [x dx].T
"""
kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([x[0], x[1]]) # location and velocity
kf.F = np.array([[1., dt],
[0., 1.]]) # state transition matrix
kf.H = np.array([[1., 0]]) # Measurement function
kf.R *= R # measurement uncertainty
if np.isscalar(P):
kf.P *= P # covariance matrix
else:
kf.P[:] = P # [:] makes deep copy
if np.isscalar(Q):
kf.Q = Q_discrete_white_noise(dim=2, dt=dt, var=Q)
else:
kf.Q[:] = Q
return kf
def compute_dog_data(self, z_var, process_var, count=1, dt=1.):
"returns track, measurements 1D ndarrays"
x, vel = 0., 1.
z_std = math.sqrt(z_var)
p_std = math.sqrt(process_var)
xs, zs = [], []
for _ in range(count):
v = vel + (randn() * p_std * dt)
x += v*dt
xs.append(x)
zs.append(x + randn() * z_std)
return np.array(xs), np.array(zs)
def draw_fig1(self):
plt.scatter(self.sc1X, self.sc1Y)
plt.scatter(self.sc2X,self.sc2Y)
def run(self,x0=(0.,0.), P=500, R=0, Q=0, dt=1.0,
track=None, zs=None,
count=0, do_plot=True, **kwargs):
"""
track is the actual position of the dog, zs are the
corresponding measurements.
"""
# Simulate dog if no data provided.
if zs is None:
track, zs = self.compute_dog_data(R, Q, count)
# create the Kalman filter
kf = self.pos_vel_filter(x0, R=R, P=P, Q=Q, dt=dt)
# run the kalman filter and store the results
xs, cov = [], []
for z in zs:
kf.predict()
kf.update(z)
xs.append(kf.x)
cov.append(kf.P)
xs, cov = np.array(xs), np.array(cov)
if do_plot:
plot_track(xs[:, 0], track, zs, cov,
dt=dt, **kwargs)
return xs, cov
def start(self):
P = np.diag([500., 49.])
Ms, Ps = self.run(count=50, R=10, Q=0.01, P=P)
def main():
ch = Chapter06_25()
ch.start()
if __name__ == "__main__": main()
|
StarcoderdataPython
|
1607760
|
import base64
import copy
import inspect
import json
import logging
import os
import re
import warnings
from collections import OrderedDict
from typing import Optional
from urllib.parse import urlparse
try:
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
except ImportError:
SecretClient = None
DefaultAzureCredential = None
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
boto3 = None
ClientError = None
try:
from google.cloud import secretmanager
except ImportError:
secretmanager = None
import pyparsing as pp
import great_expectations.exceptions as ge_exceptions
from great_expectations.data_context.types.base import (
CheckpointConfig,
CheckpointConfigSchema,
DataContextConfig,
DataContextConfigDefaults,
DataContextConfigSchema,
)
from great_expectations.util import load_class, verify_dynamic_loading_support
try:
import sqlalchemy as sa
except ImportError:
sa = None
logger = logging.getLogger(__name__)
# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default
# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.
def instantiate_class_from_config(config, runtime_environment, config_defaults=None):
"""Build a GE class from configuration dictionaries."""
if config_defaults is None:
config_defaults = {}
config = copy.deepcopy(config)
module_name = config.pop("module_name", None)
if module_name is None:
try:
module_name = config_defaults.pop("module_name")
except KeyError:
raise KeyError(
"Neither config : {} nor config_defaults : {} contains a module_name key.".format(
config,
config_defaults,
)
)
else:
# Pop the value without using it, to avoid sending an unwanted value to the config_class
config_defaults.pop("module_name", None)
verify_dynamic_loading_support(module_name=module_name)
class_name = config.pop("class_name", None)
if class_name is None:
logger.warning(
"Instantiating class from config without an explicit class_name is dangerous. Consider adding "
"an explicit class_name for %s" % config.get("name")
)
try:
class_name = config_defaults.pop("class_name")
except KeyError:
raise KeyError(
"Neither config : {} nor config_defaults : {} contains a class_name key.".format(
config,
config_defaults,
)
)
else:
# Pop the value without using it, to avoid sending an unwanted value to the config_class
config_defaults.pop("class_name", None)
class_ = load_class(class_name=class_name, module_name=module_name)
config_with_defaults = copy.deepcopy(config_defaults)
config_with_defaults.update(config)
if runtime_environment is not None:
# If there are additional kwargs available in the runtime_environment requested by a
# class to be instantiated, provide them
argspec = inspect.getfullargspec(class_.__init__)[0][1:]
missing_args = set(argspec) - set(config_with_defaults.keys())
config_with_defaults.update(
{
missing_arg: runtime_environment[missing_arg]
for missing_arg in missing_args
if missing_arg in runtime_environment
}
)
# Add the entire runtime_environment as well if it's requested
if "runtime_environment" in missing_args:
config_with_defaults.update({"runtime_environment": runtime_environment})
try:
class_instance = class_(**config_with_defaults)
except TypeError as e:
raise TypeError(
"Couldn't instantiate class: {} with config: \n\t{}\n \n".format(
class_name, format_dict_for_error_message(config_with_defaults)
)
+ str(e)
)
return class_instance
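# Illustrative sketch (hypothetical configuration values): a call along the lines of
#   instantiate_class_from_config(
#       config={"class_name": "TupleFilesystemStoreBackend", "base_directory": "/tmp/store"},
#       runtime_environment={},
#       config_defaults={"module_name": "great_expectations.data_context.store"},
#   )
# resolves the class through load_class() and passes base_directory through as a constructor kwarg.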
def build_store_from_config(
store_name: str = None,
store_config: dict = None,
module_name: str = "great_expectations.data_context.store",
runtime_environment: dict = None,
):
if store_config is None or module_name is None:
return None
try:
config_defaults: dict = {
"store_name": store_name,
"module_name": module_name,
}
new_store = instantiate_class_from_config(
config=store_config,
runtime_environment=runtime_environment,
config_defaults=config_defaults,
)
except ge_exceptions.DataContextError as e:
new_store = None
logger.critical(f"Error {e} occurred while attempting to instantiate a store.")
if not new_store:
class_name: str = store_config["class_name"]
module_name = store_config["module_name"]
raise ge_exceptions.ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=class_name,
)
return new_store
def format_dict_for_error_message(dict_):
# TODO : Tidy this up a bit. Indentation isn't fully consistent.
return "\n\t".join("\t\t".join((str(key), str(dict_[key]))) for key in dict_)
def substitute_config_variable(
template_str, config_variables_dict, dollar_sign_escape_string: str = r"\$"
):
"""
This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,
returns a string where the pattern is replaced with the value of SOME_VARIABLE,
otherwise returns the string unchanged. These patterns are case sensitive. There can be multiple
patterns in a string, e.g. all 3 will be substituted in the following:
$SOME_VARIABLE${some_OTHER_variable}$another_variable
If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.
If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).
If it is not found there, the input string is returned as is.
If the value to substitute is not a string, it is returned as-is.
If the value to substitute begins with dollar_sign_escape_string it is not substituted.
If the value starts with the keyword `secret|`, it tries to apply secret store substitution.
:param template_str: a string that might or might not be of the form ${SOME_VARIABLE}
or $SOME_VARIABLE
:param config_variables_dict: a dictionary of config variables. It is loaded from the
config variables store (by default, "uncommitted/config_variables.yml file)
:param dollar_sign_escape_string: a string that will be used in place of a `$` when substitution
is not desired.
:return: a string with values substituted, or the same object if template_str is not a string.
"""
if template_str is None:
return template_str
# 1. Make substitutions for non-escaped patterns
try:
match = re.finditer(
r"(?<!\\)\$\{(.*?)\}|(?<!\\)\$([_a-zA-Z][_a-zA-Z0-9]*)", template_str
)
except TypeError:
# If the value is not a string (e.g., a boolean), we should return it as is
return template_str
for m in match:
# Match either the first group e.g. ${Variable} or the second e.g. $Variable
config_variable_name = m.group(1) or m.group(2)
config_variable_value = config_variables_dict.get(config_variable_name)
if config_variable_value is not None:
if not isinstance(config_variable_value, str):
return config_variable_value
template_str = template_str.replace(m.group(), config_variable_value)
else:
raise ge_exceptions.MissingConfigVariableError(
f"""\n\nUnable to find a match for config substitution variable: `{config_variable_name}`.
Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
See https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets""",
missing_config_variable=config_variable_name,
)
# 2. Replace the "$"'s that had been escaped
template_str = template_str.replace(dollar_sign_escape_string, "$")
template_str = substitute_value_from_secret_store(template_str)
return template_str
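# Minimal usage sketch (illustrative only, not exercised by this module); it
# assumes a config variables dict such as the one loaded from
# uncommitted/config_variables.yml.
def _example_substitute_config_variable():
    config_variables = {"DB_USER": "alice"}
    # ${VAR} and $VAR forms are replaced from the config variables dict.
    assert substitute_config_variable("${DB_USER}", config_variables) == "alice"
    # Escaped dollar signs are restored instead of substituted.
    assert substitute_config_variable(r"\$DB_USER", config_variables) == "$DB_USER"
    # Non-string values pass through unchanged.
    assert substitute_config_variable(True, config_variables) is True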
def substitute_value_from_secret_store(value):
"""
This method takes a value, tries to parse the value to fetch a secret from a secret manager
and returns the secret's value only if the input value is a string and contains one of the following patterns:
- AWS Secrets Manager: the input value starts with ``secret|arn:aws:secretsmanager``
- GCP Secret Manager: the input value matches the following regex ``^secret\\|projects\\/[a-z0-9\\_\\-]{6,30}\\/secrets``
- Azure Key Vault: the input value matches the following regex ``^secret\\|https:\\/\\/[a-zA-Z0-9\\-]{3,24}\\.vault\\.azure\\.net``
Input value examples:
- AWS Secrets Manager: ``secret|arn:aws:secretsmanager:eu-west-3:123456789012:secret:my_secret``
- GCP Secret Manager: ``secret|projects/gcp_project_id/secrets/my_secret``
- Azure Key Vault: ``secret|https://vault-name.vault.azure.net/secrets/my-secret``
:param value: a string that might or might not start with `secret|`
:return: a string with the value substituted by the secret from the secret store,
or the same object if value is not a string.
"""
if isinstance(value, str) and value.startswith("secret|"):
if value.startswith("secret|arn:aws:secretsmanager"):
return substitute_value_from_aws_secrets_manager(value)
elif re.compile(r"^secret\|projects\/[a-z0-9\_\-]{6,30}\/secrets").match(value):
return substitute_value_from_gcp_secret_manager(value)
elif re.compile(
r"^secret\|https:\/\/[a-zA-Z0-9\-]{3,24}\.vault\.azure\.net"
).match(value):
return substitute_value_from_azure_keyvault(value)
return value
def substitute_value_from_aws_secrets_manager(value):
"""
    This method uses a boto3 client and the secretsmanager service to try to retrieve the secret value
from the elements it is able to parse from the input value.
- value: string with pattern ``secret|arn:aws:secretsmanager:${region_name}:${account_id}:secret:${secret_name}``
optional : after the value above, a secret version can be added ``:${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- region_name: `AWS region used by the secrets manager <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_
- account_id: `Account ID for the AWS account used by the secrets manager <https://docs.aws.amazon.com/en_us/IAM/latest/UserGuide/console_account-alias.html>`_
This value is currently not used.
- secret_name: Name of the secret
- secret_version: UUID of the version of the secret
    - secret_key: Only if the secret's data is a JSON string, the key of the dict that should be retrieved
:param value: a string that starts with ``secret|arn:aws:secretsmanager``
:return: a string with the value substituted by the secret from the AWS Secrets Manager store
:raises: ImportError, ValueError
"""
regex = re.compile(
r"^secret\|arn:aws:secretsmanager:([a-z\-0-9]+):([0-9]{12}):secret:([a-zA-Z0-9\/_\+=\.@\-]+)"
r"(?:\:([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}))?(?:\|([^\|]+))?$"
)
if not boto3:
logger.error(
"boto3 is not installed, please install great_expectations with aws_secrets extra > "
"pip install great_expectations[aws_secrets]"
)
raise ImportError("Could not import boto3")
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}")
region_name = matches.group(1)
secret_name = matches.group(3)
secret_version = matches.group(4)
secret_key = matches.group(5)
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(service_name="secretsmanager", region_name=region_name)
if secret_version:
secret_response = client.get_secret_value(
SecretId=secret_name, VersionId=secret_version
)
else:
secret_response = client.get_secret_value(SecretId=secret_name)
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if "SecretString" in secret_response:
secret = secret_response["SecretString"]
else:
secret = base64.b64decode(secret_response["SecretBinary"]).decode("utf-8")
if secret_key:
secret = json.loads(secret)[secret_key]
return secret
def substitute_value_from_gcp_secret_manager(value):
"""
    This method uses a google.cloud.secretmanager.SecretManagerServiceClient to try to retrieve the secret value
from the elements it is able to parse from the input value.
value: string with pattern ``secret|projects/${project_id}/secrets/${secret_name}``
optional : after the value above, a secret version can be added ``/versions/${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- project_id: `Project ID of the GCP project on which the secret manager is implemented <https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin>`_
- secret_name: Name of the secret
- secret_version: ID of the version of the secret
    - secret_key: Only if the secret's data is a JSON string, the key of the dict that should be retrieved
:param value: a string that matches the following regex ``^secret|projects/[a-z0-9_-]{6,30}/secrets``
:return: a string with the value substituted by the secret from the GCP Secret Manager store
:raises: ImportError, ValueError
"""
regex = re.compile(
r"^secret\|projects\/([a-z0-9\_\-]{6,30})\/secrets/([a-zA-Z\_\-]{1,255})"
r"(?:\/versions\/([a-z0-9]+))?(?:\|([^\|]+))?$"
)
if not secretmanager:
logger.error(
"secretmanager is not installed, please install great_expectations with gcp extra > "
"pip install great_expectations[gcp]"
)
raise ImportError("Could not import secretmanager from google.cloud")
client = secretmanager.SecretManagerServiceClient()
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}")
project_id = matches.group(1)
secret_id = matches.group(2)
secret_version = matches.group(3)
secret_key = matches.group(4)
if not secret_version:
secret_version = "latest"
name = f"projects/{project_id}/secrets/{secret_id}/versions/{secret_version}"
try:
secret = client.access_secret_version(name=name)._pb.payload.data.decode(
"utf-8"
)
except AttributeError:
secret = client.access_secret_version(name=name).payload.data.decode(
"utf-8"
) # for google-cloud-secret-manager < 2.0.0
if secret_key:
secret = json.loads(secret)[secret_key]
return secret
def substitute_value_from_azure_keyvault(value):
"""
    This method uses an azure.identity.DefaultAzureCredential to authenticate to the Azure SDK for Python
and a azure.keyvault.secrets.SecretClient to try to retrieve the secret value from the elements
it is able to parse from the input value.
- value: string with pattern ``secret|https://${vault_name}.vault.azure.net/secrets/${secret_name}``
optional : after the value above, a secret version can be added ``/${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- vault_name: `Vault name of the secret manager <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning>`_
- secret_name: Name of the secret
- secret_version: ID of the version of the secret
    - secret_key: Only if the secret's data is a JSON string, the key of the dict that should be retrieved
:param value: a string that matches the following regex ``^secret|https://[a-zA-Z0-9-]{3,24}.vault.azure.net``
:return: a string with the value substituted by the secret from the Azure Key Vault store
:raises: ImportError, ValueError
"""
regex = re.compile(
r"^secret\|(https:\/\/[a-zA-Z0-9\-]{3,24}\.vault\.azure\.net)\/secrets\/([0-9a-zA-Z-]+)"
r"(?:\/([a-f0-9]{32}))?(?:\|([^\|]+))?$"
)
if not SecretClient:
logger.error(
"SecretClient is not installed, please install great_expectations with azure_secrets extra > "
"pip install great_expectations[azure_secrets]"
)
raise ImportError("Could not import SecretClient from azure.keyvault.secrets")
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}")
keyvault_uri = matches.group(1)
secret_name = matches.group(2)
secret_version = matches.group(3)
secret_key = matches.group(4)
credential = DefaultAzureCredential()
client = SecretClient(vault_url=keyvault_uri, credential=credential)
secret = client.get_secret(name=secret_name, version=secret_version).value
if secret_key:
secret = json.loads(secret)[secret_key]
return secret
def substitute_all_config_variables(
data, replace_variables_dict, dollar_sign_escape_string: str = r"\$"
):
"""
Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
config object for their values.
The method traverses the dictionary recursively.
:param data:
:param replace_variables_dict:
:return: a dictionary with all the variables replaced with their values
"""
if isinstance(data, DataContextConfig):
data = DataContextConfigSchema().dump(data)
if isinstance(data, CheckpointConfig):
data = CheckpointConfigSchema().dump(data)
if isinstance(data, dict) or isinstance(data, OrderedDict):
return {
k: substitute_all_config_variables(v, replace_variables_dict)
for k, v in data.items()
}
elif isinstance(data, list):
return [
substitute_all_config_variables(v, replace_variables_dict) for v in data
]
return substitute_config_variable(
data, replace_variables_dict, dollar_sign_escape_string
)
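# Minimal usage sketch (illustrative only): nested structures are walked
# recursively and every string leaf goes through substitute_config_variable.
# The config and variable names are made up for the example.
def _example_substitute_all_config_variables():
    variables = {"HOST": "db.internal", "PORT": "5432"}
    config = {"datasource": {"url": "postgresql://${HOST}:${PORT}/app", "echo": False}}
    substituted = substitute_all_config_variables(config, variables)
    assert substituted["datasource"]["url"] == "postgresql://db.internal:5432/app"
    # Non-string leaves are returned unchanged.
    assert substituted["datasource"]["echo"] is False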
def file_relative_path(dunderfile, relative_path):
"""
This function is useful when one needs to load a file that is
relative to the position of the current file. (Such as when
    you encode a configuration file path in a source file and want
    it to be runnable from any current working directory.)
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
"""
return os.path.join(os.path.dirname(dunderfile), relative_path)
def parse_substitution_variable(substitution_variable: str) -> Optional[str]:
"""
Parse and check whether the string contains a substitution variable of the case insensitive form ${SOME_VAR} or $SOME_VAR
Args:
substitution_variable: string to be parsed
Returns:
string of variable name e.g. SOME_VAR or None if not parsable. If there are multiple substitution variables this currently returns the first e.g. $SOME_$TRING -> $SOME_
"""
substitution_variable_name = pp.Word(pp.alphanums + "_").setResultsName(
"substitution_variable_name"
)
curly_brace_parser = "${" + substitution_variable_name + "}"
non_curly_brace_parser = "$" + substitution_variable_name
both_parser = curly_brace_parser | non_curly_brace_parser
try:
parsed_substitution_variable = both_parser.parseString(substitution_variable)
return parsed_substitution_variable.substitution_variable_name
except pp.ParseException:
return None
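# Minimal usage sketch (illustrative only) of the expected parse results.
def _example_parse_substitution_variable():
    assert parse_substitution_variable("${SOME_VAR}") == "SOME_VAR"
    assert parse_substitution_variable("$SOME_VAR") == "SOME_VAR"
    assert parse_substitution_variable("not_a_variable") is None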
def default_checkpoints_exist(directory_path: str) -> bool:
checkpoints_directory_path: str = os.path.join(
directory_path,
DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME.value,
)
return os.path.isdir(checkpoints_directory_path)
class PasswordMasker:
"""
Used to mask passwords in Datasources. Does not mask sqlite urls.
Example usage
masked_db_url = PasswordMasker.mask_db_url(url)
where url = "postgresql+psycopg2://username:password@host:65432/database"
and masked_url = "postgresql+psycopg2://username:***@host:65432/database"
"""
MASKED_PASSWORD_STRING = "***"
@staticmethod
def mask_db_url(url: str, use_urlparse: bool = False, **kwargs) -> str:
"""
Mask password in database url.
Uses sqlalchemy engine parsing if sqlalchemy is installed, otherwise defaults to using urlparse from the stdlib which does not handle kwargs.
Args:
url: Database url e.g. "postgresql+psycopg2://username:password@host:65432/database"
use_urlparse: Skip trying to parse url with sqlalchemy and use urlparse
**kwargs: passed to create_engine()
Returns:
url with password masked e.g. "postgresql+psycopg2://username:***@host:65432/database"
"""
if sa is not None and use_urlparse is False:
engine = sa.create_engine(url, **kwargs)
return engine.url.__repr__()
else:
warnings.warn(
"SQLAlchemy is not installed, using urlparse to mask database url password which ignores **kwargs."
)
# oracle+cx_oracle does not parse well using urlparse, parse as oracle then swap back
replace_prefix = None
if url.startswith("oracle+cx_oracle"):
replace_prefix = {"original": "oracle+cx_oracle", "temporary": "oracle"}
url = url.replace(
replace_prefix["original"], replace_prefix["temporary"]
)
parsed_url = urlparse(url)
# Do not parse sqlite
if parsed_url.scheme == "sqlite":
return url
colon = ":" if parsed_url.port is not None else ""
masked_url = (
f"{parsed_url.scheme}://{parsed_url.username}:{PasswordMasker.MASKED_PASSWORD_STRING}"
f"@{parsed_url.hostname}{colon}{parsed_url.port or ''}{parsed_url.path or ''}"
)
if replace_prefix is not None:
masked_url = masked_url.replace(
replace_prefix["temporary"], replace_prefix["original"]
)
return masked_url
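# Minimal usage sketch (illustrative only): the urlparse fallback masks the
# password without sqlalchemy and leaves sqlite urls untouched. The credentials
# below are made up.
def _example_mask_db_url():
    url = "postgresql+psycopg2://scott:tiger@localhost:65432/mydb"
    masked = PasswordMasker.mask_db_url(url, use_urlparse=True)
    assert masked == "postgresql+psycopg2://scott:***@localhost:65432/mydb"
    # sqlite urls are returned unchanged
    assert PasswordMasker.mask_db_url("sqlite:///tmp/db.sqlite", use_urlparse=True) == "sqlite:///tmp/db.sqlite"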
import newrelic.api.external_trace
def instrument(module):
def url_query(graph_obj, method, path, *args, **kwargs):
return '/'.join([graph_obj.url, path])
newrelic.api.external_trace.wrap_external_trace(
module, 'GraphAPI._query', 'facepy', url_query)
#def url_method(graph_obj, path, *args, **kwargs):
#return '/'.join([graph_obj.url, path])
#newrelic.api.external_trace.wrap_external_trace(
#module, 'GraphAPI.get', 'facepy', url_method)
#newrelic.api.external_trace.wrap_external_trace(
#module, 'GraphAPI.post', 'facepy', url_method)
#newrelic.api.external_trace.wrap_external_trace(
#module, 'GraphAPI.delete', 'facepy', url_method)
#def url_search(graph_obj, path, *args, **kwargs):
#return '/'.join([graph_obj.url, 'search'])
#newrelic.api.external_trace.wrap_external_trace(
#module, 'GraphAPI.search', 'facepy', url_search)
# Environment is not present in original assistive_gym library at https://github.com/Healthcare-Robotics/assistive-gym
from gym import spaces
import numpy as np
import pybullet as p
from .env import AssistiveEnv
from gym.utils import seeding
from collections import OrderedDict
import os
import time
reach_arena = (np.array([-.25, -.5, 1]), np.array([.6, .4, .2]))
default_orientation = p.getQuaternionFromEuler([0, 0, 0])
class ValveEnv(AssistiveEnv):
def __init__(self, robot_type='jaco', success_dist=.05, target_indices=None, session_goal=False, frame_skip=5,
capture_frames=False, stochastic=True, debug=False, min_error_threshold=np.pi / 16,
max_error_threshold=np.pi / 4, num_targets=None, use_rand_init_angle=True, term_cond=None,
term_thresh=20, preserve_angle=False, **kwargs):
super(ValveEnv, self).__init__(robot_type=robot_type, task='reaching', frame_skip=frame_skip, time_step=0.02,
action_robot_len=7, obs_robot_len=14)
obs_dim = 3 + 4 + 3 + 2 + 1 + 7 + 7
encoder_obs_dim = 3 + 2
if stochastic:
obs_dim += 3 # for valve pos
encoder_obs_dim += 3
self.observation_space = spaces.Box(-np.inf, np.inf, (obs_dim,), dtype=np.float32)
self.encoder_observation_space = spaces.Box(-np.inf, np.inf, (encoder_obs_dim,), dtype=np.float32)
self.num_targets = num_targets
self.success_dist = success_dist
self.debug = debug
self.stochastic = stochastic
self.goal_feat = ['target_angle'] # Just an FYI
self.feature_sizes = OrderedDict({'goal': 2})
self.session_goal = session_goal
self.use_rand_init_angle = use_rand_init_angle
if self.num_targets is not None:
self.target_angles = np.linspace(-np.pi, np.pi, self.num_targets, endpoint=False)
if not self.use_rand_init_angle:
self.target_angles = np.delete(self.target_angles, np.argwhere(self.target_angles == 0))
self.target_indices = np.arange(len(self.target_angles))
self.min_error_threshold = min_error_threshold
self.max_error_threshold = max_error_threshold
self.error_threshold = min_error_threshold
self.preserve_angle = preserve_angle
self.last_angle = None
self.wall_color = None
self.calibrate = False
self.term_cond = term_cond
self.term_thresh = term_thresh
self.n_success = 0 # number of consecutive steps in success condition
self.target_norm = .55
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
self.init_pos_random, _ = seeding.np_random(seed)
return [seed]
def step(self, action):
old_tool_pos = self.tool_pos
self.take_step(action, robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'))
obs = self._get_obs([0])
reward = np.exp(-np.abs(self.angle_diff(self.valve_angle, self.target_angle))) - 1
direction = np.zeros(3)
if self.task_success:
index = 0
self.n_success += 1
tracking_angle = self.valve_angle
else:
tracking_angle = self.valve_angle
if self.angle_diff(self.valve_angle, self.target_angle) > 0:
index = 1
tracking_angle = self.wrap_angle(tracking_angle - 2 * self.min_error_threshold)
else:
index = 2
tracking_angle = self.wrap_angle(tracking_angle + 2 * self.min_error_threshold)
self.n_success = 0
tracking_input = self.target_norm * np.array((-np.cos(tracking_angle), np.sin(tracking_angle))) + \
np.delete(self.valve_pos, 1)
direction[index] = 1
if self.n_success >= self.term_thresh:
color = [0, 1, 0, 1]
elif self.task_success:
color = [0, 0, 1, 1]
else:
color = [1, 0, 0, 1]
p.changeVisualShape(self.target_indicator, -1, rgbaColor=color)
info = {
'task_success': self.task_success,
'old_tool_pos': old_tool_pos,
'tool_pos': self.tool_pos,
'valve_pos': self.valve_pos,
'valve_angle': self.valve_angle,
'target_angle': self.target_angle,
'error_threshold': self.error_threshold,
'direction': direction,
'angle_error': self.angle_diff(self.valve_angle, self.target_angle),
'target_position': self.target_position,
'tracking_input': tracking_input
}
done = False
if self.term_cond == 'auto':
done = self.n_success >= self.term_thresh
elif self.term_cond == 'keyboard':
keys = p.getKeyboardEvents()
if self.n_success >= self.term_thresh and p.B3G_RETURN in keys and keys[p.B3G_RETURN] & p.KEY_WAS_TRIGGERED:
done = True
time.sleep(1)
info['feedback'] = True if done else -1
return obs, reward, done, info
def _get_obs(self, forces):
robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices,
physicsClientId=self.id)
robot_joint_positions = np.array([x[0] for x in robot_joint_states])
robot_joint_velocities = np.array([x[1] for x in robot_joint_states])
angle_features = [np.sin(self.valve_angle), np.cos(self.valve_angle)]
obs = [self.tool_pos, self.tool_orient, self.tool_velocity,
angle_features, [self.valve_velocity],
robot_joint_positions, robot_joint_velocities
]
encoder_obs = [self.tool_pos, angle_features]
if self.stochastic:
obs.append(self.valve_pos)
encoder_obs.append(self.valve_pos)
robot_obs = dict(
raw_obs=np.concatenate(obs),
encoder_obs=np.concatenate(encoder_obs),
hindsight_goal=np.array([np.sin(self.valve_angle), np.cos(self.valve_angle)]),
goal=self.goal.copy(),
)
self.last_angle = self.valve_angle
return robot_obs
def update_curriculum(self, success):
if success:
self.error_threshold -= self.min_error_threshold
self.error_threshold = max(self.min_error_threshold, self.error_threshold)
else:
self.error_threshold += self.min_error_threshold
self.error_threshold = min(self.max_error_threshold, self.error_threshold)
def reset(self):
"""set up standard environment"""
self.setup_timing()
_human, self.wheelchair, self.robot, self.robot_lower_limits, self.robot_upper_limits, _human_lower_limits, \
_human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender \
= self.world_creation.create_new_world(furniture_type='wheelchair', init_human=False,
static_human_base=True, human_impairment='random',
print_joints=False, gender='random')
self.robot_lower_limits = self.robot_lower_limits[self.robot_left_arm_joint_indices]
self.robot_upper_limits = self.robot_upper_limits[self.robot_left_arm_joint_indices]
self.reset_robot_joints()
wheelchair_pos, wheelchair_orient = p.getBasePositionAndOrientation(self.wheelchair, physicsClientId=self.id)
p.resetBasePositionAndOrientation(self.robot, np.array(wheelchair_pos) + np.array([-0.35, -0.3, 0.3]),
p.getQuaternionFromEuler([0, 0, -np.pi / 2.0], physicsClientId=self.id),
physicsClientId=self.id)
base_pos, base_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
self.human_controllable_joint_indices = []
self.human_lower_limits = np.array([])
self.human_upper_limits = np.array([])
"""set up target and initial robot position"""
if not self.session_goal:
self.set_target_index() # instance override in demos
self.reset_noise()
self.init_robot_arm()
wall_collision = p.createCollisionShape(p.GEOM_BOX, halfExtents=[4, .1, 1])
wall_visual = p.createVisualShape(p.GEOM_BOX, halfExtents=[4, .1, 1], rgbaColor=self.wall_color)
wall_pos, wall_orient = np.array([0., -1.1, 1.]), np.array([0, 0, 0, 1])
if self.stochastic and not self.calibrate:
wall_pos = wall_pos + self.wall_noise
self.wall = p.createMultiBody(basePosition=wall_pos, baseOrientation=wall_orient,
baseCollisionShapeIndex=wall_collision, baseVisualShapeIndex=wall_visual,
physicsClientId=self.id)
valve_pos, valve_orient = p.multiplyTransforms(wall_pos, wall_orient, [0, 0.1, 0],
p.getQuaternionFromEuler([0, 0, 0]),
physicsClientId=self.id)
if self.stochastic:
valve_pos = np.array(valve_pos) + self.valve_pos_noise
self.valve = p.loadURDF(os.path.join(self.world_creation.directory, 'valve', 'valve.urdf'),
basePosition=valve_pos, useFixedBase=True,
baseOrientation=valve_orient, globalScaling=1,
physicsClientId=self.id)
if self.preserve_angle and self.last_angle is not None:
p.resetJointState(self.valve, 0, self.last_angle, physicsClientId=self.id)
elif self.use_rand_init_angle:
p.resetJointState(self.valve, 0, self.rand_init_angle, physicsClientId=self.id)
"""configure pybullet"""
p.setGravity(0, 0, 0, physicsClientId=self.id)
p.setPhysicsEngineParameter(numSubSteps=5, numSolverIterations=10, physicsClientId=self.id)
# Enable rendering
p.resetDebugVisualizerCamera(cameraDistance=.1, cameraYaw=180, cameraPitch=-10,
cameraTargetPosition=[0, -.3, 1.1], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
self.goal = np.array([np.sin(self.target_angle), np.cos(self.target_angle)])
sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=0.1,
rgbaColor=[1, 0, 0, 1], physicsClientId=self.id)
target_coord = self.target_norm * np.array((-np.cos(self.target_angle), 0, np.sin(self.target_angle))) + \
valve_pos + [0, 0.105, 0]
self.target_indicator = p.createMultiBody(baseMass=0.0, baseCollisionShapeIndex=-1,
baseVisualShapeIndex=sphere_visual, basePosition=target_coord,
useMaximalCoordinates=False, physicsClientId=self.id)
self.n_success = 0
obs = self._get_obs([0])
return obs
def init_start_pos(self):
"""exchange this function for curriculum"""
self.init_pos = np.array([0, -.5, 1.1])
self.init_pos += self.init_pos_random.uniform([-0.1, -0.1, -0.1], [0.1, 0.1, 0.1], size=3)
def init_robot_arm(self):
self.init_start_pos()
init_orient = p.getQuaternionFromEuler(np.array([0, np.pi / 2.0, 0]), physicsClientId=self.id)
self.util.ik_random_restarts(self.robot, 11, self.init_pos, init_orient, self.world_creation,
self.robot_left_arm_joint_indices, self.robot_lower_limits,
self.robot_upper_limits,
ik_indices=[0, 1, 2, 3, 4, 5, 6], max_iterations=100,
max_ik_random_restarts=10, random_restart_threshold=0.03, step_sim=True)
self.world_creation.set_gripper_open_position(self.robot, position=1, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[0.001] * 3, pos_offset=[0, 0, 0.02],
orient_offset=p.getQuaternionFromEuler([0, -np.pi / 2.0, 0],
physicsClientId=self.id),
maximal=False)
def set_target_index(self, index=None):
if self.num_targets is not None:
if index is None:
self.target_index = self.np_random.choice(self.target_indices)
else:
self.target_index = index
def reset_noise(self):
self.rand_init_angle = (self.np_random.rand() - 0.5) * 2 * np.pi
# init angle either self.rand_init_angle or 0
if self.preserve_angle and self.last_angle is not None:
avoid = self.last_angle
elif self.use_rand_init_angle:
avoid = self.rand_init_angle
else:
avoid = 0
self.rand_angle = None
while self.rand_angle is None or np.abs(self.angle_diff(self.rand_angle, avoid)) < self.error_threshold:
self.rand_angle = (self.np_random.rand() - 0.5) * 2 * np.pi
if self.stochastic:
self.valve_pos_noise = np.array([self.np_random.uniform(-.05, .05), 0, 0])
# no y noise so can use 2D coordinates only for goal estimation
self.wall_noise = np.zeros(3)
def wrong_goal_reached(self):
return False
def calibrate_mode(self, calibrate, split):
self.wall_color = [255 / 255, 187 / 255, 120 / 255, 1] if calibrate else None
self.calibrate = calibrate
@property
def tool_pos(self):
return np.array(p.getBasePositionAndOrientation(self.tool, physicsClientId=self.id)[0])
@property
def tool_orient(self):
return np.array(p.getBasePositionAndOrientation(self.tool, physicsClientId=self.id)[1])
@property
def tool_velocity(self):
return np.array(p.getBaseVelocity(self.tool, physicsClientId=self.id)[0])
@property
def valve_pos(self):
return p.getLinkState(self.valve, 0, computeForwardKinematics=True, physicsClientId=self.id)[0]
@property
def valve_angle(self):
return self.wrap_angle(p.getJointStates(self.valve, jointIndices=[0], physicsClientId=self.id)[0][0])
@property
def valve_velocity(self):
return p.getJointStates(self.valve, jointIndices=[0], physicsClientId=self.id)[0][1]
@property
def target_angle(self):
return self.rand_angle if self.num_targets is None or not self.calibrate else \
self.wrap_angle(self.target_angles[self.target_index])
@property
def target_position(self):
return np.delete(np.array(p.getBasePositionAndOrientation(self.target_indicator, physicsClientId=self.id)[0]), 1)
def wrap_angle(self, angle):
return angle - 2 * np.pi * np.floor((angle + np.pi) / (2 * np.pi))
def angle_diff(self, angle1, angle2):
a = angle1 - angle2
if a > np.pi:
a -= 2 * np.pi
elif a < -np.pi:
a += 2 * np.pi
return a
@property
def task_success(self):
return np.abs(self.angle_diff(self.valve_angle, self.target_angle)) < self.error_threshold
class ValveJacoEnv(ValveEnv):
def __init__(self, **kwargs):
super().__init__(robot_type='jaco', **kwargs)
"""
Routines for Fourier transform.
"""
from __future__ import division
from ..datatable.wrapping import wrap
from ..datatable import column
from . import waveforms, specfunc
import numpy as np
import numpy.fft as fft
def truncate_len_pow2(trace, truncate_power=None):
"""
    Truncate trace length to the nearest power of 2.
    If `truncate_power` is not ``None``, it determines the minimal power of 2 that has to divide the length
    (if it is ``None``, the maximal possible power is used).
"""
if truncate_power==0:
return trace
    if truncate_power is not None and truncate_power<0:
        truncate_power=None
l=len(trace)
chunk_l=1
power=0
while chunk_l*2<=l:
chunk_l=chunk_l*2
power=power+1
if truncate_power is not None and power>=truncate_power:
break
l=(l//chunk_l)*chunk_l
return wrap(trace).t[:l]
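# Minimal usage sketch (illustrative only), assuming a plain numpy trace as
# elsewhere in this module: a 1000-point trace keeps 512 points by default
# (maximal power of 2), but keeps all 1000 points with truncate_power=3,
# since 1000 is already divisible by 2**3.
def _example_truncate_len_pow2():
    trace = np.arange(1000)
    assert len(truncate_len_pow2(trace)) == 512
    assert len(truncate_len_pow2(trace, truncate_power=3)) == 1000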
def normalize_fourier_transform(ft, normalization="none"):
"""
Normalize the Fourier transform data.
`ft` is a 2D data with 2 columns: frequency and complex amplitude.
`normalization` can be ``'none'`` (none done), ``'sum'`` (the power sum is preserved: ``sum(abs(ft)**2)==sum(abs(trace)**2)``)
or ``'density'`` (power spectral density normalization).
"""
l=len(ft)
if normalization=="sum":
ft=wrap(ft).copy()
ft[:,1]=ft[:,1]/np.sqrt(l)
ft=ft.cont
elif normalization=="density" or normalization=="dBc":
ft=wrap(ft).copy()
norm=np.sqrt(l**2*abs(ft[1,0]-ft[0,0]))
if normalization=="dBc":
norm=norm*ft[len(ft)//2,1]/l
ft[:,1]=ft[:,1]/norm
ft=ft.cont
elif normalization!="none":
raise ValueError("unrecognized normalization mode: {0}".format(normalization))
return ft
def apply_window(trace_values, window="rectangle", window_power_compensate=True):
"""
Apply FT window to the trace.
    If ``window_power_compensate==True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
"""
if window=="rectangle":
return trace_values
window=specfunc.get_window_func(window)
window_trace=window(np.arange(len(trace_values)),len(trace_values),ft_compensated=window_power_compensate)
return trace_values*window_trace
def fourier_transform(trace, truncate=False, truncate_power=None, normalization="none", no_time=False, single_sided=False, window="rectangle", window_power_compensate=True):
"""
Calculate a fourier transform of the trace.
Args:
trace: Time trace to be transformed. Either an ``Nx2`` array, where ``trace[:,0]`` is time and ``trace[:,1]`` is data (real or complex),
or an ``Nx3`` array, where ``trace[:,0]`` is time, ``trace[:,1]`` is the real part of the signal and ``trace[:,2]`` is the imaginary part.
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
normalization (str): Fourier transform normalization:
- ``'none'``: no normalization;
            - ``'sum'``: the norm of the data is conserved (``sum(abs(ft[:,1])**2)==sum(abs(trace[:,1])**2)``);
- ``'density'``: power spectral density normalization, in ``x/rtHz`` (``sum(abs(ft[:,1])**2)*df==mean(abs(trace[:,1])**2)``);
- ``'dBc'``: like ``'density'``, but normalized to the mean trace value.
no_time (bool): If ``True``, assume that the time axis is missing and use the standard index instead (if trace is 1D data, `no_time` is always ``True``).
single_sided (bool): If ``True``, only leave positive frequency side of the transform.
window (str): FT window. Can be ``'rectangle'`` (essentially, no window), ``'hann'`` or ``'hamming'``.
window_power_compensate (bool): If ``True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
Returns:
a two-column array, where the first column is frequency, and the second is complex FT data.
"""
wrapped=wrap(trace)
column_names=["frequency","ft_data"]
if trace.ndim==1:
trace_values=wrapped[:]
else:
if wrapped.shape()[1]==(1 if no_time else 2):
trace_values=wrapped[:,-1]
elif wrapped.shape()[1]==(2 if no_time else 3):
trace_values=wrapped[:,-2]+1j*wrapped[:,-1]
else:
raise ValueError("fourier_transform doesn't work for an array with shape {0}".format(wrapped.shape()))
dt=1. if (no_time or wrapped.ndim()==1) else wrapped[1,0]-wrapped[0,0]
if len(trace_values)==0:
return wrapped.from_array(np.zeros((0,2)),column_names,wrapped=False)
if len(trace_values)==1:
return wrapped.from_array(np.array([[0,trace_values[0]]]),column_names,wrapped=False)
if truncate:
trace_values=truncate_len_pow2(trace_values,truncate_power=truncate_power)
trace_values=apply_window(trace_values,window,window_power_compensate=window_power_compensate)
ft=fft.fftshift(fft.fft(trace_values))
df=1./(dt*len(ft))
frequencies=column.crange(-len(ft)/2.,len(ft)/2.)*df
ft=wrapped.from_columns([frequencies.as_array(),ft],column_names,wrapped=False) if wrapped.ndim()>1 else np.column_stack((frequencies,ft))
ft=normalize_fourier_transform(ft,normalization)
if single_sided:
ft=wrap(ft).t[len(ft)//2:,:]
ft[0,0]=0 # numerical error compensation
return ft
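# Minimal usage sketch (illustrative only), assuming a plain numpy Nx2 trace as
# described in the docstring: a 50 Hz tone sampled at 1 kHz produces a peak at
# +/- 50 Hz in the two-column transform.
def _example_fourier_transform():
    t = np.arange(1024) * 1E-3
    trace = np.column_stack((t, np.sin(2 * np.pi * 50 * t)))
    ft = fourier_transform(trace, normalization="sum")
    peak_freq = abs(ft[np.argmax(abs(ft[:, 1])), 0])
    assert abs(peak_freq - 50) < 1  # within one frequency bin of 50 Hz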
def flip_fourier_transform(ft):
"""
Flip the fourier transform (analogous to making frequencies negative and flipping the order).
"""
ft=wrap(ft).copy()
if len(ft)%2==1:
ft[:,1]=ft[::-1,1]
else:
ft[1::,1]=ft[:0:-1,1]
return ft.cont
def inverse_fourier_transform(ft, truncate=False, truncate_power=None, no_freq=False, zero_loc=None, symmetric_time=False):
"""
Calculate an inverse fourier transform of the trace.
Args:
ft: Fourier transform data to be inverted. Is an ``Nx2`` array, where ``ft[:,0]`` is frequency and ``ft[:,1]`` is fourier transform (real or complex).
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
no_freq (bool): If ``True``, assume that the frequency axis is missing and use the standard index instead (if trace is 1D data, `no_freq` is always ``True``).
zero_loc (bool): Location of the zero frequency point. Can be ``None`` (the one with the value of f-axis closest to zero), ``'center'`` (mid-point)
or an integer index.
symmetric_time (bool): If ``True``, make time axis go from ``(-0.5/df, 0.5/df)`` rather than ``(0, 1./df)``.
Returns:
a two-column array, where the first column is frequency, and the second is the complex-valued trace data.
"""
wrapped=wrap(ft)
column_names=["time","data"]
if len(ft)==0:
return wrapped.from_array(np.zeros((0,2)),column_names,wrapped=False)
if len(ft)==1:
return wrapped.from_array(np.array([[0,wrapped[:,0]]]),column_names,wrapped=False)
no_freq=no_freq or wrapped.ndim()==1
if zero_loc is None:
if no_freq:
zero_freq_point=0
else:
zero_freq_point=waveforms.find_closest_arg(wrapped.c[0],0,ordered=True)
if zero_freq_point is None:
raise ValueError("can't find zero frequency point; closest is {0}".format(wrapped[zero_freq_point,0]))
elif zero_loc=="center":
zero_freq_point=len(ft)//2
else:
zero_freq_point=zero_loc
if wrapped.ndim()==1:
ft_ordered=np.concatenate(( wrapped[zero_freq_point:], wrapped[:zero_freq_point] ))
else:
ft_ordered=np.concatenate(( wrapped[zero_freq_point:,-1], wrapped[:zero_freq_point,-1] ))
if truncate:
ft_ordered=truncate_len_pow2(ft_ordered,truncate_power=truncate_power)
trace=fft.ifft(ft_ordered)
l=len(trace)
df=1. if no_freq else wrapped[1,0]-wrapped[0,0]
dt=1./(df*l)
    times=column.crange(l)*dt
if symmetric_time:
times=times-times[l//2]
trace=np.concatenate((trace[l//2:],trace[:l//2]))
if wrapped.ndim()==1:
return np.column_stack((times,trace))
else:
return wrapped.from_columns([times.as_array(),trace],column_names,wrapped=False)
def power_spectral_density(trace, truncate=False, truncate_power=None, normalization="density", no_time=False, single_sided=False, window="rectangle", window_power_compensate=True):
"""
Calculate a power spectral density of the trace.
Args:
trace: Time trace to be transformed. Either an ``Nx2`` array, where ``trace[:,0]`` is time and ``trace[:,1]`` is data (real or complex),
or an ``Nx3`` array, where ``trace[:,0]`` is time, ``trace[:,1]`` is the real part of the signal and ``trace[:,2]`` is the imaginary part.
truncate (bool): If ``True``, cut the data to the power of 2.
truncate_power: If ``None``, cut to the nearest power of 2; otherwise, cut to the largest possible length that divides ``2**truncate_power``.
Only relevant if ``truncate==True``.
normalization (str): Fourier transform normalization:
- ``'none'``: no normalization;
            - ``'sum'``: the norm of the data is conserved (``sum(PSD[:,1])==sum(abs(trace[:,1])**2)``);
- ``'density'``: power spectral density normalization, in ``x/rtHz`` (``sum(PSD[:,1])*df==mean(abs(trace[:,1])**2)``);
- ``'dBc'``: like ``'density'``, but normalized to the mean trace value.
no_time (bool): If ``True``, assume that the time axis is missing and use the standard index instead (if trace is 1D data, `no_time` is always ``True``).
single_sided (bool): If ``True``, only leave positive frequency side of the PSD.
window (str): FT window. Can be ``'rectangle'`` (essentially, no window), ``'hann'`` or ``'hamming'``.
window_power_compensate (bool): If ``True``, the data is multiplied by a compensating factor to preserve power in the spectrum.
Returns:
a two-column array, where the first column is frequency, and the second is positive PSD.
"""
column_names=["frequency","PSD"]
ft=fourier_transform(trace, truncate=truncate, truncate_power=truncate_power, normalization=normalization, no_time=no_time, single_sided=single_sided, window=window, window_power_compensate=window_power_compensate)
wrapped=wrap(ft)
PSD=wrapped.from_columns((wrapped.c[0].real,abs(wrapped.c[1])**2),column_names,wrapped=False)
return PSD
def get_real_part(ft):
"""
Get the fourier transform of the real part only from the fourier transform of a complex variable.
"""
re_ft=wrap(ft).copy()
re_ft[1:,1]=(ft[1:,1]+ft[:0:-1,1].conjugate())*0.5
re_ft[0,1]=np.real(ft[0,1])
return re_ft.cont
def get_imag_part(ft):
"""
Get the fourier transform of the imaginary part only from the fourier transform of a complex variable.
"""
im_ft=wrap(ft).copy()
im_ft[1:,1]=(im_ft[1:,1]-im_ft[:0:-1,1].conjugate())/2.j
im_ft[0,1]=im_ft[0,1].imag
return im_ft.cont
def get_correlations(ft_a, ft_b, zero_mean=True, normalization="none"):
"""
Calculate the correlation function of the two variables given their fourier transforms.
Args:
ft_a: first variable fourier transform
ft_b: second variable fourier transform
zero_mean (bool): If ``True``, the value corresponding to the zero frequency is set to zero (only fluctuations around means of a and b are calculated).
normalization (str): Can be ``'whole'`` (correlations are normalized by product of PSDs derived from `ft_a` and `ft_b`)
or ``'individual'`` (normalization is done for each frequency individually, so that the absolute value is always 1).
"""
if len(ft_a)!=len(ft_b):
raise ValueError("transforms should be of the same length")
corr=ft_a.copy()
corr[:,1]=corr[:,1]*ft_b[:,1].conjugate()
if (zero_mean):
        corr[len(corr)//2,1]=0.
    if normalization=="whole":
        norm_a=(abs(ft_a[:,1])**2).sum()-abs(ft_a[len(ft_a)//2,1])**2
        norm_b=(abs(ft_b[:,1])**2).sum()-abs(ft_b[len(ft_b)//2,1])**2
corr[:,1]=corr[:,1]/(norm_a*norm_b)**.5
elif normalization=="individual":
norm_factors=abs(ft_a[:,1]*ft_b[:,1])
corr[:,1]=corr[:,1]/norm_factors
elif normalization!="none":
raise ValueError("unrecognized normalization method: {0}".format(normalization))
return corr
from .models import Profile,Business
from django import forms
from django.forms import ModelForm
class NewProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user']
class NewbusinessForm(forms.ModelForm):
class Meta:
model = Business
exclude = ['user','description']
from app import api
from app.controller.soal import Soal
from app.controller.soal import Jawab
api.add_resource(Soal,'/soal')
api.add_resource(Jawab,'/soal/jawab')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import urllib2
from threathunter_common.metrics.influxdbproxy import _extract_metrics_params, get_metrics
from threathunter_common.metrics.metricsagent import MetricsAgent
from threathunter_common.metrics.redismetrics import RedisMetrics
__author__ = "nebula"
def test_extract_params():
print _extract_metrics_params("/db/Monitor/series?p=test&q=select+event%2C+count%28event%29+from+%22Event%22+where+time+%3E+now%28%29-1h+group+by+time%281m%29%2C+event+order+asc&u=root")
print _extract_metrics_params("/db/Monitor/series?p=test&q=select mean(value) from \"test_metrics\" where time > 1438333217779ms and time < 1438333277779ms and (\"tag1\" = 'tag1' or \"tag1\" = 'tag2') group by time(60s)")
print _extract_metrics_params("/db/Monitor/series?p=test&q=select+sum(sum_count)+from+%22auth_pv%22+where+time+%3E+now()-1h+and+hit%3D1+and+qtype%3D%27mobile%27+group+by+time(15m)+fill(0)+order+asc")
print _extract_metrics_params("/db/Monitor/series?p=test&q=select+source_mark,+sum(sum_count)+from+%22crawl_mobile%22+where+time+%3E+now()-1h+group+by+time(10m),+source_mark+order+asc")
def test_redis():
MetricsAgent.get_instance().initialize_by_dict({"redis": {"type": "redis", "host": "localhost", "port": "6379"}}, "redis")
MetricsAgent.get_instance().clear("test", "test")
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
time.sleep(1)
result = get_metrics("/db/test/series?p=test&q=select sum(value) from \"test\" where time > now()-1h and (\"tag1\" = 'tag1' or \"tag1\" = 'tag2') group by time(60s)")
print result
assert result[0]["points"][0][1] == 4.0
def test_influxdb():
MetricsAgent.get_instance().initialize_by_dict({"influxdb": {"type": "influxdb", "url": "http://127.0.0.1:8086/", "username": "test", "password": "<PASSWORD>"}}, "influxdb")
MetricsAgent.get_instance().clear("test", "test")
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
time.sleep(1)
result = get_metrics("/db/test/series?p=test&q=select sum(value) from \"test\" where time > now()-1h and (\"tag1\" = 'tag1' or \"tag1\" = 'tag2') group by time(60s)")
print result
assert result[0]["points"][0][1] == 4.0
def test_proxy():
MetricsAgent.get_instance().initialize_by_dict({"influxdb": {"type": "influxdb", "url": "http://127.0.0.1:8086/", "username": "test", "password": "<PASSWORD>"}}, "influxdb")
MetricsAgent.get_instance().clear("test", "test")
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag1"}, 1.0, 60)
MetricsAgent.get_instance().add_metrics("test", "test", {"tag1": "tag2"}, 3.0, 60)
time.sleep(1)
url = "http://127.0.0.1:8086/db/test/series?p=test&q=select%20sum(value)%20from%20test%20where%20time%20%3E%20now()-1h%20and%20(tag1%20=%20%27tag1%27%20or%20tag1%20=%20%27tag2%27)%20group%20by%20time(60s)&u=root"
original_result = json.loads(urllib2.urlopen(url).read())
proxy_result = get_metrics(url)
print original_result
print proxy_result
assert original_result == proxy_result
from math import log2
def differentRightmostBit(n, m):
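    # (n ^ m) keeps the bits in which n and m differ; x & -x isolates the
    # lowest set bit of x (two's-complement trick), i.e. the rightmost
    # differing bit.  2**log2(...) converts the result back through floats.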
return 2**log2((n^m)&-(n^m))
if __name__ == '__main__':
input0 = [11, 7, 1, 64, 1073741823, 42]
input1 = [13, 23, 0, 65, 1071513599, 22]
expectedOutput = [2, 16, 1, 1, 131072, 4]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = differentRightmostBit(input0[i], input1[i])
assert actual == expected, 'differentRightmostBit({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
import matplotlib.pyplot as plt
import shapefile as shp
import EFD
coeffList = []
# use a fixed no of harmonics
MaxHarmonic = 17
sf = shp.Reader('/home/sgrieve/Hollow_Processing_Files/Mid_Hollows.shp')
# below here is the real processing of the shapes, above is data i/o
# loop over individual polygons in a multipart shapefile
for shaperec in sf.shapeRecords():
# Convert the shape instance into a format that EFD can use
x, y, contour, NormCentroid = EFD.ProcessGeometry(shaperec)
# Compute the final coefficients using the required number of harmonics and
# normalize them
coeffs = EFD.CalculateEFD(x, y, MaxHarmonic)
coeffs = EFD.normalize_efd(coeffs)
coeffList.append(coeffs)
avg = EFD.AverageCoefficients(coeffList, MaxHarmonic)
sd = EFD.AverageSD(coeffList, avg, MaxHarmonic)
a, b = EFD.inverse_transform(avg, harmonic=MaxHarmonic)
c, d = EFD.inverse_transform(avg + sd, harmonic=MaxHarmonic)
e, f = EFD.inverse_transform(avg - sd, harmonic=MaxHarmonic)
# below here is the plotting of an EFD average, with +/- 1 std dev error bounds
ax = EFD.InitPlot()
EFD.PlotEllipse(ax, a, b, 'k', 2.)
EFD.PlotEllipse(ax, c, d, 'r', 0.5)
EFD.PlotEllipse(ax, e, f, 'b', 0.5)
plt.show()
<filename>test.py
import logging
import sys
from huawei_connector import HuaweiTelnet
import yaml
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
with open('test_model.yaml') as stream:
model = yaml.unsafe_load(stream)
huawei_telnet = HuaweiTelnet(host=model['host'],
port=model['port'],
user=model['user'],
pwd=model['<PASSWORD>'],
time_delta=model['time_delta'],
pre_cmd=model['pre_cmd'],
post_cmd=model['post_cmd'])
huawei_telnet.login(network_element='MSC')
huawei_telnet.logout()
|
StarcoderdataPython
|
5053534
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from armada import const
from armada.exceptions.helm_exceptions import HelmCommandException
from armada.handlers import helm
from armada.handlers import test
from armada.tests.unit import base
class TestHandlerTestCase(base.ArmadaTestCase):
def _test_test_release_for_success(self, expected_success, exception):
@mock.patch('armada.handlers.helm.K8s')
def do_test(_):
helm_obj = helm.Helm()
release = 'release'
helm_obj.test_release = mock.Mock()
if exception:
helm_obj.test_release.side_effect = exception
test_handler = test.Test({}, release, helm_obj)
success = test_handler.test_release_for_success()
self.assertEqual(expected_success, success)
do_test()
def test_success(self):
self._test_test_release_for_success(True, None)
def test_failure(self):
self._test_test_release_for_success(
False, HelmCommandException(mock.Mock()))
def test_exception(self):
def test():
self._test_test_release_for_success(False, Exception())
self.assertRaises(Exception, test)
def test_cg_disabled(self):
"""Test that tests are disabled when a chart group disables all
tests.
"""
test_handler = test.Test(
chart={},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=False)
assert test_handler.test_enabled is False
def test_cg_disabled_test_key_enabled(self):
"""Test that tests are enabled when a chart group disables all
tests and the deprecated, boolean `test` key is enabled.
"""
test_handler = test.Test(
chart={'test': True},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=False)
assert test_handler.test_enabled is True
def test_cg_disabled_test_values_enabled(self):
"""Test that tests are enabled when a chart group disables all
        tests and the `test.enabled` key is True.
"""
test_handler = test.Test(
chart={'test': {
'enabled': True
}},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=False)
assert test_handler.test_enabled is True
def test_cg_enabled_test_key_disabled(self):
"""Test that tests are disabled when a chart group enables all
tests and the deprecated, boolean `test` key is disabled.
"""
test_handler = test.Test(
chart={'test': False},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=True)
assert test_handler.test_enabled is False
def test_cg_enabled_test_values_disabled(self):
"""Test that tests are disabled when a chart group enables all
        tests and the `test.enabled` key is False.
"""
test_handler = test.Test(
chart={'test': {
'enabled': False
}},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=True)
assert test_handler.test_enabled is False
def test_enable_all_cg_disabled(self):
"""Test that tests are enabled when the `enable_all` parameter is
True and the chart group `test_enabled` key is disabled.
"""
test_handler = test.Test(
chart={},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
cg_test_charts=False,
enable_all=True)
assert test_handler.test_enabled is True
def test_enable_all_test_key_disabled(self):
"""Test that tests are enabled when the `enable_all` parameter is
True and the deprecated, boolean `test` key is disabled.
"""
test_handler = test.Test(
            chart={'test': False},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
enable_all=True)
assert test_handler.test_enabled is True
def test_enable_all_test_values_disabled(self):
"""Test that tests are enabled when the `enable_all` parameter is
True and the `test.enabled` key is False.
"""
test_handler = test.Test(
chart={'test': {
'enabled': False
}},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock(),
enable_all=True)
assert test_handler.test_enabled is True
def test_deprecated_test_key_false(self):
"""Test that tests can be disabled using the deprecated, boolean value
for a chart's test key.
"""
test_handler = test.Test(
chart={'test': False},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock())
assert not test_handler.test_enabled
def test_deprecated_test_key_timeout(self):
"""Test that the default helm timeout is used when tests are enabled
using the deprecated, boolean value for a chart's `test` key.
"""
mock_helm = mock.Mock()
test_handler = test.Test(
chart={'test': True},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock_helm)
assert test_handler.timeout == const.DEFAULT_TEST_TIMEOUT
def test_tests_disabled(self):
"""Test that tests are disabled by a chart's values using the
`test.enabled` path.
"""
test_handler = test.Test(
chart={'test': {
'enabled': False
}},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock())
assert test_handler.test_enabled is False
def test_no_test_values(self):
"""Test that the default values are enforced when no chart `test`
values are provided (i.e. tests are enabled).
"""
test_handler = test.Test(
chart={},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock())
assert test_handler.test_enabled is True
def test_default_timeout_value(self):
"""Test that the default timeout value is used if a test timeout value,
`test.timeout` is not provided.
"""
test_handler = test.Test(
chart={'test': {
'enabled': True
}},
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock())
assert test_handler.timeout == helm.DEFAULT_HELM_TIMEOUT
def test_timeout_value(self):
"""Test that a chart's test timeout value, `test.timeout` overrides the
default test timeout.
"""
chart = {'test': {'enabled': True, 'timeout': 800}}
test_handler = test.Test(
chart=chart,
release_id=helm.HelmReleaseId('release_ns', 'release'),
helm=mock.Mock())
assert test_handler.timeout is chart['test']['timeout']
<filename>gp_lib/kernels.py
import numpy as np
import scipy as sp
import scipy.spatial
from functools import reduce
class Kernel(object):
def __call__(self, x, y):
"""
Returns
-------
kernel: m x n array
"""
raise NotImplementedError
def trace_x_x(self, x):
"""
Returns tr(k(x, x))
-------
trace: scalar
"""
raise NotImplementedError
def get_theta(self):
"""
Returns
-------
theta: p-length array of kernel hyperparameters
"""
raise NotImplementedError
def set_theta(self, theta):
"""
Parameters
----------
theta: p-length array of kernel hyperparameters
"""
raise NotImplementedError
def jacobian(self):
"""
Returns
-------
jacobian: p x m x m array of kernel gradient with respect to each of the p parameters
"""
raise NotImplementedError
class SumKernel(Kernel):
"""
Sum of multiple kernels.
Parameters
----------
kernels: list of Kernel objects
"""
def __init__(self, kernels):
self.kernels = kernels
self.n_parameters = sum([k.n_parameters for k in kernels])
def __call__(self, x, y):
return np.sum([k(x, y) for k in self.kernels], axis=0)
def __repr__(self):
return "(" + " + ".join([k.__repr__() for k in self.kernels]) + "])" + ")"
def trace_x_x(self, x):
return np.sum([k.trace_x_x(x) for k in self.kernels])
def get_theta(self):
return np.hstack([k.get_theta() for k in self.kernels if k.n_parameters > 0])
def set_theta(self, theta):
ptr = 0
for k in filter(lambda k: k.n_parameters > 0, self.kernels):
k.set_theta(theta[ptr:ptr + k.n_parameters])
ptr += k.n_parameters
def jacobian(self):
return np.vstack([k.jacobian() for k in self.kernels if k.n_parameters > 0])
class ProductKernel(Kernel):
"""
Product of multiple kernels.
Parameters
----------
kernels: list of Kernel objects
"""
def __init__(self, kernels):
self.kernels = kernels
self.n_parameters = sum([k.n_parameters for k in kernels])
self.cache = {}
def __call__(self, x, y):
self.cache["k"] = np.prod([k(x, y) for k in self.kernels], axis=0)
return self.cache["k"]
def __repr__(self):
return "(" + " * ".join([k.__repr__() for k in self.kernels]) + "])" + ")"
def trace_x_x(self, x):
return np.prod([k.trace_x_x(x) for k in self.kernels])
def get_theta(self):
return np.hstack([k.get_theta() for k in self.kernels if k.n_parameters > 0])
def set_theta(self, theta):
ptr = 0
for k in filter(lambda k: k.n_parameters > 0, self.kernels):
k.set_theta(theta[ptr:ptr + k.n_parameters])
ptr += k.n_parameters
def jacobian(self):
return np.vstack([self.cache["k"] / (k.cache["k"] + 1e-4) * k.jacobian() \
for k in self.kernels if k.n_parameters > 0])
class WhiteKernel(Kernel):
"""
White noise kernel.
k(x, y) = 1{x == y} * c
Parameters
----------
c: float
"""
n_parameters = 1
def __init__(self, c=1.0):
self.c = c
self.cache = {}
def __call__(self, x, y):
if x.shape != y.shape:
self.cache["k"] = np.zeros((len(x), len(y)))
else:
self.cache["k"] = np.diag(np.product(x == y, axis=1)) * self.c
return self.cache["k"]
def __repr__(self):
return f"WhiteKernel({self.c:.2f})"
def trace_x_x(self, x):
return x.shape[0] * self.c
def get_theta(self):
return np.array([np.log(self.c)])
def set_theta(self, theta):
self.c = np.exp(theta.squeeze())
def jacobian(self):
return np.array([self.cache["k"]])
class ConstantKernel(Kernel):
"""
Constant kernel.
k(x, y) = c
Parameters
----------
c: float
"""
n_parameters = 1
def __init__(self, c=1.0):
self.c = c
self.cache = {}
def __call__(self, x, y):
self.cache["k"] = np.ones((len(x), len(y))) * self.c
return self.cache["k"]
def __repr__(self):
return f"ConstantKernel({self.c:.2f})"
def trace_x_x(self, x):
return x.shape[0] * self.c
def get_theta(self):
return np.array([np.log(self.c)])
def set_theta(self, theta):
self.c = np.exp(theta.squeeze())
def jacobian(self):
return np.array([self.cache["k"]])
class SEKernel(Kernel):
"""
Squared exponential kernel.
k(x, y) = exp(-0.5 || x - y ||² / l²)
[ Equation (2.16), <NAME>, 2006 ]
Parameters
----------
length_scale: float
dims: variable-length array of indices to specify which dimensions to include in calculation;
if None, default to all dimensions
"""
n_parameters = 1
def __init__(self, length_scale=1.0, dims=None):
self.length_scale = length_scale
self.dims = dims
self.cache = {}
def __call__(self, x, y):
i = self.dims if self.dims is not None else np.arange(x.shape[1])
dists = sp.spatial.distance.cdist(x[:,i], y[:,i], metric="sqeuclidean")
self.cache["k"] = np.exp(-0.5 * dists / self.length_scale)
return self.cache["k"]
def __repr__(self):
return f"SEKernel(length_scale={self.length_scale:.2f}, " \
f"dims={self.dims})"
def trace_x_x(self, x):
return x.shape[0]
def get_theta(self):
return np.array([np.log(self.length_scale)])
def set_theta(self, theta):
self.length_scale = np.exp(theta[0])
def jacobian(self):
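        # For k = exp(-0.5 * d / l) with theta = log(l):
        # dk/dtheta = 0.5 * k * d / l = -k * log(k); the 1e-4 guards log(0).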
return np.array([-self.cache["k"] * np.log(self.cache["k"] + 1e-4)])
class AnisotropicSEKernel(Kernel):
"""
Anisotropic squared exponential kernel.
Parameters
----------
length_scale: p-length array
dims: variable-length array of indices to specify which dimensions to include in calculation;
if None, default to all dimensions
"""
def __init__(self, length_scale=None):
self.length_scale = length_scale
self.length_scale_ext = self.length_scale[:, np.newaxis, np.newaxis]
self.n_parameters = len(length_scale)
self.cache = {}
def __call__(self, x, y):
self.cache["dists"] = np.stack([(x[:, i, np.newaxis] - y[:, i, np.newaxis].T) ** 2 \
for i in range(self.n_parameters)])
self.cache["k"] = np.exp(-0.5 * (self.cache["dists"] / self.length_scale_ext).sum(axis=0))
return self.cache["k"]
def __repr__(self):
return f"AnisotropicSEKernel(length_scale={np.array2string(self.length_scale, precision=1)})"
def trace_x_x(self, x):
return x.shape[0]
def get_theta(self):
return np.log(self.length_scale)
def set_theta(self, theta):
self.length_scale = np.exp(theta) + 1e-4
self.length_scale_ext = self.length_scale[:, np.newaxis, np.newaxis]
def jacobian(self):
return 0.5 * self.cache["k"] * self.cache["dists"] / self.length_scale_ext
class DotProductKernel(Kernel):
"""
Dot product kernel.
k(x, y) = xᵀy
Parameters
----------
dims: variable-length array of indices to specify which dimensions to include in calculation;
if None, default to all dimensions
"""
n_parameters = 0
def __init__(self, dims=None):
self.dims = dims
def __call__(self, x, y):
i = self.dims if self.dims is not None else np.arange(x.shape[1])
return np.dot(x[:,i], y[:,i].T)
def __repr__(self):
return f"DotProductKernel(sigma_sq={self.sigma_sq:.2f}, " \
f"dims={self.dims})"
def trace_x_x(self, x):
return np.sum(x ** 2)
def get_theta(self):
raise ValueError("DotProductKernel takes no parameters.")
def set_theta(self, theta):
raise ValueError("DotProductKernel takes no parameters.")
def jacobian(self):
raise ValueError("DotProductKernel takes no parameters.")
|
StarcoderdataPython
|
1885901
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for a sequential synthesizer using the primer search space."""
from __future__ import annotations
import copy
import random
import sys
import traceback
from typing import List, Sequence, Tuple, Optional
from absl import logging
from abstract_nas.abstract.base import AbstractProperty
from abstract_nas.model.concrete import new_op
from abstract_nas.model.concrete import Op
from abstract_nas.model.concrete import OpType
from abstract_nas.model.subgraph import SubgraphModel
from abstract_nas.synthesis.random_enum_sequential import RandomEnumerativeSequentialSynthesizer
def log_exc():
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.info("".join(
traceback.format_exception(exc_type, exc_value, exc_traceback)))
MUTABLE_OPS = [OpType.DENSE, OpType.CONV, OpType.GROUP_NORM,
OpType.AVG_POOL, OpType.MAX_POOL]
class PrimerSequentialSynthesizer(RandomEnumerativeSequentialSynthesizer):
"""Synthesizer that uses the primer search primitives.
This synthesizer only works for sequential subgraphs (see sequential.py).
The mutations are:
- Delete an op
- Insert an op
- Delete and insert an op
- Mutate field
- Swap ops
"""
def __init__(self,
subgraphs_and_props,
generation,
abstract = True,
max_len = -1,
max_delta = -1,
min_len = 0,
min_delta = -1,
use_automl_zero = False):
new_subgraph_and_props = []
for subg, _ in subgraphs_and_props:
new_subgraph_and_props.append((subg, []))
super().__init__(new_subgraph_and_props, generation, abstract, max_len,
max_delta, min_len, min_delta)
self.use_automl_zero = use_automl_zero
def synthesize(self):
"""Returns a new subgraph."""
subgraph_spec = self.subgraphs_and_props[0][0].subgraph
subg_ops = [copy.deepcopy(node.op) for node in subgraph_spec]
mutations = [
self.delete,
self.insert,
self.mutate_field,
lambda x: self.insert(self.delete(x)),
self.swap]
if self.use_automl_zero:
mutations.append(lambda _: self.randomize())
# Certain mutations may not be applicable for the selected subgraph, and
# they will return None (e.g., if the subgraph is of size 1, we cannot
# swap). So loop through all mutations in a random order until we find a
# mutation that is applicable.
random.shuffle(mutations)
mutated_subg_ops = None
while mutations and mutated_subg_ops is None:
mutation = mutations.pop()
mutated_subg_ops = mutation(subg_ops)
if mutated_subg_ops is None:
raise ValueError("Synthesis failed.")
subg_ops = mutated_subg_ops
prefix = f"gen{self.generation}/"
if not subg_ops:
subg_ops.append(new_op("dummy", OpType.IDENTITY, [self.input_name]))
for op in subg_ops:
op.name = f"{prefix}{op.type.name.lower()}"
subgraph_spec = self.make_subgraph_spec(subg_ops)
return self.make_subgraph_models(subgraph_spec)
def delete(self, subg_ops):
pos = random.randrange(len(subg_ops))
logging.info("deleting %s", subg_ops[pos].name)
del subg_ops[pos]
return subg_ops
def insert(self, subg_ops):
pos = random.randrange(len(subg_ops) + 1)
ops = self.op_enumerator(full=True)
ops = list(ops)
op = random.choice(ops)
logging.info("inserting %s\n"
" op_kwargs=%s\n"
" input_kwargs=%s\n",
op.name, op.op_kwargs, op.input_kwargs)
subg_ops.insert(pos, op)
return subg_ops
def mutate_field(self, subg_ops):
mutable = [op for op in subg_ops if op.type in MUTABLE_OPS]
if not mutable: return None
op: Op = random.choice(mutable)
logging.info("mutating %s", op.name)
op_kwargs_dict, input_kwargs_dict = self.all_kwargs_for_op_type(
self.kwarg_defaults, full=True, op_type=op.type)
keys_to_choose = []
for kwargs_dict in [op_kwargs_dict, input_kwargs_dict]:
for k, v in kwargs_dict.items():
if v and len(v) > 1:
keys_to_choose.append(k)
keys_to_choose = list(set(keys_to_choose))
if not keys_to_choose:
logging.info("No fields to mutate.")
return None
key = random.choice(keys_to_choose)
if key in op_kwargs_dict:
value = random.choice(list(op_kwargs_dict[key]))
op.op_kwargs[key] = value
else:
value = random.choice(list(input_kwargs_dict[key]))
op.input_kwargs[key] = value
logging.info("mutated %s\n"
" op_kwargs=%s\n"
" input_kwargs=%s\n",
op.name, op.op_kwargs, op.input_kwargs)
return subg_ops
def swap(self, subg_ops):
if len(subg_ops) == 1: return None
pos1 = random.randrange(len(subg_ops))
pos2 = random.randrange(len(subg_ops) - 1)
if pos1 == pos2:
pos2 = len(subg_ops) - 1
logging.info("swapping %s and %s", subg_ops[pos1].name, subg_ops[pos2].name)
op = subg_ops[pos1]
subg_ops[pos1] = subg_ops[pos2]
subg_ops[pos2] = op
return subg_ops
def randomize(self):
logging.info("randomizing")
# We initialize the synthesizer without any properties, so the call to super
# will use the random enumerative strategy to synthesize a random subgraph.
subg_models = super().synthesize()
subg_ops = [copy.deepcopy(node.op) for node in subg_models[0].subgraph]
return subg_ops
|
StarcoderdataPython
|
1765539
|
class TokenGenerator(object):
token_separator = ':'
component_separator = '.'
def __init__(self, component, module):
self._component = component
self._module = module
self._tag_prefix = self.component_separator.join([self._component, self._module]) + self.token_separator
return
def get_tag(self, category):
return self._tag_prefix + category
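# Hedged usage sketch (illustrative only; the component/module/category names
# below are placeholders, not values from the original project):
if __name__ == '__main__':
    gen = TokenGenerator('auth', 'login')
    assert gen.get_tag('failure') == 'auth.login:failure'
    print(gen.get_tag('failure'))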
|
StarcoderdataPython
|
3215151
|
<gh_stars>1-10
# base imports
from base.middleware import RequestMiddleware
from base.utils import get_our_models
# django imports
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.conf import settings
@receiver(post_save)
def audit_log(sender, instance, created, raw, update_fields, **kwargs):
"""
Post save signal that creates a log when an object from a models from
our apps is created or updated.
"""
# only listening models created in our apps
if sender not in get_our_models():
return
sensitive_fields = settings.LOG_SENSITIVE_FIELDS
ignored_fields = settings.LOG_IGNORE_FIELDS
user = get_user()
if created:
message = {'Created': instance.to_dict(
exclude=ignored_fields + sensitive_fields,
include_m2m=False,
)}
instance.save_addition(user, message)
elif not raw:
change_message = []
changed_field_labels = {}
original_dict = instance.original_dict
actual_dict = instance.to_dict(
exclude=ignored_fields,
include_m2m=False,
)
change = False
for key in original_dict.keys():
if original_dict[key] != actual_dict[key]:
change = True
if key in sensitive_fields:
changed_field_labels[key] = {'change': 'field updated'}
else:
changed_field_labels[key] = {
'from': original_dict[key],
'to': actual_dict[key],
}
if change:
change_message = {'changed': {'fields': changed_field_labels}}
instance.save_edition(user, change_message)
@receiver(post_delete)
def audit_delete_log(sender, instance, **kwargs):
"""
Post delete signal that creates a log when an object from a models from
our apps is deleted.
"""
# only listening models created in our apps
if sender not in get_our_models():
return
user = get_user()
instance.save_deletion(user)
def get_user():
thread_local = RequestMiddleware.thread_local
if hasattr(thread_local, 'user'):
user = thread_local.user
else:
user = None
return user
|
StarcoderdataPython
|
6524548
|
<reponame>sorasful/minos-python<filename>packages/core/minos-microservice-aggregate/tests/test_aggregate/test_entities/test_models/test_base.py
import unittest
from uuid import (
UUID,
uuid4,
)
from minos.aggregate import (
Entity,
)
from minos.common import (
NULL_UUID,
DeclarativeModel,
)
from tests.utils import (
OrderItem,
)
class TestEntity(unittest.TestCase):
def test_subclass(self):
self.assertTrue(issubclass(Entity, DeclarativeModel))
def test_default(self):
entity = OrderItem("foo")
self.assertIsInstance(entity, DeclarativeModel)
self.assertIsNot(entity.uuid, NULL_UUID)
self.assertIsInstance(entity.uuid, UUID)
self.assertEqual("foo", entity.name)
def test_uuid(self):
uuid = uuid4()
entity = OrderItem("foo", uuid=uuid)
self.assertIsInstance(entity, DeclarativeModel)
self.assertIsNot(entity.uuid, NULL_UUID)
self.assertEqual(uuid, entity.uuid)
self.assertEqual("foo", entity.name)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3428726
|
import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
channels = [3, 64, 256, 512]
self.leaky_relu = nn.LeakyReLU(0.2)
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size= 3, padding= 1)
self.pool1 = nn.MaxPool2d((4, 4), (4, 4))
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size= 3, padding= 1)
self.pool2 = nn.MaxPool2d((4, 4), (4, 4))
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size= 3, padding= 1)
self.pool3 = nn.MaxPool2d((4, 4), (4, 4))
self.fc1 = nn.Linear(channels[-1] * 1 * 1, 8)
self.fc2 = nn.Linear(8, 1)
def name(self):
return 'd_02'
def forward(self, x):
x = self.leaky_relu(self.conv1(x))
x = self.pool1(x)
x = self.leaky_relu(self.conv2(x))
x = self.pool2(x)
x = self.leaky_relu(self.conv3(x))
x = self.pool3(x)
x = x.view(x.size(0), -1)
x = self.leaky_relu(self.fc1(x))
x = self.sigmoid(self.fc2(x))
return x
def parameter_number(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def unit_test():
imgs = torch.zeros(10, 3, 64, 64)
model = Discriminator()
out = model(imgs)
print('Parameter number: ',parameter_number(model))
print('Input size: ', imgs.size())
print('Output size:', out.size())
if __name__ == '__main__':
unit_test()
|
StarcoderdataPython
|
6460023
|
Increment & Decrement Triangle Pattern
Increment & Decrement Triangle Pattern: The program must accept an integer N as the input. The program must print hyphens and integers in N+1 lines based on the following conditions.
In the 1st line, the program must print N hyphens and an integer (0).
In the 2nd line, the program must print N-1 hyphens and three integers (N, 0, N).
In the 3rd line, the program must print N-2 hyphens and five integers (N-1, N, 0, N, N-1).
In the 4th line, the program must print N-3 hyphens and seven integers (N-2, N-1, N, 0, N, N-1, N-2).
Similarly, the program must print the remaining lines as the output.
Input Format:
The first line contains N.
Output Format:
The first N+1 lines contain hyphens and integers based on the given conditions.
Example Input/Output 1:
Input:
5
Output:
-----0
----505
---45054
--3450543
-234505432
12345054321
Example Input/Output 2:
Input:
9
Output:
---------0
--------909
-------89098
------7890987
-----678909876
----56789098765
---4567890987654
--345678909876543
-23456789098765432
1234567890987654321
n=int(input())
print("-"*n,0,sep="")
for i in range(n,0,-1):
print("-"*(i-1),end="")
for j in range(i,n+1):
print(j,end="")
print(0,end="")
for j in range(n,i-1,-1):
print(j,end="")
print()
|
StarcoderdataPython
|
3229360
|
<reponame>gsw945/flask-sio-demo<filename>run.py
# -*- coding: utf-8 -*-
from flask import Flask, request
from sio_server import socketio
from task_client import print_log
app = Flask(__name__)
socketio.init_app(app)
app.socketio = socketio
index_tmpl_str = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="uff-8" />
<title>flask socketio demo</title>
</head>
<body>
<div>please watch server response at console of web browser(you can press F12)</div>
<div>
<a target="_blank" href="/task?msg=hello">send task</a>
</div>
<!-- socket.io cdn -->
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.0.4/socket.io.slim.js"></script>
<script type="text/javascript">
var namespace = '/test';
var ws_url = [location.protocol, '//', document.domain, ':', location.port, namespace].join('');
var socket = io.connect(ws_url, {
path: '/ws/'
});
socket.on('send_log', function(message) {
console.log(message);
});
</script>
</body>
</html>
'''
@app.route("/")
def view_index():
return index_tmpl_str
@app.route("/task")
def view_task():
msg = request.values.get('msg', 'default empty msg')
print_log(msg)
return 'send task ok with msg:<br /><div style="border: 1px solid #fa1;">' + msg + '</div>'
if __name__ == '__main__':
cfg = {
'host': '0.0.0.0',
'port': 5000,
'debug': True
}
print('visit by [http://{0}:{1}]'.format(cfg['host'], cfg['port']))
socketio.run(app, **cfg)
|
StarcoderdataPython
|
3571008
|
import selenium.common.exceptions
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
import datetime as dt
import time
import re
import urllib.parse
import pandas as pd
class BourseDirect:
def __init__(self, Display, login, password, download_path):
self.login = login
self.password = password
self.dwd_path = download_path
timeout = 5
s = Service(ChromeDriverManager(log_level=5).install())
chrome_options = Options()
        if download_path is not None:
            prefs = {"download.default_directory": download_path}
            chrome_options.add_experimental_option("prefs", prefs)
        if Display == False:  # does not work on WSL
            chrome_options.add_argument("--headless")
            chrome_options.add_argument("--disable-gpu")
            chrome_options.add_argument("--window-size=1920,1200")
            chrome_options.add_argument("--ignore-certificate-errors")
            chrome_options.add_argument("--disable-extensions")
            chrome_options.add_argument("--no-sandbox")
            chrome_options.add_argument("--disable-dev-shm-usage")
self.driver = webdriver.Chrome(service=s, options=chrome_options)
self.driver.maximize_window()
else:
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("--window-size=1920x1080")
self.driver = webdriver.Chrome(service=s, options=chrome_options)
self.driver.maximize_window()
self.driver.get("https://www.boursedirect.fr/fr/login")
        print('\n> Connecting...')
notice = EC.presence_of_element_located((By.ID, 'didomi-notice-agree-button'))
WebDriverWait(self.driver, timeout).until(notice).click()
self.driver.find_element(by=By.ID, value='bd_auth_login_type_login').send_keys(str(self.login))
self.driver.find_element(by=By.ID, value='bd_auth_login_type_password').send_keys(str(self.password))
self.driver.find_element(by=By.ID, value='bd_auth_login_type_submit').click()
timeout = 5
try:
pub = EC.presence_of_element_located((By.CLASS_NAME, 'btn-modal-close'))
WebDriverWait(self.driver, timeout).until(pub).click()
except selenium.common.exceptions.ElementNotInteractableException:
pass
 Connecté à Bourse Direct.">
        print("> Connected to Bourse Direct.", end='\r')
def close_connection(self):
self.driver.close()
def execute(self, order):
self.order = order
self.ordertype = order['ORDERTYPE']
searchbar = EC.presence_of_element_located((By.ID, 'searchbar-input'))
timeout = 5
WebDriverWait(self.driver, timeout).until(searchbar).send_keys(str(self.order['ISIN']))
self.driver.find_element(by='id', value="searchbar-input").send_keys(Keys.ENTER)
tradingboard = EC.presence_of_element_located((By.ID, 'quantity'))
timeout = 5
WebDriverWait(self.driver, timeout).until(tradingboard).send_keys(str(self.order['QUANTITE']))
if self.order['SENS'] == 'achat':
self.driver.find_element(by=By.XPATH,
value='/html/body/div[4]/div[4]/div/div[2]/div[1]/div[2]/header/div[3]/div/div/div/div/div/div/form/div/div[2]/div/div[1]/div/div[1]/div/div[1]/div/div/label[1]').click()
elif self.order['SENS'] == 'vente':
self.driver.find_element(by=By.XPATH,
value='/html/body/div[4]/div[4]/div/div[2]/div[1]/div[2]/header/div[3]/div/div/div/div/div/div/form/div/div[2]/div/div[1]/div/div[1]/div/div[1]/div/div/label[2]').click()
if self.ordertype == 'market':
self.market_order()
elif self.ordertype == 'limit':
self.limit_order()
elif self.ordertype == 'best_limit':
self.best_limit_order()
elif self.ordertype == 'tal':
self.tal_order()
elif self.ordertype == 'stop':
self.stop_order()
elif self.ordertype == 'stop_limit':
self.stop_limit_order()
self.validation()
return
def validation(self):
if self.order['VIRTUAL'] == 'on':
print("[VIRTUAL]--> Ordre envoyé (heure d\'envoi) {}.".format(dt.datetime.now().strftime('%H:%M:%S')))
return
elif self.order['VIRTUAL'] == 'off':
validation = EC.presence_of_element_located((By.CLASS_NAME, 'container-validate'))
timeout = 5
WebDriverWait(self.driver, timeout).until(validation).click()
time.sleep(2)
validation = EC.presence_of_element_located((By.CLASS_NAME, 'container-validate'))
timeout = 5
WebDriverWait(self.driver, timeout).until(validation).click()
            print('--> Order sent at {}.'.format(dt.datetime.now().strftime('%H:%M:%S')))
time.sleep(2)
self.driver.find_element(by=By.XPATH,
value='/html/body/div[4]/div[4]/div/div[2]/div[1]/div[2]/header/div[3]/div/div/div/div/div/div/div/div/div/p[1]/a/i').click()
            print('--> Term sheet downloaded.')
return
def market_order(self):
select = Select(self.driver.find_element(by=By.ID, value='order_type'))
select.select_by_value('market')
def limit_order(self):
select = Select(self.driver.find_element(by=By.ID, value='order_type'))
select.select_by_value('limit')
timeout = 5
limit = EC.presence_of_element_located((By.ID, 'limit'))
WebDriverWait(self.driver, timeout).until(limit).send_keys(str(self.order['LIMIT/STOP']).replace('.', ','))
def best_limit_order(self):
select = Select(self.driver.find_element(by=By.ID, value='order_type'))
select.select_by_value('best_limit') # market, limit, best_limit, stop, stop_limit, tal
def tal_order(self):
select = Select(self.driver.find_element(by=By.ID, value='order_type'))
select.select_by_value('tal') # market, limit, best_limit, stop, stop_limit, tal
def stop_order(self):
select = Select(self.driver.find_element(by=By.ID, value='order_type'))
select.select_by_value('stop') # market, limit, best_limit, stop, stop_limit, tal
self.driver.find_element(by=By.ID, value='stop').send_keys(str(self.order['LIMIT/STOP']).replace('.', ','))
    def stop_limit_order(self):
        select = Select(self.driver.find_element(by=By.ID, value='order_type'))
        select.select_by_value('stop_limit')  # market, limit, best_limit, stop, stop_limit, tal
        self.driver.find_element(by=By.ID, value='limit').send_keys(str(self.order['LIMIT/STOP'][0]).replace('.', ','))
        self.driver.find_element(by=By.ID, value='stop').send_keys(str(self.order['LIMIT/STOP'][1]).replace('.', ','))
def show_portfolio(self):
timeout = 5
current_url = self.driver.current_url
e = WebDriverWait(self.driver, timeout).until(
EC.element_to_be_clickable((By.ID, 'user-dropdown'))
)
if e:
e.click()
e = WebDriverWait(self.driver, timeout).until(
EC.element_to_be_clickable((By.XPATH, '//a[@id="user-dropdown"]/../div/ul/li[1]/a'))
)
if e:
e.click()
WebDriverWait(self.driver, timeout).until(EC.url_changes(current_url))
current_url = self.driver.current_url
print('got portfolio page', current_url)
WebDriverWait(self.driver, timeout).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH,"/html/body/div[4]/div[4]/div/div[2]/iframe")))
rl = self.driver.find_elements(by=By.XPATH, value="//div[@id='detailTableHolder']/table/tbody[2]/tr")
df = pd.DataFrame()
if rl:
for r in rl:
cl = r.find_elements(by=By.XPATH, value=".//td")
if cl:
asset=[]
for c in cl:
t = re.sub("(^ *| *$|(?<=[0-9]) (?=[0-9])| €$| *%$|\([^\)]*\))","",c.text)
t =re.sub("(?<=[0-9]),(?=[0-9])",".",t)
if len(asset) == 0: # first cell is asset name with anchor
link=''
ticker=''
try:
e = c.find_element(by=By.XPATH, value="./a")
link = e.get_attribute('href')
link = urllib.parse.unquote(link)
link = re.sub("(^javascript:popupStream\(\'|\'.*$)","",link)
link = 'https://www.boursedirect.fr/priv/new/'+link
ticker = re.sub("(^.*val=E:|&.*$)","",link)
except:
pass
asset.append(ticker)
asset.append(link)
else:
try:
t = float(t)
except:
pass
asset.append(t)
if len(asset) == 11 and isinstance(asset[9], float): # skipping stocks where buy is in progress
# 11 columns: ticker url name Qté PRU Cours Valo +/-Val. var/PRU var/Veille %
row = {
'ticker' : asset[0],
'url' : asset[1],
'name' : asset[2],
'Qté' : asset[3],
'PRU' : asset[4],
'Cours' : asset[5],
'Valo' : asset[6],
'+/-Val.' : asset[7],
'var/PRU' : asset[8],
'var/Veille' : asset[9],
'%' : asset[10],
}
df = df.append(row,ignore_index=True)
print(asset)
return df
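# Hedged usage sketch (illustrative only; the credentials, ISIN and order
# values below are placeholders, not values from the original project):
# bot = BourseDirect(Display=False, login="user", password="secret", download_path="/tmp")
# order = {"ISIN": "FR0000000000", "QUANTITE": 1, "SENS": "achat",
#          "ORDERTYPE": "market", "LIMIT/STOP": None, "VIRTUAL": "on"}
# bot.execute(order)
# df = bot.show_portfolio()
# bot.close_connection()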
|
StarcoderdataPython
|
364117
|
# -*- coding: utf-8 -*-
"""
Bad cases across the entire dataset
"""
import os
import jieba
import re
import pandas as pd
import pickle
import torch
from torch.autograd import Variable
from data_process import data_processing
from TextCNN import TextCNN
from BiLSTM import BiLSTM
from TextCNN_BN import TextCNN_BN, TextCNN_multi_channel
from BiLSTM_b import BiLSTM_b
from CNN_BiLSTM_Concat import CNN_BiLSTM_a
from TextRCNN import TextRCNN
from TextCNN_BN_Pretrained_embed import TextCNN_BN_with_pretrained_embed
use_cuda = torch.cuda.is_available()
MODEL_PATH = os.path.join(os.path.dirname(__file__), 'models')
DATA_PATH = os.path.join(os.path.dirname(__file__), 'new_data')
# Some hyperparameters
BATCH_SIZE = 16
EMBEDDING_DIM = 128
MAXLENGTH = 50
KERNEL_SIZES = [3, 4, 5]
KERNEL_NUM = 128  # number of convolution kernels
HIDDEN_SIZE = 128  # LSTM hidden layer size
def getBadCases(choose_model):
badcases_contents = []
badcases_scores = []
badcases_true_labels = []
badcases_pred_labels = []
x, y, vocabulary, vocabulary_inv, labelToindex, _, labelNumdict = data_processing.load_input_data(MAXLENGTH)
    # word2vec pretrained weights
weight_array = pickle.load(open(os.path.join(DATA_PATH, 'weight_array'), 'rb'))
    # choose which model to test
if choose_model == 'TextCNN':
model = TextCNN(1, KERNEL_NUM, len(vocabulary), EMBEDDING_DIM, len(labelToindex))
elif choose_model == 'BiLSTM':
model = BiLSTM(len(vocabulary), EMBEDDING_DIM, HIDDEN_SIZE, len(labelToindex))
elif choose_model == 'TextCNN_BN':
model = TextCNN_BN(len(vocabulary), EMBEDDING_DIM, KERNEL_SIZES, KERNEL_NUM, len(labelToindex), MAXLENGTH, weight_array=None)
elif choose_model == 'BiLSTM_b':
model = BiLSTM_b(len(vocabulary), EMBEDDING_DIM, HIDDEN_SIZE, len(labelToindex), MAXLENGTH)
elif choose_model == 'CNN_BiLSTM_a':
model = CNN_BiLSTM_a(len(vocabulary), EMBEDDING_DIM, KERNEL_SIZES, KERNEL_NUM, HIDDEN_SIZE, len(labelToindex), MAXLENGTH)
elif choose_model == 'CNN_with_pretrained_embedding':
model = TextCNN_BN_with_pretrained_embed(len(vocabulary), EMBEDDING_DIM, KERNEL_SIZES, KERNEL_NUM, len(labelToindex), MAXLENGTH, weight_array)
elif choose_model == 'TextRCNN':
model = TextRCNN(len(vocabulary), EMBEDDING_DIM, KERNEL_SIZES, HIDDEN_SIZE, KERNEL_NUM, len(labelToindex), MAXLENGTH, weight_array)
elif choose_model == 'TextCNN_multi_channel':
model = TextCNN_multi_channel(len(vocabulary), EMBEDDING_DIM, KERNEL_SIZES, KERNEL_NUM, len(labelToindex), MAXLENGTH, weight_array)
    model.load_state_dict(torch.load(os.path.join(MODEL_PATH, choose_model + '_201807110957.pkl')))  # the date suffix needs to be updated
if use_cuda:
model = model.cuda()
print("Model loaded!")
    # all samples
all_samples = pd.read_csv(os.path.join(DATA_PATH, 'all_labeled_datas.csv'))
all_samples_contents = all_samples['content']
all_samples_scores = all_samples['score']
all_samples_labels = all_samples['label']
all_samples_pro_contents = []
all_samples_pro_scores = []
all_samples_pro_labels = []
for content, score, label in zip(all_samples_contents, all_samples_scores, all_samples_labels):
punctuation = re.compile(u"[-~!@#$%^&*()_+`=\[\]\\\{\}\"|;':,./<>?·!@#¥%……&*()——+【】、;‘:“”,。、《》?「『」』 ]")
digit = re.compile(u"[0-9]")
number = re.compile(u"[a-zA-Z]")
content = punctuation.sub('', content)
content = digit.sub("", content)
content = number.sub("", content)
if content != '':
all_samples_pro_contents.append(content)
all_samples_pro_scores.append(score)
all_samples_pro_labels.append(label)
all_pro_seg_contents = []
all_pro_seg_scores = []
all_pro_seg_labels = []
sentenceToindex = {}
for content, score, label in zip(all_samples_pro_contents, all_samples_pro_scores, all_samples_pro_labels):
seg_content = jieba.cut(content)
seg_con = []
for word in seg_content:
if word not in data_processing.get_stop_words().keys() and word in vocabulary.keys():
seg_con.append(word)
        # deduplicate texts
tmpSentence = ''.join(seg_con)
if tmpSentence != '':
if tmpSentence in sentenceToindex:
continue
else:
sentenceToindex[tmpSentence] = len(sentenceToindex)
all_pro_seg_contents.append(seg_con)
all_pro_seg_scores.append(score)
all_pro_seg_labels.append(label)
for i, ct in enumerate(all_pro_seg_contents):
ct_pad = data_processing.pad_sentences([ct], MAXLENGTH)
input_x, input_y = data_processing.build_input_data(ct_pad, all_pro_seg_labels[i], vocabulary)
input_x = Variable(torch.LongTensor(input_x))
input_y = Variable(torch.LongTensor(input_y))
if use_cuda:
input_x = input_x.cuda()
input_y = input_y.cuda()
model_out = model(input_x)
_, pre_y = torch.max(model_out, 1)
if pre_y.item() != input_y.item():
badcases_contents.append(' '.join(all_pro_seg_contents[i]))
badcases_scores.append(all_pro_seg_scores[i])
badcases_true_labels.append(all_pro_seg_labels[i])
badcases_pred_labels.append(pre_y.item())
dataframe = pd.DataFrame({"content": badcases_contents, "user_score": badcases_scores, "true_label": badcases_true_labels,
"pred_label": badcases_pred_labels})
dataframe.to_csv(os.path.join(DATA_PATH, 'badcases.csv'), index=False, sep=',')
print("Badcases done!")
if __name__ == '__main__':
print("Bad cases!")
getBadCases(choose_model='TextCNN_BN')
|
StarcoderdataPython
|
1908994
|
"""
Exercise 1: Define a class that represents a digital clock.
"""
from time import sleep
class Clock(object):
def __init__(self, h=0, m=0, s=0):
        # To make an attribute private, prefix its name with two underscores
self._h = h
self._m = m
self._s = s
def run(self):
self._s += 1
if self._s == 60:
self._s = 0
self._m += 1
if self._m == 60:
self._m = 0
self._h += 1
if self._h == 24:
self._h = 0
def show(self):
        # %02d formats an integer, left-padding with zeros to a width of two digits
return '%02d:%02d:%02d' % \
(self._h, self._m, self._s)
def main():
clock = Clock(22, 22, 22)
while True:
print(clock.show())
sleep(1)
clock.run()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9628056
|
#!/usr/bin/env python2
# Written for python 2.7
# This script is used for triggering a search in Sonarr for a specific number of episodes
# Stdlib
import urlparse
import logging
import datetime
# 3rd Party
import yaml
import requests
with open("sonarr_backfiller.yaml", "r") as settings_file:
settings_import = yaml.safe_load(settings_file)
# TODO Add Error Checking
# TODO Add proper logging
def get_queue():
api_endpoint = "/api/queue"
uri_endpoint = urlparse.urljoin(settings_import["sonarr"]["address"], api_endpoint)
headers = {
'X-Api-Key': settings_import["sonarr"]["api_key"]
}
request_response = requests.get(uri_endpoint, headers=headers)
logging.debug('request response url: %s', request_response.url)
logging.debug('request response headers: %s', request_response.headers)
logging.debug('request response encoding: %s', request_response.apparent_encoding)
logging.debug('request response text: %s', request_response.text)
logging.info('request response reason: %s', request_response.reason)
logging.info('request response status code: %s', request_response.status_code)
logging.info('request response time elapsed: %s', request_response.elapsed)
json_response = request_response.json()
download_queue = len(json_response)
return download_queue
def get_wanted():
episode_id_list = []
api_endpoint = "/api/wanted/missing"
uri_endpoint = urlparse.urljoin(settings_import["sonarr"]["address"], api_endpoint)
headers = {
'X-Api-Key': settings_import["sonarr"]["api_key"]
}
query = {"sortKey": "airDateUtc", "sortDir": settings_import["sonarr"]["sort_direction"],
"pageSize": settings_import["sonarr"]["number_of_results"]}
request_response = requests.get(uri_endpoint, params=query, headers=headers)
logging.debug('request response url: %s', request_response.url)
logging.debug('request response headers: %s', request_response.headers)
logging.debug('request response encoding: %s', request_response.apparent_encoding)
logging.debug('request response text: %s', request_response.text)
logging.info('request response reason: %s', request_response.reason)
logging.info('request response status code: %s', request_response.status_code)
logging.info('request response time elapsed: %s', request_response.elapsed)
json_response = request_response.json()
for episode_id in json_response["records"]:
episode_id_list.append(episode_id['id'])
return episode_id_list
def queue_search(episode_list):
api_endpoint = "/api/Command"
uri_endpoint = urlparse.urljoin(settings_import["sonarr"]["address"], api_endpoint)
headers = {
'content-type': 'application/json',
'X-Api-Key': settings_import["sonarr"]["api_key"]
}
payload = episode_list
request_response = requests.post(uri_endpoint, headers=headers, json=payload)
logging.debug('request submit uri %s', uri_endpoint)
logging.debug('request submit headers %s', headers)
logging.debug('request submit payload %s', payload)
logging.debug('request response url: %s', request_response.url)
logging.debug('request response headers: %s', request_response.headers)
logging.debug('request response encoding: %s', request_response.apparent_encoding)
logging.debug('request response text: %s', request_response.text)
logging.debug('request response reason: %s', request_response.json)
logging.info('request response reason: %s', request_response.reason)
logging.info('request response status code: %s', request_response.status_code)
logging.info('request response time elapsed: %s', request_response.elapsed)
request_output = request_response.json()
logging.info("Command ID = %s", request_output["id"])
logging.info("Command State = %s", request_output["state"])
def main():
log_entry_format = ':'.join(
[
'%(asctime)s',
'%(levelname)s',
'%(filename)s',
'%(funcName)s',
'%(lineno)s',
'%(message)s',
]
)
logging.basicConfig(
format=log_entry_format,
level=logging.INFO,
filename=".".join(
[
settings_import["logging_file_name"],
datetime.datetime.now().strftime("%Y-%m-%d"),
"log"
]
)
)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.INFO)
requests_log.propagate = True
queue_length = get_queue()
if queue_length <= settings_import["sonarr"]["queue_minimum"]:
logging.info("Queue length is %s, adding an additional %s items",
queue_length,
settings_import["sonarr"]["number_of_results"]
)
episode_list = {"name": "EpisodeSearch"}
episode_list.update({"EpisodeIds": get_wanted()})
queue_search(episode_list)
else:
logging.info("Queue length is %s, which exceeds limit of %s to add additional items",
queue_length,
settings_import["sonarr"]["number_of_results"]
)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6540523
|
<filename>rules/rule7.py
#####################################
### RULE 7: lf_semmeddb_triggers ###
#####################################
'''
semmeddb_triggers: keywords that indicate an almost confirmed presence of ADE-Drug from SemMedDB // note: SemMedDB's CUI used to match mentions in discharge summaries
MATCHING: any pair of trigger words in semmeddb_triggers found in discharge summary
'''
################
### packages ###
################
import pandas as pd
import snorkel
from snorkel.labeling import labeling_function
# LF outputs for inv-trans matching
MATCHING = 1
NOT_MATCHING = 0
ABSTAIN = -1
# get keywords
semmeddb_triggers_raw = pd.read_csv("../rules/keywords/semmeddb_triggers.csv")
semmeddb_triggers = list(semmeddb_triggers_raw['semmeddb_triggers'])
@labeling_function()
# MATCHING: any pair of trigger words in semmeddb_triggers found in discharge summary
def lf_semmeddb_triggers(x) :
found = 0
for j in range(0, len(semmeddb_triggers)) :
arg1 = semmeddb_triggers[j][0]
arg2 = semmeddb_triggers[j][1]
if (arg1 in x.summary.lower()) and (arg2 in x.summary.lower()) :
found = 1
if found == 0 :
return ABSTAIN
else :
return MATCHING
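# Hedged usage sketch (illustrative only): applying this labeling function with
# snorkel's PandasLFApplier to a DataFrame that exposes a "summary" column.
# The example text below is made up.
# from snorkel.labeling import PandasLFApplier
# df_summaries = pd.DataFrame({"summary": ["Patient developed a rash after starting the drug."]})
# L = PandasLFApplier(lfs=[lf_semmeddb_triggers]).apply(df_summaries)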
|
StarcoderdataPython
|
4963638
|
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance("instance")
@pytest.fixture(scope="module", autouse=True)
def setup_nodes():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_http_get_is_readonly():
assert "Cannot execute query in readonly mode" in instance.http_query_and_get_error(
"CREATE TABLE xxx (a Date) ENGINE = MergeTree(a, a, 256)"
)
assert (
"Cannot modify 'readonly' setting in readonly mode"
in instance.http_query_and_get_error(
"CREATE TABLE xxx (a Date) ENGINE = MergeTree(a, a, 256)",
params={"readonly": 0},
)
)
|
StarcoderdataPython
|
1722825
|
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
rooms_idx, bedrooms_idx, population_idx, households_idx = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room=True):
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X):
return self
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_idx] / X[:, households_idx]
population_per_household = X[:, population_idx] / X[:, households_idx]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_idx] / X[:, rooms_idx]
return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
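# Hedged usage sketch (illustrative only): the transformer expects a numeric
# array whose columns 3-6 hold total rooms, bedrooms, population and households,
# as in the California-housing workflow this class is typically paired with.
if __name__ == "__main__":
    X = np.array([[0., 0., 0., 880., 129., 322., 126.],
                  [0., 0., 0., 7099., 1106., 2401., 1138.]])
    extra = CombinedAttributesAdder(add_bedrooms_per_room=True).transform(X)
    print(extra.shape)  # (2, 10): three engineered columns appended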
|
StarcoderdataPython
|
3547095
|
# Copyright (c) 2018 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, contextlib, random, functools, typing, types, sys
supports_fd = os.supports_dir_fd >= {os.open, os.link, os.unlink}
_devnull = os.open(os.devnull, os.O_WRONLY)
_opener = lambda path, flags: os.dup(_devnull)
devnull = functools.partial(open, os.devnull, opener=_opener)
class directory:
'''Directory with support for dir_fd.'''
def __init__(self, path: str) -> None:
os.makedirs(path, exist_ok=True)
if supports_fd:
# convert to file descriptor
self._fd = os.open(path, flags=os.O_RDONLY) # type: typing.Optional[int]
self._path = None # type: typing.Optional[str]
else:
self._fd = None
self._path = path
self._rng = randomnames()
def _join(self, name: str) -> str:
return name if self._path is None else os.path.join(self._path, name)
def open(self, filename: str, mode: str, *, encoding: typing.Optional[str] = None, umask: int = 0o666) -> typing.IO[typing.Any]:
if mode not in ('w', 'wb'):
raise ValueError('invalid mode: {!r}'.format(mode))
return open(self._join(filename), mode+'+', encoding=encoding, opener=lambda name, flags: os.open(name, flags|os.O_CREAT|os.O_EXCL, mode=umask, dir_fd=self._fd))
def openfirstunused(self, filenames: typing.Iterable[str], mode: str, *, encoding: typing.Optional[str] = None, umask: int = 0o666) -> typing.Tuple[typing.IO[typing.Any], str]:
for filename in filenames:
try:
return self.open(filename, mode, encoding=encoding, umask=umask), filename
except FileExistsError:
pass
raise ValueError('all filenames are in use')
@contextlib.contextmanager
def temp(self, mode: str) -> typing.Generator[typing.IO[typing.Any], None, None]:
try:
f, name = self.openfirstunused(self._rng, mode)
with f:
yield f
finally:
os.unlink(f.name, dir_fd=self._fd)
def link(self, src: typing.IO[typing.Any], dst: str) -> None:
os.link(src.name, self._join(dst), src_dir_fd=self._fd, dst_dir_fd=self._fd)
def linkfirstunused(self, src: typing.IO[typing.Any], dsts: typing.Iterable[str]) -> str:
for dst in dsts:
try:
self.link(src, dst)
except FileExistsError:
pass
else:
return dst
raise ValueError('all destinations are in use')
def __del__(self) -> None:
if os and os.close and self._fd is not None:
os.close(self._fd)
def sequence(filename: str) -> typing.Generator[str, None, None]:
'''Generate file names a.b, a-1.b, a-2.b, etc.'''
yield filename
splitext = os.path.splitext(filename)
i = 1
while True:
yield '-{}'.format(i).join(splitext)
i += 1
def randomnames(characters: str = 'abcdefghijklmnopqrstuvwxyz0123456789_', length: int = 8) -> typing.Generator[str, None, None]:
rng = random.Random()
while True:
yield ''.join(rng.choice(characters) for dummy in range(length))
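# Hedged usage sketch (illustrative only): `sequence` yields candidate file
# names until an unused one is found, and `randomnames` yields random slugs.
# import itertools
# list(itertools.islice(sequence('log.html'), 3))  # ['log.html', 'log-1.html', 'log-2.html']
# next(randomnames(length=4))                      # e.g. 'q7x_'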
def set_ansi_console() -> None:
if sys.platform == "win32":
import platform
if platform.version() < '10.':
raise RuntimeError('ANSI console mode requires Windows 10 or higher, detected {}'.format(platform.version()))
import ctypes
handle = ctypes.windll.kernel32.GetStdHandle(-11) # https://docs.microsoft.com/en-us/windows/console/getstdhandle
mode = ctypes.c_uint32() # https://docs.microsoft.com/en-us/windows/desktop/WinProg/windows-data-types#lpdword
ctypes.windll.kernel32.GetConsoleMode(handle, ctypes.byref(mode)) # https://docs.microsoft.com/en-us/windows/console/getconsolemode
mode.value |= 4 # add ENABLE_VIRTUAL_TERMINAL_PROCESSING
ctypes.windll.kernel32.SetConsoleMode(handle, mode) # https://docs.microsoft.com/en-us/windows/console/setconsolemode
# vim:sw=2:sts=2:et
|
StarcoderdataPython
|
6629853
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from django.core.serializers.json import DjangoJSONEncoder
from konst import Constant
class ExtendedJSONEncoder(DjangoJSONEncoder):
"""Add support for serializing our class Constant."""
def default(self, obj):
if isinstance(obj, Constant):
return obj.v
else:
return super(ExtendedJSONEncoder, self).default(obj)
def dumps(*args, **kwargs):
kwargs["cls"] = kwargs.pop("cls", ExtendedJSONEncoder)
return json.dumps(*args, **kwargs)
def loads(*args, **kwargs):
return json.loads(*args, **kwargs)
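# Hedged usage sketch (illustrative only; `some_constant` is a placeholder for
# a konst Constant instance): these wrappers behave like json.dumps/json.loads
# but serialise Constants through their underlying `.v` value.
# dumps({"status": some_constant})  # -> '{"status": <value of some_constant.v>}'
# loads('{"status": 1}')            # plain json.loads passthrough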
|
StarcoderdataPython
|
1906562
|
import gym
import os
import numpy as np
import pickle
import gym_minigrid
from gym_minigrid import wrappers
import torch
import torch.nn as nn
import pfrl
from pfrl.agents import PPO
from pfrl.utils.batch_states import batch_states
from imitation.data.types import Trajectory
from modules.pfrl_networks import get_mlp_model
import argparse
parser = argparse.ArgumentParser(description="Full Process")
parser.add_argument(
"--weights_path",
"-w",
type=str,
default="mlp_run/50000_finish/",
help="Path to weights",
)
parser.add_argument(
"--env_name", "-e", type=str, default="MiniGrid-Empty-5x5-v0",
)
parser.add_argument(
"--save_name", "-s", default="check_run", help="Save pkl file with this name",
)
parser.add_argument(
"--num_traj", "-nt", type=int, default=50, help="How many traj to save",
)
parser.add_argument(
"--time_limit", "-tl", type=int, default=200,
)
parser.add_argument(
"--render", "-r", action="store_true",
)
args = parser.parse_args()
env = gym.make(args.env_name)
env = wrappers.FlatObsWrapper(env)
obs_size = env.reset().shape[0]
n_actions = env.action_space.n
model = get_mlp_model(obs_size, n_actions)
opt = torch.optim.Adam(model.parameters(), lr=3e-4)
agent = PPO(model, opt, gpu=0, recurrent=False, act_deterministically=False,)
agent.load(args.weights_path)
agent.training = False
traj_dataset = []
for traj in range(args.num_traj):
obs_list = []
action_list = []
info_list = []
obs = env.reset()
obs_list.append(obs)
for i in range(args.time_limit):
action = agent._batch_act_eval(batch_states([obs], agent.device, agent.phi))
obs, reward, done, info = env.step(action)
action_list.append(action)
# info_list.append({})
obs_list.append(obs)
if args.render:
env.render()
if done:
break
# traj_dataset.append(
# Trajectory(
# obs=np.array(obs_list),
# acts=np.array(action_list),
# # infos=np.array(info_list),
# )
# )
traj_dataset.append([np.array(obs_list), np.array(action_list)])
with open(args.save_name + ".pkl", "wb") as handle:
pickle.dump(traj_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
StarcoderdataPython
|
5102817
|
<filename>webapp/webapp/html_builder.py
SQLI1_LINKS = {
"vulnerability_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/webapp/webapp/vulnerabilities/sqli_login_bypass.py",
"vulnerability_gist": "https://gist.github.com/neumaneuma/39a853dfe14e7084ecc8ac8b304c60a3.js",
"exploit_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/exploits/sqli_login_bypass.py",
"exploit_gist": "https://gist.github.com/neumaneuma/2cd5ffda86a9f3beee7858fd3ee21b10.js",
"patch_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/webapp/webapp/patches/sqli_login_bypass.py",
"patch_gist": "https://gist.github.com/neumaneuma/0076b3c6735f6002c680415483566e6e.js",
}
SQLI2_LINKS = {
"vulnerability_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/webapp/webapp/vulnerabilities/sqli_second_order.py",
"vulnerability_gist": "https://gist.github.com/neumaneuma/a96c8d6c304e94cdd343a17a7ad0a7ee.js",
"exploit_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/exploits/sqli_second_order.py",
"exploit_gist": "https://gist.github.com/neumaneuma/a258d30a2551184f00e455969d9fc413.js",
"patch_source_code": "https://github.com/neumaneuma/appseccheat.codes/blob/main/webapp/webapp/patches/sqli_second_order.py",
"patch_gist": "https://gist.github.com/neumaneuma/bef037bcd02ae91e6c6ecf1aac46546d.js",
}
def build_headers(title, introduction):
return {
"title": title,
"introduction": introduction,
"news": "Heard about it in the news?",
"challenge": "Challenge",
"code": "Solution in code",
"vulnerability": "Vulnerability",
"exploit": "Exploit",
"patch": "Patch",
"explanation": "Explanation",
}
|
StarcoderdataPython
|
5069812
|
<filename>burgerkin-board/__init__.py<gh_stars>0
from burgerkin-board.board import Board
|
StarcoderdataPython
|
3441695
|
'''
Blind Curated 75 - Problem 72
=============================
Non-overlapping Intervals
-------------------------
Given a collection of intervals, find the minimum number of intervals you need
to remove to make the rest of the intervals non-overlapping.
[→ LeetCode][1]
[1]: https://leetcode.com/problems/non-overlapping-intervals/
'''
def solution(intervals):
'''
Sort the intervals by start time. If two intervals are conflicting, remove the
one which ends earlier.
'''
if not intervals:
return 0
intervals.sort(key=lambda x: x[0])
removed_count = 0
last = intervals[0]
for curr in intervals[1:]:
if curr[0] < last[1]:
removed_count += 1
last = last if last[1] < curr[1] else curr
else:
last = curr
return removed_count
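# Hedged usage sketch (illustrative only):
if __name__ == '__main__':
    assert solution([[1, 2], [2, 3], [3, 4], [1, 3]]) == 1  # drop [1, 3]
    assert solution([[1, 2], [1, 2], [1, 2]]) == 2
    assert solution([]) == 0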
|
StarcoderdataPython
|
3501639
|
<filename>quest/quests/prison.py
import copy
from random import randint
from ..quest import Quest
from ..quest_segment import QuestSegment
from utils.command_set import CommandSet
from utils.string_parsing import list_to_string
GOLD_REWARD = 350
GOLD_PENALTY = 50
GOLD_PENALTY_WAIT = 120
GOLD_VARIANCE = 26
EXP_REWARD = 3
class Prison(Quest):
def __init__(self, quest_manager):
super().__init__(quest_manager)
self.starting_segment = Start
class Start(QuestSegment):
def set_commands(self):
commands = {}
for party_member in self.quest.party[1:]:
# Due to party_member changing every iteration, we have to copy the value of party_member
# to something else, or the same reference will be used for every iteration
commands['!{}'.format(party_member.lower())] = (
lambda display_name, target=party_member: self.pick(display_name, target))
self.commands = CommandSet(exact_match_commands=commands)
def play(self):
msg = (
'In the Castle of the Mad Yordle, {0} stumbles across a prison full of comrades. But the castle is '
'collapsing with only time to save one! Do you save {1}?'.format(
self.quest.party[0], list_to_string(self.quest.party[1:], join_word='or', prefix='!')
)
)
self.channel.send_msg(msg)
def pick(self, display_name, saved):
# Has to be the first player to be valid input
if display_name != self.quest.party[0]:
return
gold_gained = GOLD_REWARD + randint(-GOLD_VARIANCE, GOLD_VARIANCE)
gold_lost = GOLD_PENALTY + randint(-GOLD_VARIANCE, GOLD_VARIANCE)
forsaken_adventurers = [player for player in self.quest.party if player != display_name and player != saved]
msg = (
'{0} decided to save {1}! {2} are left behind and crushed under the rubble of the collapsing castle, '
'losing {3} gold. {0} and {1} gain {4} gold and {5} exp!'.format(
display_name, saved, list_to_string(forsaken_adventurers), gold_lost, gold_gained, EXP_REWARD
)
)
self.channel.send_msg(msg)
self.reward([display_name, saved], gold=gold_gained, exp=EXP_REWARD)
self.penalize(forsaken_adventurers, gold=gold_lost)
self.complete_quest()
def timeout(self):
gold_lost = GOLD_PENALTY_WAIT + randint(-GOLD_VARIANCE, GOLD_VARIANCE)
msg = (
'{0} took too long deciding who to save, so everyone ended up crushed by the collapsing castle. {1} '
'all lose {2} gold. Ouch.'.format(
self.quest.party[0], self.quest.party, gold_lost
)
)
self.channel.send_msg(msg)
self.penalize(self.quest.party, gold=gold_lost)
self.complete_quest()
|
StarcoderdataPython
|
8042552
|
<filename>buying/migrations/0006_depot_sign_up_secret.py
# Generated by Django 3.1.3 on 2020-11-24 14:52
import buying.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('buying', '0005_auto_20201123_1650'),
]
operations = [
migrations.AddField(
model_name='depot',
name='sign_up_secret',
field=models.CharField(default=buying.models.make_uuid, max_length=6),
),
]
|
StarcoderdataPython
|
1892829
|
"""
from zlib import crc32
import struct
def u32_as_bytes_le(x):
return struct.pack("I",x)
value = crc32(b"",0)
for i in range(0,10000):
value = crc32(u32_as_bytes_le(value),0)
print("{:08x}".format(value))
"""
# Xorshift+
def rng(seed):
s0 = seed ^ 0xabcd1234abcd1234
s1 = seed ^ 0xdcba4321dcba4321
def rand():
nonlocal s0, s1
x, y = s0, s1
x = x ^ ((x << 23) & 0xffffffffffffffff)
x = (x ^ (x >> 17)) ^ (y ^ (y >> 26))
s0, s1 = y, x
return (s0 + s1) & 0xffffffffffffffff
return rand
def generate_data(seed,size):
rand = rng(seed)
return bytes(rand() & 0xff for k in range(size))
from zlib import crc32
from hashlib import sha256
data = generate_data(0,100000)
# value = crc32(data,0)
# print("{:08x}".format(value))
print(sha256(data).hexdigest())
|
StarcoderdataPython
|
4966841
|
<filename>src/submit_results.py
import requests
from concurrent.futures import ThreadPoolExecutor
# post a single message to STA
def post_single_STA(payloads,num,url,user,password):
resp = requests.post(url+'/s/Observations', json=payloads[num], auth=requests.auth.HTTPBasicAuth(user, password))
print(resp.text)
# split process to sub threads and post messages in each thread
# at the same time
def post_batch_STA(payloads,num,length,url,user,password):
payloads=payloads[num:num+length]
num_list=[i for i in range(0,length)]
print('batch_started')
with ThreadPoolExecutor(max_workers=50) as pool:
pool.map(post_single_STA, [payloads]* length,num_list, [url] * length,[user] * length,[password] * length)
pool.shutdown(wait=True)
# post every message in the payload list concurrently, one sub thread per message
def post_real_STA(payloads,length,url,user,password):
num_list=[i for i in range(0,length)]
print('batch_started')
with ThreadPoolExecutor(max_workers=50) as pool:
pool.map(post_single_STA, [payloads]* length,num_list, [url] * length,[user] * length,[password] * length)
pool.shutdown(wait=True)
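# Hedged usage sketch (illustrative only; the URL, credentials and payloads are
# placeholders, not values from the original project):
# payloads = [{"result": 1.0}, {"result": 2.0}]
# post_real_STA(payloads, len(payloads), "http://example.org/FROST-Server/v1.1",
#               "user", "password")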
|
StarcoderdataPython
|
1946253
|
def run():
from pyfiglet import Figlet
    f = Figlet(font='slant')
f.renderText("Tutorial")
start = """
Welcome to the BabySploit Tutorial
In this tutorial you will learn how to navigate and use
BabySploit. The framework is geared towards beginners so
it shouldn't be too hard to get the hang of things.
"""
input("Press Enter To Continue...")
def animated(msg):
import sys, time
for char in msg:
time.sleep(0.1)
sys.stdout.write(char)
sys.stdout.flush()
|
StarcoderdataPython
|
11227860
|
"""
In:
* 23andMe_raw_genotype.txt
Out:
* missing_rsids.v rsid
* missing_genos.v (rsid, genotype)
* dataframe.v immediate report for app dashboard
* dataframe.csv immediate csv for app dashboard
Trigger:
* display available research report in web-app
* collect research pubs for missing rsids
* collect metadata on rsid genotypes from SNPedia
"""
# Adds this file to top-level interpretter path, so runs "as if" from top.
# import sys
# from pathlib import Path
# sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
# %%
# // LOAD MODULES / PACKAGES
import sys
#import os.path
from os import path
from importlib import reload
import numpy as np
import pandas as pd
from collections import OrderedDict
import pickle
import copy
import modules.functions as fn
from subprocess import Popen
# from modules.functions import create_connection, execute_query, check_db
# // RELOAD ALL CHANGED MODULES (JUPYTER SPECIAL FUNCTION)
# // autoreloads on each cell execution.
# https://stackoverflow.com/questions/34377270/reload-module-from-a-folder
%load_ext autoreload
%autoreload 2
# // RELOAD ALL MODULES
# for module in sys.modules.values():
# reload(module)
# https://stackoverflow.com/questions/45405600/how-to-reload-all-imported-modules
# // RELOAD ONE MODULE
# reload(functions)
# // FIRST TIME - SETUP FUNCTIONS
if not path.exists("./data/SNP_db.sqlite"):
# // DATABASE SETUP
fn.create_db()
if not path.exists("./data/SNPedia_ids.txt"):
# --- build list of all SNP rsids available on ** SNPedia **
# function saves list of rsids to file: "./data/SNPedia_ids.txt"
fn.all_snpedia_rsids()
# TODO: rename functions (to exclude "SNPedia" & otherwise (to avoid LICENSING dispute) - do so "project-wide" at END)
# TODO: run this function separately on sleep timer to update list of SNPedia RSIDS periodically.
# %%
# Import list of all SNP rsids available on ** SNPedia **
with open("./data/SNPedia_ids.txt", "rb") as f:
wiki_rsids = pickle.load(f)
# load 23andMe Data (raw test file)
df = pd.read_csv(
f"./data/temp/{fn.find_files('data/temp', '*.txt')[0]}", sep="\t", skiprows=20, names=["rsid", "chromosome", "position", "genotype"]
)
# --- Keep only rsids - if they exist in both SNPedia & the 23andMe raw test file
keep_rsids = set(df.rsid).intersection(set(wiki_rsids))
df_select = df.query('rsid in @keep_rsids')
# Reduce further to rm INDEL MARKERS proprietary to 23andMe.
df_no_indels = fn.exclude_indels(df_select)
# TODO: design method to handle 23andMe indels and include in query (see api_functions.py for details)
print(f"\nRSIDs\n23andMe Raw: {len(df)} | SNPedia Total: {len(wiki_rsids)} | Intersect: {len(df_select)} | After Indels Removed: {len(df_no_indels)}\n")
df = df_no_indels
# -------- check if RSIDs are missing from publications table
print('RSIDs')
query = "SELECT DISTINCT rsid FROM rsid_pubs"
rsid_compare = fn.check_db(query=query, compare=[df.rsid], path="data/SNP_db.sqlite")
with open("./data/temp/missing_rsids.v", "wb") as f:
pickle.dump(rsid_compare["missing"], f) # save missing_rsids for later collection
# -------- check if SNPedia data is missing from genotype table
print('Genos')
query = "SELECT rsid, genotype FROM genotypes"
geno_compare = fn.check_db(query=query, compare=[df.rsid.tolist(), df.genotype.tolist()], path="data/SNP_db.sqlite")
# DF of only rsid:geno pairs missing from DB:genotypes table
missing_genos = [i[0] + i[1] for i in geno_compare['missing']]
missing_genos = df.query('(rsid + genotype) == @missing_genos')
with open("./data/temp/missing_genos.v", "wb") as f:
pickle.dump(missing_genos, f) # save missing_genos for later collection
# ------- trigger scrape for missing information
Popen(["python3","./wiki_scrape.py"]) # start non-blocking subprocess
Popen(["python3","./txt_scrape.py"])
# %%
# ------- Stop here if no data is available for immediate report
if (len(rsid_compare['available']) > 0) and (len(geno_compare['available']) > 0):
# ------- select available genotypes from db to match on user
cnx = fn.create_connection("data/SNP_db.sqlite")
genos = f"{geno_compare['available']}"[1:-1] # convert to str, rm []
query = f"SELECT * FROM genotypes WHERE (rsid, genotype) IN (VALUES {genos});"
geno_df = pd.read_sql_query(query, cnx)
# ------- sort data by magnitude & split by repute [good & bad]
geno_df = geno_df.sort_values("magnitude", ascending=False)
g_df = copy.deepcopy(geno_df[geno_df.repute == "good"][:50])
b_df = copy.deepcopy(geno_df[geno_df.repute == "bad"][:50])
df = g_df.append(b_df, ignore_index=True).iloc[:, 1:]
# ------- import summaries and publication metadata
rsids = f"{df.rsid.tolist()}"[1:-1] # convert to str and remove [] brackets
# most_cited summaries
query = f"""
SELECT rsid, abs_summary, most_cited, most_recent, pmids
FROM rsid_summaries
WHERE (rsid IN ({rsids}) AND most_cited = 1);
"""
c_df = pd.read_sql_query(query, cnx)
# most_recent summaries
query = f"""
SELECT rsid, abs_summary, most_cited, most_recent, pmids
FROM rsid_summaries
WHERE (rsid IN ({rsids}) AND most_recent = 1);
"""
r_df = pd.read_sql_query(query, cnx)
# publications metadata
query = f"""
SELECT rsid, pmid, date, n_citedby, ncbi_url
FROM rsid_pubs
WHERE rsid IN ({rsids})
"""
pub_df = pd.read_sql_query(query, cnx)
# ------- construct final dataframe
c_df = c_df.set_index("rsid")
r_df = r_df.set_index("rsid")
df = df.set_index("rsid")
df = df.join(c_df[["abs_summary", "pmids"]], how="left", rsuffix="_mC")
df = df.join(c_df[["abs_summary", "pmids"]], how="left", rsuffix="_mR")
df = df.rename(columns={"abs_summary": "abs_summary_mC", "pmids": "pmids_mC"})
df["fullurl"] = df.fullurl.apply(lambda x: x.replace("bots.", ""))
df["fullurl"] = df.fullurl.apply(lambda x: x.rsplit("(")[0] + "(" + x.rsplit("(")[1].upper())
df.columns
with open("data/temp/dataframe.v", "wb") as f:
pickle.dump(df, f)
df.to_csv("data/temp/dataframe.csv")
# ------- trigger app.py to display dataframe in browser
# TODO add this process. State how many geno's still need to be looked-up by backend prior to produce final report.
# TODO pop-up question, would you like to be notified when your report is ready? Enter email here.
else:
print('no data for your genotype yet, come back later')
# TODO trigger app.py to display DB empty message.
# TODO use asyncio to enable concurrency. Start scrape, and conduct scrape for concurrent threading. MUST refactor code into non-blocking I/O, for this to be effective.
# https://www.youtube.com/watch?v=bs9tlDFWWdQ&t=356s
# https://docs.python.org/3.8/library/asyncio.html
# https://testdriven.io/blog/concurrency-parallelism-asyncio/#recap-when-to-use-multiprocessing-vs-asyncio-or-threading
# async (concurrent) API calls
# multi-process runtimes for frontend web-app v. backend scrapes (using concurrent.futures)
# backend scrape: subprocess PUB collection from SNPedia metadata collection; converge at ML summary generation step.
# Can ML be subprocessed or would that be too much overhead on the GPU?
# TODO: rename functions (to exclude "SNPedia" & otherwise (to avoid LICENSING dispute) - do so "project-wide" at END)
# ----------------------- mvp-origins ----------------------- Research-SNPit -----------------------
# TODO: 'origins' folder v 'operations' folder (with DB setup & checks).
# %%
# with open("data/temp/dataframe.v", "rb") as f:
# df = pickle.load(f)
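# %%
# ------- minimal asyncio sketch for the concurrency TODO above -------
# Illustrative sketch only, assuming the two scraper scripts launched earlier
# in this file; it is not wired into the pipeline.
import asyncio

async def _run_scrapes_concurrently():
    # launch both scrapers as child processes without blocking the event loop
    procs = [
        await asyncio.create_subprocess_exec("python3", "./wiki_scrape.py"),
        await asyncio.create_subprocess_exec("python3", "./txt_scrape.py"),
    ]
    # wait for both to finish; other coroutines (e.g. the web app) could run meanwhile
    await asyncio.gather(*(proc.wait() for proc in procs))

# example call (commented out so the script keeps its current behaviour):
# asyncio.run(_run_scrapes_concurrently())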
|
StarcoderdataPython
|
12842833
|
"""
m2wsgi.io.gevent: gevent-based I/O module for m2wsgi
=====================================================
This module provides subclasses of m2wsgi.WSGIHandler and related classes
that are specifically tuned for running under gevent. You can import
and use the classes directly from here, or you can select this module
when launching m2wsgi from the command-line::
m2wsgi --io=gevent dotted.app.name tcp://127.0.0.1:9999
You will need the gevent_zeromq package from here:
https://github.com/traviscline/gevent-zeromq
"""
# Copyright (c) 2011, <NAME>.
# All rights reserved; available under the terms of the MIT License.
from __future__ import absolute_import
from m2wsgi.util import fix_absolute_import
fix_absolute_import(__file__)
from m2wsgi.io import base
import os
import gevent
import gevent.monkey
import gevent.event
import gevent.core
import gevent.hub
import gevent_zeromq
from gevent_zeromq import zmq
import zmq.core.poll as zmq_poll
if hasattr(zmq, '_Context'):
ZContext = zmq._Context
else:
ZContext = zmq.Context
if hasattr(zmq, '_Socket'):
ZSocket = zmq._Socket
else:
ZSocket = zmq.Socket
def monkey_patch():
"""Hook to monkey-patch the interpreter for this IO module.
This calls the standard gevent monkey-patching routines. Don't worry,
it's not called by default unless you're running from the command line.
"""
gevent.monkey.patch_all()
gevent_zeromq.monkey_patch()
    # Patch signal module for gevent compatibility.
# Courtesy of http://code.google.com/p/gevent/issues/detail?id=49
import signal
_orig_signal = signal.signal
def gevent_signal_wrapper(signum,*args,**kwds):
handler = signal.getsignal(signum)
if callable(handler):
handler(signum,None)
def gevent_signal(signum,handler):
_orig_signal(signum,handler)
return gevent.hub.signal(signum,gevent_signal_wrapper,signum)
signal.signal = gevent_signal
# The BaseConnection recv logic is based on polling, but I can't get
# gevent polling on multiple sockets to work correctly.
# Instead, we simulate polling on each socket individually by reading an item
# and keeping it in a local buffer.
#
# Ideally I would just use the _wait_read() method on gevent-zmq sockets,
# but this seems to cause hangs for me. Still investigating.
class _Context(ZContext):
def socket(self,socket_type):
if self.closed:
raise zmq.ZMQError(zmq.ENOTSUP)
return _Socket(self,socket_type)
def term(self):
# This seems to be needed to let other greenthreads shut down.
# Omit it, and the SIGHUP handler gets "bad file descriptor" errors.
gevent.sleep(0.1)
return super(_Context,self).term()
class _Socket(ZSocket):
def __init__(self,*args,**kwds):
self._polled_recv = None
super(_Socket,self).__init__(*args,**kwds)
# This blockingly-reads a message from the socket, but stores
# it in a buffer rather than returning it.
def _recv_poll(self,flags=0,copy=True,track=False):
if self._polled_recv is None:
self._polled_recv = super(_Socket,self).recv(flags,copy,track)
# This uses the buffered result if available, or polls otherwise.
def recv(self,flags=0,copy=True,track=False):
v = self._polled_recv
while v is None:
self._recv_poll(flags,copy=copy,track=track)
v = self._polled_recv
self._polled_recv = None
return v
zmq.Context = _Context
zmq.Socket = _Socket
class Client(base.Client):
__doc__ = base.Client.__doc__
class Request(base.Request):
    __doc__ = base.Request.__doc__
class ConnectionBase(base.ConnectionBase):
__doc__ = base.ConnectionBase.__doc__ + """
This ConnectionBase subclass is designed for use with gevent. It uses
the monkey-patched zmq module from gevent and spawns a number of green
threads to manage non-blocking IO and interrupts.
"""
ZMQ_CTX = zmq.Context()
# A blocking zmq.core.poll doesn't play nice with gevent.
# Instead we read from each socket in a separate greenthread, and keep
# the results in a local buffer so they don't get lost. An interrupt
# then just kills all the currently-running threads.
def __init__(self):
super(ConnectionBase,self).__init__()
self.poll_threads = []
def _poll(self,sockets,timeout=None):
# If there's anything available non-blockingly, just use it.
(ready,_,error) = zmq_poll.select(sockets,[],sockets,timeout=0)
if ready:
return ready
if error:
return []
if timeout == 0:
return []
# Spawn a greenthread to poll-recv from each socket.
ready = []
threads = []
res = gevent.event.Event()
for sock in sockets:
threads.append(gevent.spawn(self._do_poll,sock,ready,res,timeout))
self.poll_threads.append((res,threads))
# Wait for one of them to return, or for an interrupt.
try:
res.wait()
finally:
gevent.killall(threads)
gevent.joinall(threads)
return ready
def _do_poll(self,sock,ready,res,timeout):
if timeout is None:
sock._recv_poll()
else:
with gevent.Timeout(timeout,False):
sock._recv_poll()
ready.append(sock)
if not res.is_set():
res.set()
def _interrupt(self):
for (res,threads) in self.poll_threads:
gevent.killall(threads)
if not res.is_set():
res.set()
class Connection(base.Connection,ConnectionBase):
__doc__ = base.Connection.__doc__ + """
This Connection subclass is designed for use with gevent. It uses the
monkey-patched zmq module from gevent and spawns a number of green
threads to manage non-blocking IO and interrupts.
"""
class DispatcherConnection(base.DispatcherConnection,ConnectionBase):
__doc__ = base.DispatcherConnection.__doc__ + """
This DispatcherConnection subclass is designed for use with gevent. It
uses the monkey-patched zmq module from gevent and spawns a number of
green threads to manage non-blocking IO and interrupts.
"""
class StreamingUploadFile(base.StreamingUploadFile):
__doc__ = base.StreamingUploadFile.__doc__ + """
This StreamingUploadFile subclass is designed for use with gevent. It
    uses gevent.sleep() instead of time.sleep().
"""
def _wait_for_data(self):
curpos = self.fileobj.tell()
cursize = os.fstat(self.fileobj.fileno()).st_size
while curpos >= cursize:
gevent.sleep(0.01)
cursize = os.fstat(self.fileobj.fileno()).st_size
class Handler(base.Handler):
__doc__ = base.Handler.__doc__ + """
This Handler subclass is designed for use with gevent. It spawns a
    new green thread to handle each incoming request.
"""
ConnectionClass = Connection
def __init__(self,*args,**kwds):
super(Handler,self).__init__(*args,**kwds)
# We need to count the number of inflight requests, so the
# main thread can wait for them to complete when shutting down.
self._num_inflight_requests = 0
self._all_requests_complete = gevent.event.Event()
def handle_request(self,req):
self._num_inflight_requests += 1
if self._num_inflight_requests >= 1:
self._all_requests_complete.clear()
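        # gevent.spawn used as a decorator immediately runs do_handle_request
        # in a new greenlet, so handle_request returns without waiting for it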
@gevent.spawn
def do_handle_request():
try:
self.process_request(req)
finally:
self._num_inflight_requests -= 1
if self._num_inflight_requests == 0:
self._all_requests_complete.set()
def wait_for_completion(self):
if self._num_inflight_requests > 0:
self._all_requests_complete.wait()
class WSGIResponder(base.WSGIResponder):
__doc__ = base.WSGIResponder.__doc__
class WSGIHandler(base.WSGIHandler,Handler):
__doc__ = base.WSGIHandler.__doc__ + """
This WSGIHandler subclass is designed for use with gevent. It spawns a
    new green thread to handle each incoming request.
"""
ResponderClass = WSGIResponder
StreamingUploadClass = StreamingUploadFile
|
StarcoderdataPython
|
11342120
|
<filename>polls/admin.py
from django.contrib import admin
from .models import Question, Choice
# class ChoiceInline(admin.StackedInline):
class ChoiceInline(admin.TabularInline):
    # With TabularInline (instead of StackedInline), related objects are displayed in a compact, table-style layout
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
list_display = ('id', 'question_text', 'pub_date', 'was_published_recently')
# ordering = ('id',)
inlines = [ChoiceInline]
    # "Filter" sidebar
list_filter = ['pub_date']
    # Search field for querying data
search_fields = ['question_text']
class ChoiceAdmin(admin.ModelAdmin):
list_display = ('question', 'choice_text', 'votes')
ordering = ('id',)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice, ChoiceAdmin)
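# A hedged alternative (assumption, not part of this file): the same registrations
# can be written with Django's @admin.register decorator instead of admin.site.register():
#
# @admin.register(Question)
# class QuestionAdmin(admin.ModelAdmin):
#     ...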
|
StarcoderdataPython
|
1699288
|
# -*- coding:utf-8 -*-
from django.urls import path
from docs.views.infovalue import (
# InfoValueCreateApiView,
InfoValueAddApiView,
InfoValueListApiView,
InfoValueDetailApiView,
InfoValueListAllArticleApiView,
)
urlpatterns = [
    # Prefix: /api/v1/docs/infovalue/
    # Info values
# path("create", InfoValueCreateApiView.as_view(), name="create"),
path("add", InfoValueAddApiView.as_view(), name="add"),
path("list", InfoValueListApiView.as_view(), name="list"),
path("<int:pk>/<int:article_id>", InfoValueDetailApiView.as_view(), name="delete_article_infovalue"),
path("<int:pk>", InfoValueDetailApiView.as_view(), name="detail"),
path("<int:pk>/articles", InfoValueListAllArticleApiView.as_view(), name="articles"),
]
|
StarcoderdataPython
|
6460233
|
<filename>ticker_dashboard.py<gh_stars>0
"""
Description: Dashboard to interact with option visuals
"""
import datetime
import streamlit as st
from wallstreet import Stock
from option_utils import (
collect_option_data,
plot_option_percent_gain,
plot_option_asset_value
)
# Constants
FRIDAY_WEEKDAY = 4
TODAY = datetime.datetime.today()
DAYS_BACK = {
'week':7,
'month':30,
'year':365
}
# Streamlit Configuration
st.set_page_config(layout="wide")
# Find next Friday
day_diff = FRIDAY_WEEKDAY - TODAY.weekday()
if day_diff < 0:
day_diff = 7 + day_diff
next_friday = TODAY + datetime.timedelta(days=day_diff)
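# worked example: on a Saturday (weekday 5) day_diff = 4 - 5 = -1, adjusted to 6,
# so next_friday lands six days later; on a Friday day_diff = 0 and today is kept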
# Title
col1, col2 = st.beta_columns((1, 2))
col1.title("Do I like the Stock?")
col2.image('imgs/wsb.jpg')
# Column 1 inputs
col1, col2, col3, col4 = st.beta_columns((3, 1, 1, 5))
ticker = col1.text_input('Ticker:')
if ticker == "GME":
col2.image('imgs/diamond.jpg')
col3.image('imgs/hands.jpg')
# Check for Ticker
if ticker:
stock = Stock(ticker)
# Stock Information Section
st.header(stock.name)
col1, col2, col3, col4, col5 = st.beta_columns(5)
# Show Info
col1.write('Current Price ($)')
col1.write(stock.price)
col2.write('Daily Change:')
col2.write(f"${stock.change}")
col2.write(f"{stock.cp} %")
# Historicals
price_view = col3.radio("Select Time Period:", ('week', 'month', 'year'), index=1)
stock_data = stock.historical(days_back=DAYS_BACK[price_view])
price = stock_data['Close'].values
pc = price[-1] - price[0]
pcp = int(100 * (price[-1] - price[0]) / price[0] * 100) / 100
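    # pcp truncates the percent change to two decimals, e.g. 100.0 -> 103.456 gives 3.45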
col4.write(price_view.capitalize() + 'ly Change:')
col4.write(f"${pc}")
col4.write(f"{pcp} %")
if pc < 0:
col5.image('imgs/hanginthere.jpg')
else:
col5.image('imgs/rocket.jpg')
# Visualize
st.line_chart(stock_data[['Date', 'Close', 'Low', 'High']].set_index('Date'))
st.bar_chart(stock_data[['Date', 'Volume']].set_index('Date'), width=200)
# Collect Options Information
col1, col2, col3 = st.beta_columns(3)
col1.title(f"Call Options: {ticker}")
col1.write('Cause stonks only go up')
col3.image('imgs/stonks.jpg')
col1, col2 = st.beta_columns(2)
option_date = col1.date_input("Call Option Expiration Date:", next_friday)
col1.write('(Defaults to closest available date)')
initial = col2.number_input('$ purchase of contracts:', value=10000.0, step=1000.0)
col2.write('(Buy N contracts up to this amount)')
# Collect initial date
year = option_date.year
month = option_date.month
day = option_date.day
# Collect the data
data = collect_option_data(ticker, year, month, day, strict=False)
actual_date = data['date'][0]
year = actual_date.year
month = actual_date.month
day = actual_date.day
st.subheader(f"Options Date: {month}/{day}/{year}")
st.subheader('Percent Gain')
pc_fig, pc_ax = plot_option_percent_gain(
ticker, year, month, day, data=data
)
st.pyplot(pc_fig)
st.subheader('Net Value of Assets')
st.write('(Note: Areas of no change may imply that no contracts' +
' are available at that initial investment or strike price)')
asset_fig, asset_ax = plot_option_asset_value(
ticker, year, month, day, initial=initial, data=data
)
st.pyplot(asset_fig)
st.image('imgs/loss.jpg')
st.subheader('(Not a financial advisor)')
|
StarcoderdataPython
|