1879061
|
"""
Utility dialogs for starcheat itself
"""
import os
import sys
import hashlib
import webbrowser
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QProgressDialog
from PyQt5 import QtCore
from urllib.request import urlopen
from urllib.error import URLError
import logging
import config
import qt_options
import qt_about
import qt_mods
from config import Config
from assets.core import Assets
def make_pak_hash():
vanilla = os.path.join(Config().read("assets_folder"), "packed.pak")
mods = Config().read("mods_folder")
pak_list = [vanilla]
timestamps = []
for root, dirs, files in os.walk(mods):
for f in files:
if f.endswith(".pak"):
pak_list.append(os.path.join(root, f))
for pak in pak_list:
timestamps.append(str(os.stat(pak).st_mtime))
final_hash = hashlib.md5()
final_hash.update("_".join(timestamps).encode())
return final_hash.hexdigest()
def build_assets_db(parent):
assets_db_file = Config().read("assets_db")
starbound_folder = Config().read("starbound_folder")
assets_db = Assets(assets_db_file, starbound_folder)
def bad_asset_dialog():
dialog = QMessageBox(parent)
dialog.setWindowTitle("No Assets Found")
dialog.setText("Unable to index Starbound assets.")
dialog.setInformativeText("Check that the Starbound folder was set correctly.")
dialog.setIcon(QMessageBox.Critical)
dialog.exec()
assets_db.db.close()
assets_db.init_db()
asset_files = assets_db.find_assets()
total = 0
progress = QProgressDialog("Indexing Starbound assets...",
"Abort", 0, len(asset_files),
parent)
progress.setWindowTitle("Indexing...")
progress.setWindowModality(QtCore.Qt.ApplicationModal)
progress.forceShow()
progress.setValue(total)
for i in assets_db.create_index():
total += 1
progress.setValue(total)
if progress.wasCanceled():
assets_db.db.close()
return False
progress.hide()
if total == 0:
bad_asset_dialog()
return False
else:
Config().set("pak_hash", make_pak_hash())
return True
def check_index_valid(parent):
old_hash = Config().read("pak_hash")
new_hash = make_pak_hash()
if old_hash != new_hash:
logging.info("Hashes don't match, updating index")
dialog = QMessageBox(parent)
dialog.setWindowTitle("Assets Out-of-date")
dialog.setText("Starbound assets have been changed.")
dialog.setInformativeText("Rebuild the starcheat assets index?")
dialog.setIcon(QMessageBox.Question)
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
answer = dialog.exec()
if answer == QMessageBox.Yes:
return build_assets_db(parent)
else:
return True
else:
return True
def save_modified_dialog(parent):
"""Display a prompt asking user what to do about a modified file. Return button clicked."""
dialog = QMessageBox(parent)
dialog.setWindowTitle("Save Changes?")
dialog.setText("This player has been modified.")
dialog.setInformativeText("Do you want to save your changes?")
dialog.setStandardButtons(QMessageBox.Save | QMessageBox.Cancel | QMessageBox.Discard)
dialog.setDefaultButton(QMessageBox.Save)
dialog.setIcon(QMessageBox.Question)
return dialog.exec()
def select_starbound_folder_dialog(parent):
folder = QFileDialog.getExistingDirectory(caption="Select Starbound Folder")
while not os.path.isfile(os.path.join(folder, "assets", "packed.pak")):
dialog = QMessageBox(parent)
dialog.setWindowTitle("Wrong Starbound Folder")
dialog.setText("This is not your Starbound folder!")
dialog.setInformativeText("Please try again and select your Starbound folder, which should contain the assets folder.")
dialog.setIcon(QMessageBox.Warning)
dialog.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
answer = dialog.exec()
if answer == QMessageBox.Cancel:
dialog = QMessageBox(parent)
dialog.setWindowTitle("Starbound Not Installed")
dialog.setIcon(QMessageBox.Critical)
dialog.setText("starcheat needs Starbound installed to work.")
dialog.exec()
Config().remove_config()
sys.exit()
folder = QFileDialog.getExistingDirectory(caption="Select Starbound Folder")
return os.path.normpath(folder)
def new_setup_dialog(parent):
"""Run through an initial setup dialog for starcheat if it's required."""
logging.info("First setup dialog")
if os.path.isfile(Config().ini_file):
config_valid = (Config().has_key("config_version") and
int(Config().read("config_version")) == Config().CONFIG_VERSION)
if not config_valid:
logging.info("rebuild config and assets_db (config_version mismatch)")
dialog = QMessageBox(parent)
dialog.setWindowModality(QtCore.Qt.WindowModal)
dialog.setWindowTitle("Config Out-of-date")
dialog.setText("Your starcheat settings are outdated.")
dialog.setInformativeText("A new config file and assets index will be created...")
dialog.setIcon(QMessageBox.Warning)
dialog.exec()
else:
vanilla_pak = os.path.join(Config().read("starbound_folder"), "assets", "packed.pak")
if os.path.isfile(vanilla_pak):
return True
else:
logging.error("No vanilla pak, Starbound folder may be wrong")
os.remove(Config().ini_file)
# Starbound folder settings
starbound_folder = Config().detect_starbound_folder()
if starbound_folder == "":
dialog = QMessageBox(parent)
dialog.setWindowModality(QtCore.Qt.WindowModal)
dialog.setWindowTitle("Starbound Not Found")
dialog.setText("Unable to detect the main Starbound folder.")
dialog.setInformativeText("Please select it in the next dialog.")
dialog.setIcon(QMessageBox.Warning)
dialog.exec()
starbound_folder = select_starbound_folder_dialog(parent)
else:
dialog = QMessageBox(parent)
dialog.setWindowModality(QtCore.Qt.WindowModal)
dialog.setWindowTitle("Starbound Folder Found")
dialog.setText("Detected the following folder as the location of Starbound. Is this correct?")
dialog.setInformativeText(starbound_folder)
dialog.setStandardButtons(QMessageBox.No | QMessageBox.Yes)
dialog.setIcon(QMessageBox.Question)
answer = dialog.exec()
if answer == QMessageBox.No:
starbound_folder = select_starbound_folder_dialog(parent)
# looks okay enough, let's go
Config().create_config(starbound_folder)
if not build_assets_db(parent):
os.remove(Config().ini_file)
return False
else:
return True
def update_check_worker(result):
if Config().has_key("check_updates"):
check_updates = Config().read("check_updates") == "yes"
else:
check_updates = True
if not check_updates:
logging.info("Skipping update check")
return
logging.info("Checking for updates")
try:
latest_tag = urlopen("https://github.com/wizzomafizzo/starcheat/releases/latest").geturl()
if latest_tag.find("github.com/wizzomafizzo/starcheat/releases") >= 0:
if not latest_tag.endswith("tag/" + config.STARCHEAT_VERSION_TAG):
result[0] = latest_tag
logging.info("update check: found new starcheat version")
return
else:
logging.info("update check: skipping update check because of failed redirect")
except URLError:
logging.info("update check: skipping update check because of no internet connection")
result[0] = False
def update_check_dialog(parent, latest_tag):
dialog = QMessageBox(parent)
dialog.setWindowModality(QtCore.Qt.WindowModal)
dialog.setWindowTitle("Outdated starcheat Version")
dialog.setText("A new version of starcheat is available! Do you want to update now?")
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
dialog.setDefaultButton(QMessageBox.Yes)
dialog.setIcon(QMessageBox.Question)
if dialog.exec() == QMessageBox.Yes:
webbrowser.open(latest_tag, 2)
sys.exit(0)
class AboutDialog():
def __init__(self, parent):
self.dialog = QDialog(parent)
self.ui = qt_about.Ui_Dialog()
self.ui.setupUi(self.dialog)
set_ver = self.ui.header_info.text().replace("STARCHEAT_VERSION",
config.STARCHEAT_VERSION)
self.ui.header_info.setText(set_ver)
class OptionsDialog():
def __init__(self, parent):
self.dialog = QDialog(parent)
self.ui = qt_options.Ui_Dialog()
self.ui.setupUi(self.dialog)
assets_db_file = Config().read("assets_db")
starbound_folder = Config().read("starbound_folder")
self.db = Assets(assets_db_file, starbound_folder)
self.config = Config()
self.current_folder = self.config.read("starbound_folder")
# read the current config and prefill everything
self.ui.starbound_folder.setText(self.config.read("starbound_folder"))
self.ui.total_indexed.setText(str(self.db.total_indexed()) + " indexed")
self.ui.update_checkbox.setChecked(self.config.read("check_updates") == "yes")
self.ui.starbound_folder_button.clicked.connect(self.open_starbound)
self.ui.rebuild_button.clicked.connect(self.rebuild_db)
self.ui.update_checkbox.toggled.connect(self.write_update_check)
def write(self):
starbound_folder = self.ui.starbound_folder.text()
if self.current_folder != starbound_folder:
self.config.create_config(starbound_folder)
def write_update_check(self):
do_check = self.ui.update_checkbox.isChecked()
        # configparser has a getboolean() method, but it's not much smarter
        # than this and would complicate the config class. Move this to a
        # separate method if we need more booleans (see the sketch after
        # this class).
if do_check:
conf_val = "yes"
else:
conf_val = "no"
self.config.set("check_updates", conf_val)
def open_starbound(self):
filename = QFileDialog.getExistingDirectory(self.dialog,
"Select Starbound Folder",
self.config.read("starbound_folder"))
if filename != "":
self.ui.starbound_folder.setText(filename)
def rebuild_db(self):
self.write()
def bad_asset_dialog():
dialog = QMessageBox(self.dialog)
dialog.setWindowTitle("No Starbound Assets")
dialog.setText("No Starbound assets could be found.")
dialog.setInformativeText("The Starbound folder option might be set wrong.")
dialog.setIcon(QMessageBox.Critical)
dialog.exec()
try:
rebuild = build_assets_db(self.dialog)
except FileNotFoundError:
rebuild = False
assets_db_file = Config().read("assets_db")
starbound_folder = Config().read("starbound_folder")
self.db = Assets(assets_db_file, starbound_folder)
total = str(self.db.total_indexed())
        if not rebuild or total == "0":
bad_asset_dialog()
else:
dialog = QMessageBox(self.dialog)
dialog.setWindowTitle("Finished Indexing")
dialog.setText("Finished indexing Starbound assets.")
dialog.setInformativeText("Found %s assets." % total)
dialog.setIcon(QMessageBox.Information)
dialog.exec()
self.ui.total_indexed.setText(total + " indexed")
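# A minimal sketch (added for illustration, not part of starcheat) of how
# configparser's built-in getboolean() could replace the manual yes/no handling
# in OptionsDialog.write_update_check above; the ini path, section and option
# names are placeholders.
def _getboolean_sketch(ini_path="settings.ini"):
    import configparser
    parser = configparser.ConfigParser()
    parser.read(ini_path)
    # getboolean() accepts yes/no, true/false, on/off and 1/0
    return parser.getboolean("starcheat", "check_updates", fallback=True)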
class ModsDialog():
def __init__(self, parent):
self.dialog = QDialog(parent)
self.ui = qt_mods.Ui_Dialog()
self.ui.setupUi(self.dialog)
starbound_folder = Config().read("starbound_folder")
self.assets = Assets(Config().read("assets_db"),
starbound_folder)
mods = self.assets.get_mods()
self.ui.mods_total.setText(str(len(mods))+" total")
for mod in mods:
self.ui.mods_list.addItem(mod)
self.ui.export_button.clicked.connect(self.export_list)
def export_list(self):
data = ""
for mod in self.assets.get_mods():
data += mod + "\n"
filename = QFileDialog.getSaveFileName(self.dialog,
"Export Mod List As",
filter="Text (*.txt);;All Files (*)")
        if filename[0] != "":
            with open(filename[0], "w") as export_file:
                export_file.write(data)
|
StarcoderdataPython
|
12835713
|
<filename>interface/__init__.py
from PySimpleGUI import PySimpleGUI as sg
from sys import exit
sg.theme('DarkGray14')
# sg.theme_previewer()
def layout():
layout = [
[sg.Text('Recomeçar:'),
sg.Radio('Sim', 'recomecar', key='rSim', default=True, enable_events=True),
sg.Radio('Não', 'recomecar', key='rNao', enable_events=True)],
[sg.Text('Usuário:', size=(8, 1), key='usuarioTxt', visible=True)], [sg.Input(key='usuario', size=(20, 1), visible=True)],
[sg.Text('Senha:', size=(8, 1), key='senhaTxt')], [sg.Input(key='senha', password_char='*', size=(20, 1))],
[sg.Text('Frase:', size=(8, 1), key='fraseTxt')], [sg.Input(key='frase', size=(20, 1))],
[sg.Text('Link do Post:', key='linkTxt', visible=True)],
[sg.Input(key='link', size=(40, 1), visible=True)],
[sg.Text('Número de seguidores:', size=(33, 1), key='qtSeguiTxt', visible=True)],
[sg.Input(key='qtSegui', size=(15, 1), visible=True)],
[sg.Text('Buscar:', visible=True, key='buscaTxt')],
[sg.Radio('Seguidores', 'busca', key='bSeguidor', visible=True, default=True, enable_events=True)],
[sg.Radio('Seguindo', 'busca', key='bSeguindo', visible=True, enable_events=True)],
[sg.Text('Navegador:'),
sg.Radio('Opera', 'navegador', key='opera', default=True),
sg.Radio('Google Chrome', 'navegador', key='chrome')],
[sg.Text('Marcações:')],
[sg.Slider(range=(1, 5), default_value=3, size=(20, 15), orientation='h', key='marcar')],
[sg.Text('Quantidade de comentarios:')],
[sg.Slider(range=(1, 300), default_value=20, size=(40, 15), orientation='h', key='comQuant')],
[sg.Button('Iniciar')]
#[sg.Output(size=(40, 20), key='output')]
]
return layout
def janela():
window = sg.Window('Bot de comentários', layout())
while True:
eventos, valores = window.read()
#window['output'].update(value=f'{"Informações":-^60}')
if eventos == sg.WINDOW_CLOSED:
exit()
break
if eventos == 'rSim':
window['link'].update(disabled=False)
window['qtSegui'].update(disabled=False)
window['usuario'].update(disabled=False)
window['senha'].update(disabled=False)
window['frase'].update(disabled=False)
window['bSeguidor'].update(disabled=False)
window['bSeguindo'].update(disabled=False)
elif eventos == 'rNao':
window['link'].update(disabled=True)
window['qtSegui'].update(disabled=True)
window['usuario'].update(disabled=True)
window['senha'].update(disabled=True)
window['frase'].update(disabled=True)
window['bSeguidor'].update(disabled=True)
window['bSeguindo'].update(disabled=True)
if eventos == 'Iniciar':
try:
valores['marcar'] = int(valores['marcar'])
valores['comQuant'] = int(valores['comQuant'])
if valores['rSim']:
valores['qtSegui'] = int(valores['qtSegui'])
return valores
            except ValueError:
                print('Error! Please enter valid integer values!')
janela()
|
StarcoderdataPython
|
4908581
|
#### REST FRAMEWORK #####
from rest_framework import generics
from rest_framework import permissions
from rest_framework.response import Response
##### SERIALIZERS #####
from users.serializers import UserSerializer
from users.serializers import BuyerProfileSerializer
from users.serializers import SellerProfileSerializer
##### MODELS #####
from django.contrib.auth.models import User
from users.models import BuyerProfile
from users.models import SellerProfile
# Get User API
class UserAPI(generics.RetrieveAPIView):
permission_classes = [permissions.IsAuthenticated,]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
class BuyerProfileAPI(generics.RetrieveAPIView):
permission_classes = [permissions.IsAuthenticated,]
serializer_class = BuyerProfileSerializer
    def get_object(self):
        return BuyerProfile.objects.get(user=self.request.user)
class SellerProfileAPI(generics.RetrieveAPIView):
permission_classes = [permissions.IsAuthenticated,]
serializer_class = SellerProfileSerializer
    def get_object(self):
        return SellerProfile.objects.get(user=self.request.user)
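# A possible urls.py wiring for these views (illustrative sketch only; the
# route prefixes and module path are assumptions, not taken from this project):
#
#   from django.urls import path
#   from users.views import UserAPI, BuyerProfileAPI, SellerProfileAPI
#
#   urlpatterns = [
#       path("api/user/", UserAPI.as_view()),
#       path("api/buyer-profile/", BuyerProfileAPI.as_view()),
#       path("api/seller-profile/", SellerProfileAPI.as_view()),
#   ]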
|
StarcoderdataPython
|
3364128
|
#!/usr/bin/env python3
def test(i):
print("coroutine starts")
while True:
value = yield i
i += value
b = test(5)   # just created; the main function is in control
next(b)       # now the coroutine is in control:
              # it executes the print statement, then yields the value `i`
              # back to the main function and waits in a pending state,
              # ready to be fed again
b.send(3)     # the main function feeds the coroutine the value 3;
              # it resumes, executes i += value, then yields the updated `i`
b.send(5)     # the main function feeds the coroutine the value 5;
              # it resumes, executes i += value, then yields the updated `i`
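# Added illustration (not in the original snippet): send() returns the value
# yielded by the coroutine, i.e. the updated `i`, so capturing its return
# value makes the handoff visible.
c = test(10)      # fresh coroutine; i starts at 10
print(next(c))    # prints 10: the first yield hands i back to the caller
print(c.send(3))  # prints 13: i += 3, then yield i
print(c.send(5))  # prints 18: i += 5, then yield i
c.close()         # finish the coroutine cleanly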
|
StarcoderdataPython
|
5022187
|
<reponame>raymondyeh07/chirality_nets
"""Test for chiral batch_norm1d layer."""
import unittest
import torch
import torch.nn.functional as F
from tests.test_chiral_base import TestChiralBase
from chiral_layers.chiral_batch_norm1d import ChiralBatchNorm1d
class TestChiralBatchNorm1d(TestChiralBase):
"""Implements unittests for chiral conv1d layers."""
def test_single_layer_running_mean_var_updates(self):
print('Tests batchnorm running mean and var updates.')
batch_size = 2
time_size = 5
num_joints = 5
in_dim = 2
out_dim = 2
neg_dim_in = 1
neg_dim_out = 1
sym_groupings = ([2, 2, 1], [2, 2, 1])
# Generate chiral pairs.
x, x_chiral = self._get_input_pairs(batch_size, time_size, num_joints,
in_dim, neg_dim_in, sym_groupings)
# Reshape to joints and dim to a column vector.
x = x.view(x.shape[0], -1, x.shape[-1])
x_chiral = x_chiral.view(x.shape[0], -1, x.shape[-1])
bn_layer = ChiralBatchNorm1d(num_joints*in_dim,
sym_groupings=sym_groupings,
neg_dim_in=neg_dim_in,
neg_dim_out=neg_dim_out)
        # Checks the running mean is initialized to zero.
bn_mean = bn_layer.running_mean_neg1.mean()
assert(torch.eq(bn_mean, torch.zeros_like(bn_mean)))
# Checks variance is initialized.
bn_var = bn_layer.running_var_neg1.mean()
assert(torch.eq(bn_var, torch.ones_like(bn_var)))
bn_layer.train()
for k in range(5):
xx, _ = self._get_input_pairs(batch_size, time_size, num_joints,
in_dim, neg_dim_in, sym_groupings)
bn_layer(x)
# Checks mean is updated, thus not zero.
bn_mean = bn_layer.running_mean_neg1.mean()
assert(not torch.eq(bn_mean, torch.zeros_like(bn_mean)))
# Checks variance is updated, thus not one.
bn_var = bn_layer.running_var_neg1.mean()
assert(not torch.eq(bn_var, torch.ones_like(bn_var)))
def test_single_layer_equi_at_test(self):
"""Performs unittest for equivariance on toy example"""
print('Tests batchnorm equivariance at test time.')
batch_size = 2
time_size = 5
num_joints = 5
in_dim = 2
out_dim = 2
neg_dim_in = 1
neg_dim_out = 1
sym_groupings = ([2, 2, 1], [2, 2, 1])
# Generate chiral pairs.
x, x_chiral = self._get_input_pairs(batch_size, time_size, num_joints,
in_dim, neg_dim_in, sym_groupings)
# Reshape to joints and dim to a column vector.
x = x.view(x.shape[0], -1, x.shape[-1])
x_chiral = x_chiral.view(x.shape[0], -1, x.shape[-1])
bn_layer = ChiralBatchNorm1d(num_joints*in_dim,
sym_groupings=sym_groupings,
neg_dim_in=neg_dim_in,
neg_dim_out=neg_dim_out)
# Perform training to update running stats.
bn_layer.train()
for k in range(5):
xx, _ = self._get_input_pairs(batch_size, time_size, num_joints,
in_dim, neg_dim_in, sym_groupings)
bn_layer(x)
# Perform forward pass at test time.
bn_layer.eval()
y = bn_layer(x)
y_chiral = bn_layer(x_chiral)
# Reshape back to joints, dim representation.
y = y.view(y.shape[0], num_joints, -1, y.shape[-1])
y_chiral = y_chiral.view(y.shape[0], num_joints, -1, y.shape[-1])
# Compare output.
self._checks_chiral_equivariant(y, y_chiral, num_joints, out_dim,
neg_dim_out, sym_groupings[1])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
44180
|
'''
Logger configuration steps
'''
import logging
# Get the logger instance via getLogger()
logger = logging.getLogger()
# Set the logger level
logger.setLevel(logging.DEBUG)
# Log formatter
formatter = logging.Formatter(
'Data/Hora: %(asctime)s | level: %(levelname)s | file: %(filename)s | mensagem: %(message)s',
    # Date format: day/month/year
    # Time format: hour/minute/seconds
    # plus an am/pm marker
datefmt='%d/%m/%Y %H:%M:%S %p'
)
# Choose a handler
'''
logging.FileHandler()   -> writes log records to a file
logging.StreamHandler() -> writes log records to the console
logging.NullHandler()   -> no-op (null) handler
'''
fh = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(fh)
logger.debug('Hello.')
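# Added sketch: the same formatter can also feed a FileHandler so records are
# written to a file as well; "app.log" is a placeholder file name.
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.info('This message goes to both the console and app.log.')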
|
StarcoderdataPython
|
246872
|
<filename>odac_idp/__init__.py
import os
from flask import Flask
# setup configs
env = os.environ.get('FLASK_ENV', 'development')
app = Flask(__name__)
app.config['DEBUG'] = (env != 'production')
import odac_idp.views
|
StarcoderdataPython
|
11242923
|
import random, lists, logging
# Security levels
levels = {
'1' : {'length': 0, 'complex': False},
'2' : {'length': 8, 'complex': False},
'3' : {'length': 8, 'complex': True}
}
# Gen passwords
class password:
def gen_passwd(wordCount=3, separator='-', words=lists.words):
passwd = ''
for current in range(wordCount):
logging.info('Getting a random word...')
randWord = random.choice(words)
if current == wordCount-1:
logging.info('Finishing...')
passwd = passwd + str(randWord)
else:
logging.info('Adding word...')
passwd = passwd + str(randWord) + str(separator)
return(passwd)
def gen_char_passwd(charCount=20, separator='', chars=lists.complex):
passwd = ''
for current in range(charCount):
randWord = random.choice(chars)
if current == charCount-1:
passwd = passwd + str(randWord)
else:
passwd = passwd + str(randWord) + str(separator)
return(passwd)
def gen_complex_passwd(wordCount=3, charCount=3, separator='-', words=lists.words, chars=lists.complex):
passwd = ''
for current in range(wordCount):
randWord = random.choice(words)
if current == wordCount-1:
passwd = passwd + str(randWord)
else:
passwd = passwd + str(randWord) + str(separator)
for current in range(charCount):
randChar = random.choice(chars)
passwd = passwd + str(randChar)
return(passwd)
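# Added sketch (not part of the original module): one possible way the unused
# `levels` table above could drive the generators; the mapping is an assumption.
def gen_passwd_for_level(level='2'):
    settings = levels[level]
    if settings['complex']:
        passwd = password.gen_complex_passwd()
    else:
        passwd = password.gen_passwd()
    while len(passwd) < settings['length']:
        passwd = password.gen_passwd(wordCount=4)
    return passwd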
|
StarcoderdataPython
|
3569185
|
<gh_stars>0
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Mail:
"""
ShareNews class by default need the next instances:
-mail = senders_mail
-mail_to = who will receive the mail
-password = <PASSWORD>
-smtp_server = The server of you mail provider
-port = Default 465
Mail Instances:
self.message = MIMEMultipart("alternative")
self.message["Subject"] = "News"
self.message["From"] = self.mail
self.message["To"] = self.destination
"""
def __init__(self, mail, destination, password,
smtp_server, port='465', subject='News'):
# Connection Instances
self.mail = mail
self.destination = destination
self.password = password
self.smtp_server = smtp_server
self.port = port
# two parts of message-> plain text and html content
# change to message content
self.subject = subject
self.html = ''
self.text = ''
def create_message(self):
# create message from mimemultipart obj
# Mail / Message Instances
self.content = MIMEMultipart("alternative")
self.content["Subject"] = self.subject
self.content["From"] = self.mail
self.content["To"] = self.destination
# create message
# Turn these into plain/html MIMEText objects
part1 = MIMEText(self.text, "plain")
part2 = MIMEText(self.html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
self.content.attach(part1)
self.content.attach(part2)
def send(self):
# creating message
self.create_message()
# connecting to the mail client to send an email
with smtplib.SMTP_SSL(self.smtp_server, self.port) as server:
# login
server.login(self.mail, self.password)
# send
server.sendmail(self.mail, self.destination, self.content.as_string())
    def send_to_many(self):
pass
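# Usage sketch added for illustration; the addresses, password and SMTP host
# below are placeholders, not working credentials.
def example_send():
    mail = Mail(mail='sender@example.com',
                destination='recipient@example.com',
                password='app-password-here',
                smtp_server='smtp.example.com')
    mail.text = 'Plain-text fallback body'
    mail.html = '<h1>News</h1><p>HTML body</p>'
    mail.send()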
|
StarcoderdataPython
|
198375
|
<filename>examples/flask/htdocs/main.py
"""
# Python Flask
# http://flask.pocoo.org/docs/1.0/quickstart/#quickstart
# Code from
# @see Rapid Flask [Video], PacktLib
# ---
# @see Learning Flask Framework
# @see ...
# run app server with "python routes.py"
# open browser at "localhost:5000"
# open browser at "localhost:5000/hello"
# open browser at "localhost:5000/goodbye/<your-name>"
"""
# -----------------------------------------
from flask import Flask
from flask import render_template
from flask import make_response
from flask import request
import datetime
import os
import json
import time
import urllib2
# -----------------------------------------
app = Flask(__name__)
# -----------------------------------------
def get_weather(city):
url = "http://api.openweathermap.org/data/2.5/forecast/daily?q={}&cnt=10&mode=json&units=metric".format(city)
response = urllib2.urlopen(url).read()
return response
# -----------------------------------------
@app.route("/")
def index():
searchcity = request.args.get("searchcity")
if not searchcity:
searchcity = request.cookies.get("last_city")
if not searchcity:
searchcity = "London"
data = json.loads(get_weather(searchcity))
try:
city = data['city']['name']
except KeyError:
return render_template("invalid_city.html")
country = data['city']['country']
forecast_list = []
for d in data.get("list"):
day = time.strftime('%d %B', time.localtime(d.get('dt')))
mini = d.get("temp").get("min")
maxi = d.get("temp").get("max")
description = d.get("weather")[0].get("description")
forecast_list.append((day,mini,maxi,description))
response = make_response(render_template("index.html", forecast_list=forecast_list, city=city, country=country))
if request.args.get("remember"):
response.set_cookie("last_city","{},{}".format(city, country), expires=datetime.datetime.today() + datetime.timedelta(days=365))
return response
# -----------------------------------------
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
|
StarcoderdataPython
|
1871560
|
import pytest # noqa
from django.test import TestCase
import ensure_footer
import json
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings.dev')
django.setup()
import common.models # noqa
class VariableValuesTest(TestCase):
def test_migrate_html(self):
""" Tests the footer html """
actual_footer = ensure_footer.migrate(dry=True)
html = '''<div class='col-md-4' ><div class="block-rich_text">
<div class="rich-text"><h4>About</h4><p>The Center for Open Science fosters
openness, integrity, and reproducibility of scientific research</p></div>
</div></div>
<div class='col-md-4' ><div class="block-rich_text"><div class="rich-text">
<h4>Contact Us</h4><p>Center for Open Science</p><p>210 Ridge McIntire Road
</p><p>Suite 500</p><p>Charlottesville, VA 22903-5083</p><p>Email:
<EMAIL></p><p><br/></p></div></div>
<div class="block-photo_stream"><h2>Photo Stream</h2>
<div class="blog-photo-stream margin-bottom-30">
<ul id="cbox" class="list-unstyled thumbs">
</ul>
</div>
</div></div>
<div class='col-md-4' ><div class="block-twitter"><div id="twitterbox">
<h2>Twitter Feed</h2>
<a class="twitter-timeline" href="https://twitter.com/OSFramework"
data-widget-id="456100099907547136" data-theme="dark" data-related=
"twitterapi,twitter" data-aria-polite="assertive" height="400" lang="EN"
data-chrome="nofooter transparent noheader noscrollbar noborders"
data-tweet-limit="3">Tweets by @OSFramework</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],
p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){
js=d.createElement(s);js.id=id;
js.src=p+"://platform.twitter.com/widgets.js";
fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");
</script>
</div>
</div></div>
'''
raw_json = json.dumps([{'type': 'raw_html', 'value': html}])
footer = common.models.Footer(title='Main')
footer.content = raw_json
test_footer = print('Dry run, aborting with footer: {} and content: {}'.format(footer, footer.content))
self.assertEqual(actual_footer, test_footer, 'The html does not match')
|
StarcoderdataPython
|
1918542
|
<filename>pkg/005_custom_application/cstraining.web/cstraining/web/__init__.py
#!/usr/bin/env powerscript
# -*- mode: python; coding: utf-8 -*-
import datetime
from cdb import auth
from cdb import util
from cdb.objects.core import Object
from cdb.platform.gui import PythonColumnProvider
class Ticket(Object):
__classname__ = "cst_ticket"
__maps_to__ = "cst_ticket"
def set_raised_by(self, ctx):
ctx.set('raised_by', auth.persno)
def make_number(self, ctx):
self.id = "T%09d" % (util.nextval("CST_TICKET_NR"))
def set_description_mandatory(self, ctx):
if ctx.dialog.priority == 'urgent':
ctx.set_mandatory('cst_ticket_description_txt')
else:
ctx.set_optional('cst_ticket_description_txt')
event_map = {
(('create', 'copy'), 'pre'): ('make_number'),
(('create', 'copy'), 'pre_mask'): ('set_raised_by'),
(('create', 'copy', 'modify'), ('dialogitem_change', 'pre_mask')): ('set_description_mandatory')
}
class TicketLog(Object):
__classname__ = "cst_ticket_log"
__maps_to__ = "cst_ticket_log"
def set_current_date(self, ctx):
ctx.set('cdate', datetime.datetime.now().isoformat())
event_map = {
(('create', 'copy'), 'pre_mask'): ('set_current_date')
}
class TicketLogDescriptionProvider(PythonColumnProvider):
@staticmethod
def getColumnDefinitions(classname, query_args):
return [
{
'column_id': 'cst_ticket_log_notes_txt',
'label': util.get_label('cst_ticket_log_notes_txt'),
'data_type': 'text',
'width': 40,
}
]
@staticmethod
def getColumnData(classname, table_data):
log_ids = [log.get('cdb_object_id') for log in table_data]
logs = TicketLog.KeywordQuery(cdb_object_id=log_ids)
text_cache = {log.cdb_object_id:
log.GetText('cst_ticket_log_notes_txt') for log in logs}
return [
{'cst_ticket_log_notes_txt': text_cache.get(log_id, '')}
for log_id in log_ids
]
@staticmethod
def getRequiredColumns(classname, available_columns):
return ['cdb_object_id']
|
StarcoderdataPython
|
9602176
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os, threading, json, time, socket
from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib
from socketserver import ThreadingMixIn
from datetime import datetime
from base64 import b64encode, b64decode
from evaluator import get_results
def log(msg):
return "[%s] %s" % (datetime.now(), str(msg))
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
class Server(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', "*")
self.send_header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
self.send_header('Content-type', 'application/json')
self.end_headers()
def _set_img_headers(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', "*")
self.send_header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
self.send_header('Content-type', 'image/png')
self.end_headers()
def do_OPTIONS(self):
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', "*")
self.send_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
self.end_headers()
def do_HEAD(self):
self._set_headers()
def do_POST(self):
results = {'status':False, 'results':[], 'verbose':None}
received = str(int(time.time()*1000))
        self._set_headers()
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
content_type = 'png' if ('png' in self.headers['Content-Type']) else 'jpg'
try:
raw_data = self.rfile.read(content_length) # <--- Gets the data itself
data = json.loads(raw_data.decode('utf8'))["input_df"][0]['image base64 string']
except Exception as err:
print("[ERROR]", err)
return
results["results"] = get_results(self.evaluator, data, self.cfg)
results["status"] = True
results["verbose"] = {
'received': received,
'resloved': str(int(time.time()*1000)),
'host': socket.gethostname()
}
self.wfile.write(bytes(json.dumps(results), "utf8"))
@classmethod
def bind_evaluator(self, _evaluator, _cfg):
self.evaluator = _evaluator
self.cfg = _cfg
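# Minimal wiring sketch (added for illustration; the host, port and the
# evaluator/cfg objects passed in are assumptions, not part of this file):
def serve(evaluator, cfg, host="0.0.0.0", port=8080):
    Server.bind_evaluator(evaluator, cfg)
    httpd = ThreadedHTTPServer((host, port), Server)
    print(log("Listening on %s:%d" % (host, port)))
    httpd.serve_forever()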
|
StarcoderdataPython
|
3363355
|
<reponame>chubbymaggie/LibRadar
import libradar
import json
import glob
apks = glob.glob("/Volumes/banana/apks/*")
i = -1
total = 10
while i < total:
try:
i += 1
print "Progress %d" % i
apk_path = apks[i]
lrd = libradar.LibRadar(apk_path)
res = lrd.compare()
print(json.dumps(res, indent=4, sort_keys=True))
#for item in res:
# if item["Library"] != "Unknown" and "Standard Package" in item and "Package" in item and item["Standard Package"] != item["Package"]:
# print "NOT_MATCH_PACKAGE_NAME"
# print item["Standard Package"]
# print item["Package"]
except Exception, e:
total += 1
print Exception,":", e
|
StarcoderdataPython
|
172723
|
<reponame>FaceandControl/genshin-parser
from src import api
from src.resources.main import Main
from src.resources.character import Сharacter
from src.resources.characters import Сharacters
#
# Declarations of app routes in type of rest web-architecture
#
# routes
api.add_resource(Main, '/', strict_slashes=False)
api.add_resource(Сharacter, '/<ln>/character/<name>', strict_slashes=False)
api.add_resource(Сharacters, '/<ln>/characters', strict_slashes=False)
|
StarcoderdataPython
|
11258635
|
<reponame>redsnic/WGA-LP
# --- default imports ---
import os
import multiprocessing
# --- load utils ---
from WGALP.utils.commandLauncher import run_sp
from WGALP.utils.genericUtils import *
from WGALP.step import Step
description = """
Run merqury to assess WGA quality
"""
input_description = """
the original fastq files and an assembly
"""
output_description = """
quality reports and plots that will be available in the output folder
"""
### Wrapper
# a k-mer size of 16 is fine for genomes of about 3 Mbp (use $MERQURY/best_k.sh <genome_size> to pick the best k)
def merqury(name, rootpath, reference, fastq1, fastq2, kmer=16, execution_mode = "on_demand"):
step = Step(name, rootpath, execution_mode=execution_mode)
step.set_command(merqury_runner)
step_args = {
"assembly": reference,
"fastq1" : fastq1,
"fastq2" : fastq2,
"kmer" : kmer
}
step.run(step_args)
step.set_description(description, input_description, output_description)
return step
### Runner
def merqury_runner(step, args):
"""
input:
assembly : path (.fasta)
fastq1 : path
fastq2 : path
kmer : number (kmer length used by the assembler)
output:
merqury_output_dir : just a link to the output folder
"""
assembly = os.path.abspath(args["assembly"])
f1 = os.path.abspath(args["fastq1"])
f2 = os.path.abspath(args["fastq2"])
k = str(args["kmer"])
# running merqury requires to run meryl
command = "cd " + step.outpath + " && "
command += "meryl k=" + k + " count output FWD.maryl " + f1 + " && "
command += "meryl k=" + k + " count output REV.maryl " + f2 + " && "
command += "meryl union-sum output UNION.maryl FWD.maryl REV.maryl && "
command += "$MERQURY/merqury.sh UNION.maryl " + assembly + " " + os.path.splitext(os.path.basename(assembly))[0] + " ; "
if step.execution_mode != "read":
run_sp(step, command)
print(command)
# organize output
step.outputs = {
"merqury_output_dir" : ""
}
return step
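# Usage sketch added for illustration only; the step name, root path and file
# paths are placeholders, and running it needs meryl/merqury on the PATH.
def example_merqury_run():
    step = merqury("merqury_qc", "/path/to/workdir",
                   reference="/path/to/assembly.fasta",
                   fastq1="/path/to/reads_R1.fastq",
                   fastq2="/path/to/reads_R2.fastq",
                   kmer=16)
    return step.outputs["merqury_output_dir"]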
|
StarcoderdataPython
|
291615
|
<gh_stars>1-10
"""
Definition of urls for djangoapp.
"""
from datetime import datetime
from django.conf.urls import url
import django.contrib.auth.views
from django.urls import path,include
from . import views
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
#url('',app.views.index),
path('', include('landing.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
StarcoderdataPython
|
9639230
|
from rest_framework import viewsets
from .models import Category
from .serializers import CategorySerializer
# Create your views here.
class CategoryViewSet(viewsets.ModelViewSet):
# Operations to be performed
queryset = Category.objects.all().order_by('-created_at')
# Class responsible for serializing the data
serializer_class = CategorySerializer
|
StarcoderdataPython
|
1842934
|
<filename>ori/slow_down_cdf.py
import numpy as np
import pickle
import matplotlib.pyplot as plt
import environment
import parameters
import pg_network
import other_agents
from cycler import cycler
def discount(x, gamma):
"""
Given vector x, computes a vector y such that
y[i] = x[i] + gamma * x[i+1] + gamma^2 x[i+2] + ...
"""
out = np.zeros(len(x))
out[-1] = x[-1]
for i in reversed(range(len(x)-1)):
out[i] = x[i] + gamma*out[i+1]
assert x.ndim >= 1
# More efficient version:
# scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
return out
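# Worked example (added for clarity): with x = [1., 2., 3.] and gamma = 0.5,
#   out[2] = 3
#   out[1] = 2 + 0.5 * 3   = 3.5
#   out[0] = 1 + 0.5 * 3.5 = 2.75
# so discount(np.array([1., 2., 3.]), 0.5) returns [2.75, 3.5, 3.].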
def categorical_sample(prob_n):
"""
Sample from categorical distribution,
specified by a vector of class probabilities
"""
prob_n = np.asarray(prob_n)
csprob_n = np.cumsum(prob_n)
return (csprob_n > np.random.rand()).argmax()
def get_traj(test_type, pa, env, episode_max_length, pg_resume=None, render=False):
"""
Run agent-environment loop for one whole episode (trajectory)
Return dictionary of results
"""
if test_type == 'PG': # load trained parameters
pg_learner = pg_network.PGLearner(pa)
net_handle = open(pg_resume, 'rb')
net_params = pickle.load(net_handle)
pg_learner.set_net_params(net_params)
env.reset()
rews = []
ob = env.observe()
for _ in range(episode_max_length):
if test_type == 'PG':
a = pg_learner.choose_action(ob)
elif test_type == 'Tetris':
a = other_agents.get_packer_action(env.machine, env.job_slot)
elif test_type == 'SJF':
a = other_agents.get_sjf_action(env.machine, env.job_slot)
elif test_type == 'Random':
a = other_agents.get_random_action(env.job_slot)
ob, rew, done, info = env.step(a, repeat=True)
rews.append(rew)
if done: break
if render: env.render()
# env.render()
return np.array(rews), info
def launch(pa, pg_resume=None, render=False, plot=False, repre='image', end='no_new_job'):
# ---- Parameters ----
test_types = ['Tetris', 'SJF', 'Random']
if pg_resume is not None:
test_types = ['PG'] + test_types
env = environment.Env(pa, render, repre=repre, end=end)
all_discount_rews = {}
jobs_slow_down = {}
work_complete = {}
work_remain = {}
job_len_remain = {}
num_job_remain = {}
job_remain_delay = {}
for test_type in test_types:
all_discount_rews[test_type] = []
jobs_slow_down[test_type] = []
work_complete[test_type] = []
work_remain[test_type] = []
job_len_remain[test_type] = []
num_job_remain[test_type] = []
job_remain_delay[test_type] = []
for seq_idx in range(pa.num_ex):
print('\n\n')
print("=============== " + str(seq_idx) + " ===============")
for test_type in test_types:
rews, info = get_traj(test_type, pa, env, pa.episode_max_length, pg_resume)
print ("---------- " + test_type + " -----------")
print ("total discount reward : \t %s" % (discount(rews, pa.discount)[0]))
all_discount_rews[test_type].append(
discount(rews, pa.discount)[0]
)
# ------------------------
# ---- per job stat ----
# ------------------------
enter_time = np.array([info.record[i].enter_time for i in range(len(info.record))])
finish_time = np.array([info.record[i].finish_time for i in range(len(info.record))])
job_len = np.array([info.record[i].len for i in range(len(info.record))])
job_total_size = np.array([np.sum(info.record[i].res_vec) for i in range(len(info.record))])
finished_idx = (finish_time >= 0)
unfinished_idx = (finish_time < 0)
jobs_slow_down[test_type].append(
(finish_time[finished_idx] - enter_time[finished_idx]) / job_len[finished_idx]
)
work_complete[test_type].append(
np.sum(job_len[finished_idx] * job_total_size[finished_idx])
)
work_remain[test_type].append(
np.sum(job_len[unfinished_idx] * job_total_size[unfinished_idx])
)
job_len_remain[test_type].append(
np.sum(job_len[unfinished_idx])
)
num_job_remain[test_type].append(
len(job_len[unfinished_idx])
)
job_remain_delay[test_type].append(
np.sum(pa.episode_max_length - enter_time[unfinished_idx])
)
env.seq_no = (env.seq_no + 1) % env.pa.num_ex
# -- matplotlib colormap no overlap --
if plot:
num_colors = len(test_types)
cm = plt.get_cmap('gist_rainbow')
fig = plt.figure()
ax = fig.add_subplot(111)
colors = [cm(1. * i / num_colors) for i in range(num_colors)]
ax.set_prop_cycle(cycler('color', colors))
for test_type in test_types:
slow_down_cdf = np.sort(np.concatenate(jobs_slow_down[test_type]))
slow_down_yvals = np.arange(len(slow_down_cdf))/float(len(slow_down_cdf))
ax.plot(slow_down_cdf, slow_down_yvals, linewidth=2, label=test_type)
plt.legend(loc=4)
plt.xlabel("job slowdown", fontsize=20)
plt.ylabel("CDF", fontsize=20)
# plt.show()
if pg_resume is not None:
plt.savefig(pg_resume + "_slowdown_fig" + ".pdf")
else:
plt.savefig("missing" + "_slowdown_fig" + ".pdf")
env.plot_state()
return all_discount_rews, jobs_slow_down
def main():
pa = parameters.Parameters()
pa.simu_len = 200 # 5000 # 1000
pa.num_ex = 10 # 100
pa.num_nw = 10
pa.num_seq_per_batch = 20
# pa.max_nw_size = 5
# pa.job_len = 5
pa.new_job_rate = 0.3
pa.discount = 1
pa.episode_max_length = 20000 # 2000
pa.compute_dependent_parameters()
render = False
plot = True # plot slowdown cdf
pg_resume = None
# pg_resume = 'data/pg_re_discount_1_rate_0.3_simu_len_200_num_seq_per_batch_20_ex_10_nw_10_1450.pkl'
# pg_resume = 'data/pg_re_1000_discount_1_5990.pkl'
pa.unseen = True
launch(pa, pg_resume, render, plot, repre='image', end='all_done')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1656975
|
import json
with open('data/classes.json') as f:
data = json.load(f)
list_of_classes = []
for book_class in data['classes']:
name = book_class['Name']
list_of_classes.append(name)
def get_key_ability(PC_class):
if PC_class in list_of_classes:
index = list_of_classes.index(PC_class)
return data['classes'][index]['Key Ability']
if __name__ == "__main__":
print(list_of_classes)
get_key_ability('Alchemist')
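# Assumed shape of data/classes.json implied by the lookups above (illustrative
# only, not copied from the repository):
#
#   {"classes": [{"Name": "Alchemist", "Key Ability": "Intelligence"}, ...]}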
|
StarcoderdataPython
|
11256140
|
"""Module for handling commands which may be attached to BinarySensor class."""
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class ActionBase:
"""Base Class for handling commands."""
def __init__(self, xknx: "XKNX", hook: str = "on", counter: Optional[int] = 1):
"""Initialize Action_Base class."""
self.xknx = xknx
self.hook = hook
self.counter = counter
def test_counter(self, counter: Optional[int]) -> bool:
"""Test if action filters for specific counter."""
if self.counter is None:
# no specific counter_filter -> always true
return True
if counter is None:
return True
return counter == self.counter
def test_if_applicable(self, state: bool, counter: Optional[int] = None) -> bool:
"""Test if should be executed for this state and this counter number."""
if state and (self.hook == "on"):
return self.test_counter(counter)
if not state and (self.hook == "off"):
return self.test_counter(counter)
return False
async def execute(self) -> None:
"""Execute action. To be overwritten in derived classes."""
logger.info("Execute not implemented for %s", self.__class__.__name__)
def __str__(self) -> str:
"""Return object as readable string."""
return f'<ActionBase hook="{self.hook}" counter="{self.counter}"/>'
def __eq__(self, other: object) -> bool:
"""Equal operator."""
return self.__dict__ == other.__dict__
class Action(ActionBase):
"""Class for handling commands."""
def __init__(
self,
xknx: "XKNX",
hook: str = "on",
target: Optional[str] = None,
method: Optional[str] = None,
counter: int = 1,
):
"""Initialize Action class."""
# pylint: disable=too-many-arguments
super().__init__(xknx, hook, counter)
self.target = target
self.method = method
@classmethod
def from_config(cls, xknx: "XKNX", config: Any) -> "Action":
"""Initialize object from configuration structure."""
hook = config.get("hook", "on")
target = config.get("target")
method = config.get("method")
counter = config.get("counter", 1)
return cls(xknx, hook=hook, target=target, method=method, counter=counter)
async def execute(self) -> None:
"""Execute action."""
if self.target is not None and self.method is not None:
if self.target not in self.xknx.devices:
logger.warning("Unknown device %s witin action %s.", self.target, self)
return
await self.xknx.devices[self.target].do(self.method)
def __str__(self) -> str:
"""Return object as readable string."""
return '<Action target="{}" method="{}" {}/>'.format(
self.target, self.method, super().__str__()
)
class ActionCallback(ActionBase):
"""Class for handling commands via callbacks."""
def __init__(
self,
xknx: "XKNX",
callback: Callable[[], Awaitable[None]],
hook: str = "on",
counter: int = 1,
):
"""Initialize Action class."""
# pylint: disable=too-many-arguments
super().__init__(xknx, hook, counter)
self.callback = callback
async def execute(self) -> None:
"""Execute callback."""
await self.callback()
def __str__(self) -> str:
"""Return object as readable string."""
return '<ActionCallback callback="{}" {}/>'.format(
self.callback.__name__, super().__str__()
)
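# Illustrative sketch (added, not part of xknx): how the hook/counter filters
# combine; None is passed in place of a real XKNX instance purely because the
# filter logic never touches it.
def _demo_filters():
    action = ActionBase(None, hook="on", counter=2)
    assert action.test_if_applicable(state=True, counter=2)       # hook and counter match
    assert not action.test_if_applicable(state=True, counter=1)   # counter mismatch
    assert not action.test_if_applicable(state=False, counter=2)  # hook expects "on"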
|
StarcoderdataPython
|
6594342
|
"""
Module for categorical kernels
Please refer to the following papers and theses for more details:
- <NAME>, <NAME>. "An investigation into new kernels for
categorical variables." Master's thesis, Universitat Politècnica de Catalunya,
2013.
"""
import numpy as np
from kernelmethods.base import BaseKernelFunction
from kernelmethods.utils import check_input_arrays
from kernelmethods import config as cfg
class MatchCountKernel(BaseKernelFunction):
"""
Categorical kernel measuring similarity via the number of matching categorical
dimensions.
Parameters
----------
return_perc : bool
If True, the return value would be normalized by the number of dimensions.
References
----------
<NAME>, <NAME>., "An investigation into new kernels for categorical
variables." Master's thesis, Universitat Politècnica de Catalunya, 2013.
"""
def __init__(self,
return_perc=True,
skip_input_checks=False):
"""Constructor."""
self.return_perc = return_perc
if self.return_perc:
super().__init__('MatchPerc')
else:
super().__init__('MatchCount')
self.skip_input_checks = skip_input_checks
def __call__(self, vec_c, vec_d):
"""
Actual implementation of the kernel func.
Parameters
----------
vec_c, vec_d : array of equal-sized categorical variables
"""
vec_c, vec_d = _check_categorical_arrays(vec_c, vec_d)
if not np.issubdtype(vec_c.dtype, cfg.dtype_categorical) or \
not np.issubdtype(vec_d.dtype, cfg.dtype_categorical):
raise TypeError('Categorical kernels require str or unicode dtype')
match_count = np.sum(vec_c==vec_d)
if self.return_perc:
return match_count / len(vec_d)
else:
return match_count
def __str__(self):
"""human readable repr"""
return self.name
def _check_categorical_arrays(x, y):
"""
Ensures the inputs are
1) 1D arrays (not matrices)
2) with compatible size
3) of categorical data type
and hence are safe to operate on.
This is a variation of utils.check_input_arrays() to accommodate the special
needs for categorical dtype, where we do not have lists of
originally numbers/bool data to be converted to strings, and assume they are
categorical.
Parameters
----------
x : iterable
y : iterable
Returns
-------
x : ndarray
y : ndarray
"""
x = _ensure_type_size(x, ensure_num_dim=1)
y = _ensure_type_size(y, ensure_num_dim=1)
if x.size != y.size:
raise ValueError('x (n={}) and y (n={}) differ in size! '
'They must be of same length'.format(x.size, y.size))
return x, y
def _ensure_type_size(array, ensure_num_dim=1):
"""Checking type and size of arrays"""
if not isinstance(array, np.ndarray):
array = np.squeeze(np.asarray(array))
if array.ndim != ensure_num_dim:
raise ValueError('array must be {}-dimensional! '
'It has {} dims with shape {} '
''.format(ensure_num_dim, array.ndim, array.shape))
return array
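# Usage sketch (added for illustration): with return_perc=True the kernel
# returns the fraction of matching positions; this assumes the library's
# cfg.dtype_categorical covers NumPy unicode string arrays.
def _demo_match_kernel():
    kernel = MatchCountKernel(return_perc=True)
    vec_a = np.array(['red', 'square', 'small'])
    vec_b = np.array(['red', 'circle', 'small'])
    return kernel(vec_a, vec_b)  # 2 of 3 positions match -> 2/3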
|
StarcoderdataPython
|
5145715
|
from docker import DockerClient
from aavm.utils.progress_bar import ProgressBar
from cpk.types import Machine, DockerImageName
ALL_STATUSES = [
"created", "restarting", "running", "removing", "paused", "exited", "dead"
]
STOPPED_STATUSES = [
"created", "exited", "dead"
]
UNSTABLE_STATUSES = [
"restarting", "removing"
]
RUNNING_STATUSES = [
"running", "paused"
]
# noinspection DuplicatedCode
def pull_image(machine: Machine, image: str, progress: bool = True):
client: DockerClient = machine.get_client()
layers = set()
pulled = set()
pbar = ProgressBar() if progress else None
for line in client.api.pull(image, stream=True, decode=True):
if "id" not in line or "status" not in line:
continue
layer_id = line["id"]
layers.add(layer_id)
if line["status"] in ["Already exists", "Pull complete"]:
pulled.add(layer_id)
# update progress bar
if progress:
percentage = max(0.0, min(1.0, len(pulled) / max(1.0, len(layers)))) * 100.0
pbar.update(percentage)
if progress:
pbar.done()
def remove_image(machine: Machine, image: str):
client: DockerClient = machine.get_client()
client.images.remove(image)
def merge_container_configs(*args) -> dict:
out = {}
for arg in args:
assert isinstance(arg, dict)
for k, v in arg.items():
if k not in out:
out[k] = v
else:
if not isinstance(arg[k], type(out[k])):
raise ValueError(f"Type clash '{type(out[k])}' !== '{type(arg[k])}' "
f"for key '{k}'.")
if isinstance(out[k], list):
out[k].extend(arg[k])
elif isinstance(out[k], dict):
out[k].update(arg[k])
else:
out[k] = arg[k]
return out
def sanitize_image_name(image: str) -> str:
return DockerImageName.from_image_name(image).compile(allow_defaults=True)
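# Illustrative example (added): merge_container_configs() concatenates lists,
# merges nested dicts and lets later arguments override scalar values.
def _demo_merge():
    base = {"volumes": ["/data:/data"], "environment": {"MODE": "dev"}, "detach": True}
    extra = {"volumes": ["/tmp:/tmp"], "environment": {"DEBUG": "1"}, "detach": False}
    return merge_container_configs(base, extra)
    # -> {"volumes": ["/data:/data", "/tmp:/tmp"],
    #     "environment": {"MODE": "dev", "DEBUG": "1"},
    #     "detach": False}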
|
StarcoderdataPython
|
1947887
|
<reponame>giaccone/the_kirchhoff_bot
from util.decorators import restricted
@restricted
def execute(update, context):
"""
    'kick' kicks a user out of the group
:param update: bot update
:param context: CallbackContext
:return: None
"""
user_id = update.message.reply_to_message.from_user.id
context.bot.kickChatMember(chat_id=update.message.chat_id, user_id=user_id)
context.bot.unbanChatMember(chat_id=update.message.chat_id, user_id=user_id)
# remove command
context.bot.delete_message(chat_id=update.message.chat_id, message_id=update.message.message_id)
|
StarcoderdataPython
|
4960838
|
<reponame>ADACS-Australia/SS2021B-DBrown
from tempfile import TemporaryDirectory
from unittest import mock
from finorch.config.config import _ClientConfigManager, WrapperConfigManager
from finorch.utils.cd import cd
def test_client_get_port():
with TemporaryDirectory() as tmp:
with mock.patch('appdirs.user_config_dir', lambda *args: tmp):
mgr = _ClientConfigManager()
assert mgr.get_port() is None
mgr.set_port(1234)
assert int(mgr.get_port()) == 1234
def test_wrapper_get_port():
with TemporaryDirectory() as tmp:
with cd(tmp):
mgr = WrapperConfigManager()
assert mgr.get_port() is None
mgr.set_port(1234)
assert int(mgr.get_port()) == 1234
|
StarcoderdataPython
|
3513007
|
'''
Created on 29 Oct 2017
@author: gustavosaquetta
'''
from src.model.cadastro.pessoa import Pessoa
from src.controller.lib.ssqt import SSQt
class PessoaController:
def formata_cpf_cnpj(self, view):
if view:
#SSQt().pdb()
if hasattr(view, 'tipo_1'):
if view.tipo_1.isChecked():
                    view.cpf.setInputMask('999.999.999-99')
if view.tipo_2.isChecked():
view.cpf.setInputMask('99.999.999/9999-99')
if view.cpf.text():
if not Cpf(view.cpf.text()).validate():
view.cpf.setText('')
def get_data_finderdialog(self, filtro=None):
C = Pessoa
campo = None
clausula = None
if filtro and filtro.get('valor'):
if filtro.get('cb_campo') == 0:
campo = 'nome'
elif filtro.get('cb_campo') == 1:
campo = 'codigo'
if filtro.get('cb_clausula') == 0:
clausula = 'ilike'
filtro['valor'] = filtro['valor']+'%%'
elif filtro.get('cb_clausula') == 1:
clausula = '=='
elif filtro.get('cb_clausula')==2:
clausula = 'ilike'
filtro['valor'] = '%%'+filtro['valor']+'%%'
if campo and clausula:
sql = "select * from pessoa where %s::text %s '%s' order by codigo limit %s" % (campo, clausula, filtro['valor'], filtro['limit'])
return C.raw(sql).dicts()
elif filtro.get('valor'):
sql = "select * from pessoa where %s::text %s '%s' limit 1" % (filtro['campo'], filtro['clausula'], filtro['valor'])
return C.raw(sql).dicts()
return C.select().dicts()
class Cnpj(object):
def __init__(self, cnpj):
"""
Class to interact with cnpj brazilian numbers
"""
self.cnpj = cnpj
def calculating_digit(self, result):
result = result % 11
if result < 2:
digit = 0
else:
digit = 11 - result
return str(digit)
def calculating_first_digit(self):
one_validation_list = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4 , 3, 2]
result = 0
pos = 0
for number in self.cnpj:
try:
one_validation_list[pos]
except IndexError:
break
result += int(number) * int(one_validation_list[pos])
pos += 1
return self.calculating_digit(result)
def calculating_second_digit(self):
two_validation_list = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
result = 0
pos = 0
for number in self.cnpj:
try:
two_validation_list[pos]
except IndexError:
break
result += int(number) * int(two_validation_list[pos])
pos += 1
return self.calculating_digit(result)
def validate(self):
"""
Method to validate brazilian cnpjs
"""
self.cnpj = self.cleaning()
if len(self.cnpj) != 14:
return False
checkers = self.cnpj[-2:]
digit_one = self.calculating_first_digit()
digit_two = self.calculating_second_digit()
return bool(checkers == digit_one + digit_two)
def cleaning(self):
return self.cnpj.replace('-', '').replace('.', '').replace('/', '')
def format(self):
"""
Method to format cnpj numbers.
"""
return '%s.%s.%s/%s-%s' % (self.cnpj[0:2], self.cnpj[2:5],
self.cnpj[5:8], self.cnpj[8:12], self.cnpj[12:14])
class Cpf(object):
def __init__(self, cpf):
self.cpf = cpf
def validate_size(self):
cpf = self.cleaning()
if len(cpf) > 11 or len(cpf) < 11:
return False
return True
def validate(self):
if self.validate_size():
digit_1 = 0
digit_2 = 0
i = 0
cpf = self.cleaning()
while i < 10:
digit_1 = ((digit_1 + (int(cpf[i]) * (11-i-1))) % 11
if i < 9 else digit_1)
digit_2 = (digit_2 + (int(cpf[i]) * (11-i))) % 11
i += 1
return ((int(cpf[9]) == (11 - digit_1 if digit_1 > 1 else 0)) and
(int(cpf[10]) == (11 - digit_2 if digit_2 > 1 else 0)))
return False
def cleaning(self):
return self.cpf.replace('.', '').replace('-', '')
def format(self):
return '%s.%s.%s-%s' % (
self.cpf[0:3], self.cpf[3:6], self.cpf[6:9], self.cpf[9:11])
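# Usage sketch (added for illustration; the digit strings below are arbitrary
# placeholders, not real documents):
def _demo_documents():
    cpf = Cpf('12345678909')
    print(cpf.format())    # -> 123.456.789-09
    print(cpf.validate())  # True or False depending on the check digits
    cnpj = Cnpj('11222333000181')
    print(cnpj.format())   # -> 11.222.333/0001-81
    print(cnpj.validate())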
|
StarcoderdataPython
|
9629636
|
import unittest
from logging import Logger, getLogger
from numpy import ndarray, array, arange, allclose
from freq_used.logging_utils import set_logging_basic_config
from optmlstat.linalg.basic_operators.matrix_multiplication_operator import MatrixMultiplicationOperator
logger: Logger = getLogger()
class TestMatrixMultiplicationOperator(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
set_logging_basic_config(__file__)
def test_matrix_multiplication_operator(self):
array_2d: ndarray = array(arange(4), float).reshape((2, 2))
array_1d: ndarray = array([1, 10], float)
logger.info(array_2d)
matrix_multiplication_operator: MatrixMultiplicationOperator = MatrixMultiplicationOperator(array_2d)
logger.info(allclose(matrix_multiplication_operator.transform(array_1d), [10, 32]))
self.assertTrue(allclose(matrix_multiplication_operator.transform(array_1d), [10, 32]))
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1653713
|
<reponame>GameDungeon/DocStats<gh_stars>1-10
"""Generate Callgraphs for documentation."""
import os
import subprocess
from typing import List
from docutils.nodes import Node
from docutils.parsers.rst import Directive
from sphinx.ext.graphviz import graphviz
from sphinx.util.typing import OptionSpec
callgraph_count = 0
class CallGraphException(Exception):
"""Exception for callgraph class."""
class call_graph(graphviz):
"""A docutils node to use as a placeholder for the call graph."""
class CallGraph(Directive):
"""Generate a callgraph."""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
"""Run the directive."""
global callgraph_count
if not os.path.exists("source/graphs"):
os.makedirs("source/graphs")
subprocess.run(
[
"ls",
"../",
]
)
subprocess.run(
[
"code2flow",
"../docstats",
"-o",
f"source/graphs/callgraph_{callgraph_count}.dot",
"--language",
"py",
# "--quiet",
]
)
with open(
f"source/graphs/callgraph_{callgraph_count}.dot", encoding="utf-8"
) as fp:
dotcode = fp.read()
callgraph_count += 1
node = graphviz()
node["code"] = dotcode
node["options"] = {}
return [node]
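# Possible Sphinx registration hook (added sketch; the directive name
# "callgraph" is an assumption, not taken from this project):
def setup(app):
    """Register the call_graph node and the CallGraph directive."""
    app.add_node(call_graph)
    app.add_directive("callgraph", CallGraph)
    return {"parallel_read_safe": True}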
|
StarcoderdataPython
|
1631923
|
<reponame>OLC-LOC-Bioinformatics/AzureStorage<filename>tests/test_azure_7_delete.py
from azure_storage.methods import client_prep, delete_container, delete_file, delete_folder, extract_account_name
from azure_storage.azure_delete import AzureDelete, cli, container_delete, file_delete, \
folder_delete
from unittest.mock import patch
import argparse
import pytest
import azure
import os
@pytest.fixture(name='variables', scope='module')
def setup():
class Variables:
def __init__(self):
self.passphrase = '<PASSWORD>'
self.container_name = '000000container'
self.account_name = extract_account_name(passphrase=self.passphrase)
return Variables()
def test_client_prep(variables):
variables.container_name, variables.connect_str, variables.blob_service_client, variables.container_client = \
client_prep(container_name=variables.container_name,
passphrase=variables.passphrase,
account_name=variables.account_name)
assert variables.connect_str.startswith('DefaultEndpointsProtocol')
@pytest.mark.parametrize('file_name',
['file_1.txt',
'container_integration/file_2.txt',
'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
'ABC/123/nested_folder_test_1.txt'])
def test_delete_file(variables, file_name):
delete_file(container_client=variables.container_client,
object_name=file_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name)
blobs = variables.container_client.list_blobs()
assert file_name not in [blob.name for blob in blobs]
@pytest.mark.parametrize('file_name',
['file_3.txt',
'container_integration/file_2.txt',
'nested_container/nested_folder/nested_folder_2/nested_folder_test_1.txt',
'ABC/123/nested_folder_test_1.txt'])
def test_delete_file_missing(variables, file_name):
with pytest.raises(SystemExit):
delete_file(container_client=variables.container_client,
object_name=file_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name)
def test_delete_file_invalid_category(variables):
with pytest.raises(SystemExit):
del_file = AzureDelete(object_name='file_1.txt',
container_name=variables.container_name,
account_name=variables.account_name,
passphrase=variables.passphrase,
retention_time=8,
category='container')
del_file.main()
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration(mock_args, variables):
file_name = 'nested/file_2.txt'
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=1)
arguments = cli()
file_delete(arguments)
blobs = variables.container_client.list_blobs()
assert os.path.basename(file_name) not in [blob.name for blob in blobs]
@pytest.mark.parametrize('retention_time',
[0,
1000])
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration_invalid_retention_time(mock_args, variables, retention_time):
file_name = 'nested/file_2.txt'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=retention_time)
arguments = cli()
file_delete(arguments)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_file_integration_missing(mock_args, variables):
file_name = 'nested/file_2.txt'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
file=file_name,
retention_time=1)
arguments = cli()
file_delete(arguments)
@pytest.mark.parametrize('folder_name,check_file',
[('container_integration/', 'nested_folder_test_1.txt'),
('nested_container/nested_folder/', 'nested_file_2.txt'),
('ABC/', 'nested_folder_test_1.txt')])
def test_delete_folder(variables, folder_name, check_file):
delete_folder(container_client=variables.container_client,
object_name=folder_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name,
account_name=variables.account_name)
blobs = variables.container_client.list_blobs()
assert os.path.join(folder_name, check_file) not in [blob.name for blob in blobs]
@pytest.mark.parametrize('folder_name,check_file',
[('container_integration/', 'nested_folder_test_1.txt'),
('nested_container/nested_folder/', 'nested_file_2.txt'),
('ABC/', 'nested_folder_test_1.txt')])
def test_delete_folder_missing(variables, folder_name, check_file):
with pytest.raises(SystemExit):
delete_folder(container_client=variables.container_client,
object_name=folder_name,
blob_service_client=variables.blob_service_client,
container_name=variables.container_name,
account_name=variables.account_name)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_folder_integration(mock_args, variables):
folder_name = 'nested_folder_3'
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
folder=folder_name,
retention_time=1)
arguments = cli()
folder_delete(arguments)
blobs = variables.container_client.list_blobs()
assert os.path.join(folder_name, 'nested_folder_test_1.txt') not in [blob.name for blob in blobs]
@patch('argparse.ArgumentParser.parse_args')
def test_delete_folder_integration_missing(mock_args, variables):
folder_name = 'nested_folder_3'
with pytest.raises(SystemExit):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info',
folder=folder_name,
retention_time=1)
arguments = cli()
folder_delete(arguments)
def test_delete_container_missing(variables):
with pytest.raises(SystemExit):
delete_container(blob_service_client=variables.blob_service_client,
container_name='000000000container',
account_name=variables.account_name)
@patch('argparse.ArgumentParser.parse_args')
def test_delete_container_integration(mock_args, variables):
mock_args.return_value = argparse.Namespace(passphrase=variables.passphrase,
account_name=variables.account_name,
container_name=variables.container_name,
verbosity='info')
arguments = cli()
container_delete(arguments)
with pytest.raises(azure.core.exceptions.ResourceExistsError):
variables.blob_service_client.create_container(variables.container_name)
|
StarcoderdataPython
|
3216999
|
<reponame>ridi/django-shard-library
from django.db import connections, transaction
from shard.exceptions import QueryExecuteFailureException
from shard.services.execute_query_service import ExecuteQueryService
from shard.utils.database import get_master_databases_by_shard_group
class QueryExecutor:
"""
    This class is a utility for running queries, such as DDL statements, that need to be executed on all master shards at once.
"""
def __init__(self, shard_group: str):
self._shard_group = shard_group
self._shards = get_master_databases_by_shard_group(shard_group=shard_group)
def run_query(self, query: str):
executed = []
try:
for shard in self._shards:
with transaction.atomic(shard):
cursor = connections[shard].cursor()
ExecuteQueryService.execute_query(cursor, query)
executed.append(shard)
except Exception as e:
raise QueryExecuteFailureException(shard_group=self._shard_group, executed=executed, exception=e)
return executed
def get_shards(self):
return self._shards
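# Usage sketch (an assumption, not part of the original module): apply a DDL
# statement to every master shard of a shard group. The shard group name and
# the SQL text below are illustrative only.
#
# executor = QueryExecutor(shard_group="user")
# applied = executor.run_query("ALTER TABLE app_user ADD COLUMN nickname VARCHAR(30)")
# print(applied)  # list of database aliases the statement ran on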
|
StarcoderdataPython
|
3224347
|
import traceback
import training_code_metric_processing
import os
import subprocess
import argparse
import json
import sys
from timeit import default_timer as timer
import running_utils
def main():
# read the parameter argument parsing
parser = argparse.ArgumentParser(
description='Modify the training code')
parser.add_argument('nas_dir', help="the path of the shared folder")
parser.add_argument('done_filename', help="the done filename")
parser.add_argument('json_filename', help="the json filename")
args = parser.parse_args()
nas_dir = args.nas_dir
done_filename = args.done_filename
json_filename = args.json_filename
with open(json_filename) as config_file:
configs = json.load(config_file)
run_set, done_f = running_utils.setup_done_file(nas_dir, done_filename, ["training_type", "dataset", "random_seed"])
for config in configs:
conf = (config['training_type'], config['dataset'], config['random_seed'], config['network'])
# Check if this config has been done before
if conf in run_set:
continue
source_dir = '%s/%s' % (config['shared_dir'], config['source_dir'])
if "modified_target_dir" in config:
modified_dir = '%s/%s/%s_%s_%s_%s' % (config['modified_target_dir'], "modified_training_files", config['network'], config['training_type'], config['dataset'], config['random_seed'])
else:
modified_dir = '%s/%s/%s_%s_%s_%s' % (config['shared_dir'], "modified_training_files", config['network'], config['training_type'], config['dataset'], config['random_seed'])
filename = '%s/%s' % (source_dir, config['main_file'])
outfilename = '%s/%s' % (modified_dir, config['main_file'])
logfilename = '%s/DLVarLog.csv' % modified_dir
try:
#subprocess.call('rm -rf %s' % modified_dir, shell=True)
#os.makedirs(modified_dir, exist_ok=True)
#subprocess.call('cp -a %s/. %s/' % (source_dir, modified_dir), shell=True)
subprocess.call('rsync -a %s/. %s/' % (source_dir, modified_dir), shell=True)
begin = timer()
'''
# modify the training code
print("Begin loops dectetion")
sys.stdout.flush()
# loops = training_code_metric_processing.extract_metric(filename)
print("End loops dectetion and begin modification")
sys.stdout.flush()
subprocess.call('python 0_1_0_single_file_modify.py ' +
filename + ' ' + outfilename + ' ' + logfilename + ' loops',
stdout=sys.stdout, stderr=sys.stderr, shell=True)
# training_code_metric_processing.modify_file(filename, None, outfilename, logfilename)
print("End modification")
sys.stdout.flush()
print('Done: ' + outfilename)
# modify evaluation code
# TODO: check config['eval_file'] is a list or not
for evalfile in config['eval_file']:
evalfilename = '%s/%s' % (source_dir, evalfile)
evaloutfilename = '%s/%s' % (modified_dir, evalfile)
evallogfilename = '%s/DLVarLogNoLoop.csv' % modified_dir
print("Begin metrics dectetion")
sys.stdout.flush()
# funcs = training_code_metric_processing.extract_metric_without_main_loop(filename)
print("End metrics dectetion and begin modification")
sys.stdout.flush()
subprocess.call('python 0_1_0_single_file_modify.py ' +
evalfilename + ' ' + evaloutfilename + ' ' + evallogfilename + ' metrics',
stdout=sys.stdout, stderr=sys.stderr, shell=True)
# training_code_metric_processing.modify_file_with_metrics(evalfilename, None, evaloutfilename, evallogfilename)
print("End modification")
sys.stdout.flush()
print('Done: ' + evaloutfilename)
'''
subprocess.call('cp ' + 'dl_logging_helper.py ' + os.path.dirname(outfilename), shell=True)
#subprocess.call('cp ' + 'dl_logging_helper_no_loop.py ' + os.path.dirname(evaloutfilename), shell=True)
subprocess.call('cp ' + '1_0_execute_single_run.sh ' + modified_dir, shell=True)
subprocess.call('cp ' + '1_0_1_execute_single_run_no_conda.sh ' + modified_dir, shell=True)
end = timer()
done_f.write('%s,%s,%s,%s,%.5f\n' % (config['network'], config['training_type'], config['dataset'], config['random_seed'], (end - begin)))
done_f.flush()
# del loops
except Exception:
print(filename)
print(traceback.format_exc())
done_f.close()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11270325
|
<reponame>GeorgeDavis-TM/aws-cloudwatch-logs-retention<filename>handler.py
import os
import json
import boto3
def getLogGroupsDict(logsClient):
describeLogGroupsResponse = logsClient.describe_log_groups()
logGroupsDict = {}
for logGroup in describeLogGroupsResponse["logGroups"]:
logGroupsDict.update({logGroup["logGroupName"]:logGroup["arn"]})
return logGroupsDict
def getRetentionPolicyDays(configDict, logGroupName):
for logGroupType in configDict["logGroupRetentionConfig"].keys():
if logGroupType in logGroupName:
return configDict["logGroupRetentionConfig"][logGroupType]
return configDict["default"]
def setLogGroupRetentionPolicy(logsClient, logGroupName, retentionPolicyDays):
putLogGroupRetentionPolicyResponse = logsClient.put_retention_policy(
logGroupName=logGroupName,
retentionInDays=retentionPolicyDays
)
print(str(putLogGroupRetentionPolicyResponse))
def main(event, context):
regionList = str(os.environ.get("regionList"))
exceptionLogGroups = str(os.environ.get("exceptionLogGroups"))
f = open('config.json', 'r')
configDict = json.loads(f.read())
f.close()
if regionList[-1] == ",":
regionList = regionList[:-1].replace(" ", "").split(",")
else:
regionList = regionList.replace(" ", "").split(",")
if exceptionLogGroups[-1] == ",":
exceptionLogGroups = exceptionLogGroups[:-1].replace(" ", "").split(",")
else:
exceptionLogGroups = exceptionLogGroups.replace(" ", "").split(",")
for regionName in regionList:
print("Starting in Region ", str(regionName), "...")
logsClient = boto3.client('logs', region_name=regionName)
logGroupsDict = getLogGroupsDict(logsClient)
for logGroupName in logGroupsDict.keys():
retentionPolicyDays = getRetentionPolicyDays(configDict, logGroupName)
setLogGroupRetentionPolicy(logsClient, logGroupName, retentionPolicyDays)
body = {
"message": "Go Serverless v3.0! Your function executed successfully!",
"input": event,
}
return {"statusCode": 200, "body": json.dumps(body)}
|
StarcoderdataPython
|
11305592
|
"""
Settings for slackbot
"""
import os
TRUE_VALUES = ('true', 'yes', '1')
def is_true(arg):
if str(arg).lower() in TRUE_VALUES:
return True
return False
##### SLACK #####
# Slack API token
# Generate one at https://my.slack.com/services/new/bot
API_TOKEN = os.environ['SLACK_API_TOKEN']
# List of plugins to load
PLUGINS = [
'haro.plugins',
]
# Command prefix
ALIASES = '$'
# Error notification target when a command fails
if os.getenv('SLACK_ERRORS_TO'):
ERRORS_TO = os.environ['SLACK_ERRORS_TO']
# Slack file upload API
FILE_UPLOAD_URL = 'https://slack.com/api/files.upload'
# Redmine ticket API
REDMINE_URL = os.environ['REDMINE_URL']
REDMINE_API_KEY = os.environ['REDMINE_API_KEY']
##### HARO #####
# Debug mode prints logs, which is handy during development
DEBUG = is_true(os.environ['HARO_DEBUG'])
if DEBUG:
import logging
logging.basicConfig(level=logging.DEBUG)
# Project root of haro
PROJECT_ROOT = os.environ['PROJECT_ROOT']
##### DB #####
SQLALCHEMY_URL = os.environ['SQLALCHEMY_URL']
SQLALCHEMY_ECHO = os.environ['SQLALCHEMY_ECHO']
SQLALCHEMY_POOL_SIZE = os.environ.get('SQLALCHEMY_POOL_SIZE')
# Mention target for the Water command
WATER_EMPTY_TO = os.environ.get('WATER_EMPTY_TO')
WATER_ORDER_NUM = os.environ.get('WATER_ORDER_NUM', 2)
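# Illustrative summary (an assumption, not part of the original settings
# module): the environment variables this file reads. Values come from the
# process environment in a real deployment; example values are placeholders.
#
# SLACK_API_TOKEN       (required)  e.g. "xoxb-..."
# SLACK_ERRORS_TO       (optional)  destination for command error notifications
# REDMINE_URL           (required)
# REDMINE_API_KEY       (required)
# HARO_DEBUG            (required)  "true"/"yes"/"1" enables DEBUG logging
# PROJECT_ROOT          (required)
# SQLALCHEMY_URL        (required)  e.g. "sqlite:///haro.db"
# SQLALCHEMY_ECHO       (required)
# SQLALCHEMY_POOL_SIZE  (optional)
# WATER_EMPTY_TO        (optional)
# WATER_ORDER_NUM       (optional, defaults to 2)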
|
StarcoderdataPython
|
11315138
|
<reponame>zyedidia/boolector
#!/usr/bin/env python
import sys, getopt
QUEENS_MODE = 0
QUEENS_MODE_NP1 = 1
QUEENS_MODE_GTN = 2
NO_THREE_IN_LINE_MODE = 3
NO_THREE_IN_LINE_MODE_2NP1 = 4
NO_THREE_IN_LINE_MODE_GT2N = 5
SEQ_ADDER_ENCODING = 0
PAR_ADDER_ENCODING = 1
ITE_ENCODING = 2
LOOKUP_ENCODING = 3
SHIFTER_ENCODING = 4
def usage():
print ("usage: queensbv [-h] [-s <size>] [-m <mode>] [-e <encoding>]")
print ("")
print (" available modes: ")
print (" 0: regular queens mode (default)")
print (" 1: try to place n + 1 queens on an n x n board")
print (" 2: try to place m queens on an n x n board with m > n")
print (" 3: regular no-3-in-line mode")
print (" 4: no-3-in-line mode with 2 * n + 1 queens on an n x n board")
print (" 5: no-3-in-line mode with m queens on an n x n board with m > 2n")
print ("")
print (" available encodings: ")
print (" 0: simple adder encoding (default)")
print (" 1: parallel adder encoding")
print (" 2: if-then-else encoding")
print (" 3: lookup encoding")
print (" 4: shifter encoding")
print ("")
sys.exit(0)
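# Example invocation (illustrative; the output file name is an assumption):
#
#     python queensbv.py -s 8 -m 0 -e 1 > queens8_par_adder.smt
#
# generates an SMT-LIB benchmark that tries to place 8 queens on an 8 x 8
# board using the parallel adder encoding for the cardinality constraints.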
def die(msg):
assert msg != None
print (msg)
sys.exit(1)
def is_power_of_2 (x):
assert x > 0
return (x & (x - 1)) == 0
def next_power_of_2 (x):
assert x > 0
x -= 1
i = 1
while i < 32:
x = x | (x >> i)
i *= 2
return x + 1
def log2 (x):
result = 0
assert x > 0
assert is_power_of_2 (x)
while x > 1:
x >>= 1
result += 1
assert result >= 0
return result
def board_field (x, y):
assert x >= 0
assert y >= 0
return "board" + str(x) + "_" + str(y)
mode = QUEENS_MODE
encoding = SEQ_ADDER_ENCODING
size = 8
id = 1
constraints = []
num_bits_size = 0
num_bits_fields = 0
shiftvarslistmap = {}
shiftvarscounter = 0
logsize = -1
def add_seq (list, ext):
global id
assert list != None
assert len (list) >= 2
assert ext >= 0
print ("(let (?e" + str(id) + " (zero_extend[" + str(ext) + \
"] " + list[0] + "))")
last = "?e" + str(id)
id += 1
for i in range(1, len(list)):
print ("(let (?e" + str(id) + " (bvadd " + last + " (zero_extend[" + \
str(ext) + "] " + list[i] + ")))")
last = "?e" + str(id)
id += 1
return last, ext + 1
def add_par (list, bw):
global id
assert list != None
assert len (list) >= 2
assert bw > 0
while len(list) != 1:
i = 0
next = []
while i < len(list):
if i != len(list) - 1:
print ("(let (?e" + str(id) + " (bvadd (zero_extend[1] " + \
list[i] + ") (zero_extend[1] " + list[i + 1] + ")))")
else:
print ("(let (?e" + str(id) + " (zero_extend[1] " + list[i] + "))")
last = "?e" + str(id)
next.append(last)
id += 1
i += 2
list = next
bw += 1
return last, bw
def or_par (list, bw):
global id
assert list != None
assert len (list) >= 2
assert bw > 0
while len(list) != 1:
i = 0
next = []
while i < len(list):
if i != len(list) - 1:
print ("(let (?e" + str(id) + " (bvor " + list[i] + " " + list[i + 1] + \
"))")
else:
print ("(let (?e" + str(id) + " (bvor " + list[i] + " " + \
"bv0[" + str(bw) + "]))")
last = "?e" + str(id)
next.append(last)
id += 1
i += 2
list = next
return last
def and_par (list, bw):
global id
assert list != None
assert len (list) >= 2
assert bw > 0
bits = ""
for i in range(bw):
bits += "1"
while len(list) != 1:
i = 0
next = []
while i < len(list):
if i != len(list) - 1:
print ("(let (?e" + str(id) + " (bvand " + list[i] + " " + \
list[i + 1] + "))")
else:
print ("(let (?e" + str(id) + " (bvand " + list[i] + " " + \
"bv" + str(int(bits, 2)) + "[" + str(bw) + "]))")
last = "?e" + str(id)
next.append(last)
id += 1
i += 2
list = next
return last
def add_lookup_8_4 (list):
global id
global lookup
assert list != None
assert len(list) != 1
addlist = []
    numloops = len(list) // 8
if (len(list) % 8) > 0:
numloops += 1
for i in range(numloops):
concatlist = []
for j in range(8):
if i * 8 + j < len(list):
concatlist.append (list[i * 8 + j])
else:
concatlist.append ("bv0[1]")
last = concat_list (concatlist)
print ("(let (?e" + str(id) + " (select " + lookup + " " + last + "))")
last = "?e" + str(id)
id += 1
addlist.append (last)
assert len(addlist) > 0
if len(addlist) == 1:
return addlist[0], 4
else:
return add_par (addlist, 4)
def ite_encode_eq_rec (list, pos, k):
assert list != None
assert pos >= 0
if pos == len(list):
if k == 0:
return "true"
return "false"
if len(list) - pos < k or k < 0:
return "false"
result = "(if_then_else (= " + list[pos] + " bv1[1]) "
result += ite_encode_eq_rec (list, pos + 1, k - 1) + " "
result += ite_encode_eq_rec (list, pos + 1, k) + ")"
return result
def ite_encode_eq (list, k):
global id
assert list != None
assert len(list) >= 2
assert k > 0
result = ite_encode_eq_rec (list, 0, k)
sys.stdout.write("(flet ($e" + str(id) + " " + result +")\n")
def ite_encode_lt_rec (list, pos, counter, k):
assert list != None
assert pos >= 0
assert counter >= 0
if len(list) - pos + counter < k:
return "true"
if counter >= k:
return "false"
result = "(if_then_else (= " + list[pos] + " bv1[1]) "
result += ite_encode_lt_rec (list, pos + 1, counter + 1, k) + " "
result += ite_encode_lt_rec (list, pos + 1, counter, k) + ")"
return result
def ite_encode_lt (list, k):
global id
assert list != None
assert len(list) >= 2
assert k > 0
result = ite_encode_lt_rec (list, 0, 0, k)
sys.stdout.write("(flet ($e" + str(id) + " " + result +")\n")
def ite_encode_ge (list, k):
global id
assert list != None
assert len(list) >= 2
assert k > 0
result = ite_encode_lt_rec (list, 0, 0, k)
sys.stdout.write("(flet ($e" + str(id) + " (not " + result +"))\n")
def concat_list (list):
global id
assert list != None
assert len(list) >= 2
while len(list) != 1:
i = 0
next = []
while i < len(list):
if i != len(list) - 1:
print ("(let (?e" + str(id) + " (concat " + list[i] + " " + \
list[i + 1] + "))")
else:
next.pop()
print ("(let (?e" + str(id) + " (concat " + last + " " + list[i] + "))")
last = "?e" + str(id)
next.append(last)
id += 1
i += 2
list = next
return last
def shift_encode_eq_1 (list, shiftvarlist):
global id
global logsize
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert len(shiftvarlist) >= 1
listlen = len(list)
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
"(zero_extend[" + str(listlen - logsize) + "] " + \
shiftvarlist.pop() + ")))")
last = "?e" + str(id)
id += 1
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + last + " " + vec + "))")
def shift_encode_eq_2 (list, shiftvarlist):
global id
global logsize
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert len(shiftvarlist) >= 1
listlen = len(list)
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
"(zero_extend[" + str(listlen - logsize) + "] " + \
shiftvarlist.pop() + ")))")
shift1 = "?e" + str(id)
id += 1
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
"(zero_extend[" + str(listlen - logsize) + "] " + \
shiftvarlist.pop() + ")))")
shift2 = "?e" + str(id)
id += 1
print ("(let (?e" + str(id) + " (bvor " + shift1 + " " + shift2 + "))")
orshift = "?e" + str(id)
id += 1
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + orshift + " " + vec + "))")
and1 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (not (= " + shift1 + " " + shift2 + ")))")
and2 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (and " + and1 + " " + and2 + "))")
def shift_encode_eq_k (list, shiftvarlist, k):
global id
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert k > 2
assert len(shiftvarlist) >= k
listlen = len(list)
log2listlen = log2(listlen)
orlist = []
for i in range (k):
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
"(zero_extend[" + str(listlen - log2listlen) + "] " + \
shiftvarlist.pop() + ")))")
last = "?e" + str(id)
id += 1
orlist.append (last)
orshift = or_par (orlist, listlen)
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + orshift + " " + vec + "))")
and1 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (distinct")
for i in range(len(orlist)):
print (orlist[i])
print ("))")
and2 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (and " + and1 + " " + and2 + "))")
def shift_encode_gt_k (list, shiftvarlist, shiftvarlistone, k):
global id
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert shiftvarlistone != None
assert len(shiftvarlistone) > 0
assert k > 2
assert len(shiftvarlist) >= len(list) - k - 1 - 1
listlen = len(list)
log2listlen = log2(listlen)
andlist = []
bits = "10"
for i in range(2, listlen):
bits += "1"
print ("(let (?e" + str(id) + " (concat " + shiftvarlistone.pop() + " bv" + \
str(2 ** (listlen - 2)) + "[" + str(listlen - 1) + "]))")
last = "?e" + str(id)
id += 1
andlist.append (last)
for i in range (1, len(list) - k - 1):
print ("(let (?e" + str(id) + " (bvashr bv" + str(int(bits, 2)) + "[" + \
str(listlen) + "] " + shiftvarlist.pop() + "))")
last = "?e" + str(id)
id += 1
andlist.append (last)
andshift = and_par (andlist, listlen)
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + andshift + " " + vec + "))")
def shift_encode_le_1 (list, shiftvarlist):
global id
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert len(shiftvarlist) >= 1
listlen = len(list)
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
shiftvarlist.pop() + "))")
last = "?e" + str(id)
id += 1
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + last + " " + vec + "))")
def shift_encode_le_2 (list, shiftvarlist):
global id
assert list != None
assert len(list) >= 2
assert shiftvarlist != None
assert len(shiftvarlist) >= 1
listlen = len(list)
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
shiftvarlist.pop() + "))")
shift1 = "?e" + str(id)
id += 1
print ("(let (?e" + str(id) + " (bvshl bv1[" + str(listlen) + "] " + \
shiftvarlist.pop() + "))")
shift2 = "?e" + str(id)
id += 1
print ("(let (?e" + str(id) + " (bvor " + shift1 + " " + shift2 + "))" )
orshift = "?e" + str(id)
id += 1
vec = concat_list (list)
print ("(flet ($e" + str(id) + " (= " + orshift + " " + vec + "))")
and1 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (not (= " + shift1 + " " + shift2 + ")))")
and2 = "$e" + str(id)
id += 1
print ("(flet ($e" + str(id) + " (and " + and1 + " " + and2 + "))")
try:
opts, args = getopt.getopt(sys.argv[1:], "hm:s:e:")
except getopt.GetoptError as err:
print (str(err))
usage()
for o, a in opts:
if o in ("-h"):
usage()
elif o in ("-m"):
if a == "0":
mode = QUEENS_MODE
elif a == "1":
mode = QUEENS_MODE_NP1
elif a == "2":
mode = QUEENS_MODE_GTN
elif a == "3":
mode = NO_THREE_IN_LINE_MODE
elif a == "4":
mode = NO_THREE_IN_LINE_MODE_2NP1
elif a == "5":
mode = NO_THREE_IN_LINE_MODE_GT2N
else:
die ("mode must be >= 0 and <= 5")
elif o in ("-e"):
if a == "0":
encoding = SEQ_ADDER_ENCODING
elif a == "1":
encoding = PAR_ADDER_ENCODING
elif a == "2":
encoding = ITE_ENCODING
elif a == "3":
encoding = LOOKUP_ENCODING
elif a == "4":
encoding = SHIFTER_ENCODING
else:
die ("encoding must be >= 0 and <= 4")
elif o in ("-s"):
size = int (a)
if size < 4:
die ("size must be >= 4")
if encoding == SHIFTER_ENCODING:
if not is_power_of_2 (size):
die ("shifter encoding needs that the board size is a power of two")
logsize = log2(size)
sizesqr = size * size
num_bits_size = log2 (next_power_of_2 (size + 1))
num_bits_fields = log2 (next_power_of_2 (sizesqr + 1))
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(benchmark queensNoThreeInLine" + str(size) + "x" + str(size) )
else:
print ("(benchmark queens" + str(size) + "x" + str(size))
print (":source {")
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("BV encoding of no three-in-line problem")
else:
print ("BV encoding of n-queens problem")
if mode == QUEENS_MODE:
print ("We try to place " + str(size) + \
" queens on a " + str(size) + " x " + str(size) + " board")
elif mode == QUEENS_MODE_NP1:
print ("We try to place n + 1 queens on an n x n board")
elif mode == QUEENS_MODE_GTN:
print ("We try to place m queens on an n x n board with m > n")
elif mode == NO_THREE_IN_LINE_MODE:
print ("We try to place " + str(2 * size) + \
" queens on a " + str(size) + " x " + str(size) + " board")
elif mode == NO_THREE_IN_LINE_MODE_2NP1:
print ("We try to place " + str(2 * size + 1) + \
" queens on a " + str(size) + " x " + str(size) + " board")
elif mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("We try to place m queens on an n x n board with m > 2n")
if encoding == SEQ_ADDER_ENCODING:
print ("Cardinality constraints are encoded by simple adder circuits")
elif encoding == PAR_ADDER_ENCODING:
print ("Cardinality constraints are encoded by parallel adder circuits")
elif encoding == ITE_ENCODING:
print ("Cardinality constraints are encoded by ITEs")
elif encoding == SHIFTER_ENCODING:
print ("Cardinality constraints are encoded by shifters")
else:
assert encoding == LOOKUP_ENCODING
print ("Cardinality constraints are encoded by lookups and parallel adders")
print ("Contributed by <NAME> (<EMAIL>)")
print ("}")
if mode == QUEENS_MODE:
print (":status sat")
elif mode == NO_THREE_IN_LINE_MODE:
print (":status unknown")
else:
print (":status unsat")
if encoding == LOOKUP_ENCODING:
print (":logic QF_AUFBV")
print (":extrafuns ((lookup Array[8:4]))")
else:
print (":logic QF_BV")
for i in range(size):
for j in range(size):
print (":extrafuns ((" + board_field(i, j) + " BitVec[1]))")
#generate additional variables for shifters
if encoding == SHIFTER_ENCODING:
varlist = []
assert is_power_of_2 (size)
if mode == QUEENS_MODE or mode == QUEENS_MODE_NP1 or \
mode == QUEENS_MODE_GTN:
#generate variables for rows and cols
for i in range(2 * size):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(logsize) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(logsize)] = varlist
for i in range (2, size + 1):
istring = str(i)
            if istring in shiftvarslistmap:
varlist = shiftvarslistmap[istring]
else:
varlist = []
if i == size:
limit = 2
else:
limit = 4
for j in range(limit):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + istring + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[istring] = varlist
if mode == QUEENS_MODE_NP1:
log2sizesqr = log2 (sizesqr)
            if str(log2sizesqr) in shiftvarslistmap:
varlist = shiftvarslistmap[str(log2sizesqr)]
else:
varlist = []
for i in range(size + 1):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(log2sizesqr) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(log2sizesqr)] = varlist
elif mode == QUEENS_MODE_GTN:
if shiftvarslistmap.has_key ("1"):
varlist = shiftvarslistmap["1"]
else:
varlist = []
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[1]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap["1"] = varlist
            if str(sizesqr) in shiftvarslistmap:
varlist = shiftvarslistmap[str(sizesqr)]
else:
varlist = []
for i in range(1, sizesqr - size - 1):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(sizesqr) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(sizesqr)] = varlist
else:
assert mode == NO_THREE_IN_LINE_MODE or \
mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N
#generate variables for rows and cols
for i in range(4 * size):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(logsize) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(logsize)] = varlist
for i in range (3, size + 1):
istring = str(i)
            if istring in shiftvarslistmap:
varlist = shiftvarslistmap[istring]
else:
varlist = []
if i == size:
limit = 4
else:
limit = 8
for j in range(limit):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + istring + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[istring] = varlist
if mode == NO_THREE_IN_LINE_MODE_2NP1:
log2sizesqr = log2 (sizesqr)
            if str(log2sizesqr) in shiftvarslistmap:
varlist = shiftvarslistmap[str(log2sizesqr)]
else:
varlist = []
for i in range(2 * size + 1):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(log2sizesqr) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(log2sizesqr)] = varlist
elif mode == NO_THREE_IN_LINE_MODE_GT2N:
if shiftvarslistmap.has_key ("1"):
varlist = shiftvarslistmap["1"]
else:
varlist = []
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[1]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap["1"] = varlist
            if str(sizesqr) in shiftvarslistmap:
varlist = shiftvarslistmap[str(sizesqr)]
else:
varlist = []
for i in range(1, sizesqr - 2 * size - 1):
var = "v" + str(shiftvarscounter)
print (":extrafuns ((" + var + " BitVec[" + str(sizesqr) + "]))")
shiftvarscounter += 1
varlist.append(var)
shiftvarslistmap[str(sizesqr)] = varlist
print (":formula")
#generate lookup table
if encoding == LOOKUP_ENCODING:
last = "lookup"
for i in range(2):
for j in range(2):
for k in range(2):
for l in range(2):
for m in range(2):
for n in range(2):
for o in range(2):
for p in range(2):
index = str(i) + str(j) + str(k) + str(l) + str(m) + \
str(n) + str(o) + str(p)
sum = 0
for bit in index:
if bit == '1':
sum += 1
print ("(let (?e" + str(id) + " (store " + last + " bv" + \
str(int(index, 2)) + "[8]" + " bv" + str(sum) + \
"[4]))")
last = "?e" + str(id)
id += 1
lookup = last
# generate row constraints
for i in range(size):
list = []
for j in range(size):
list.append(board_field(i, j))
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_eq (list, 2);
else:
ite_encode_eq (list, 1);
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_eq_2 (list, shiftvarslistmap[str(logsize)]);
else:
shift_encode_eq_1 (list, shiftvarslistmap[str(logsize)]);
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (= " + last + " bv2[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (= " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
# generate col constraints
for i in range(size):
list = []
for j in range(size):
list.append(board_field(j, i))
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_eq (list, 2)
else:
ite_encode_eq (list, 1)
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_eq_2 (list, shiftvarslistmap[str(logsize)]);
else:
shift_encode_eq_1 (list, shiftvarslistmap[str(logsize)]);
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (= " + last + " bv2[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (= " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
#generate diagonal constraints
for i in range(1, size):
list = []
list.append (board_field(i, 0))
row = i - 1
col = 1
assert row >= 0 and col < size
while row >= 0 and col < size:
list.append(board_field(row, col))
row -= 1
col += 1
if (mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N) and len(list) < 3:
continue
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_lt (list, 3)
else:
ite_encode_lt (list, 2)
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_le_2 (list, shiftvarslistmap[str(len(list))])
else:
shift_encode_le_1 (list, shiftvarslistmap[str(len(list))])
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (bvult " + last + " bv3[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (bvule " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
for i in range(1, size - 1):
list = []
list.append(board_field(size - 1, i))
row = size - 1 - 1
col = i + 1
assert row >= 0 and col < size
while row >= 0 and col < size:
list.append(board_field(row, col))
row -= 1
col += 1
if (mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N) and len(list) < 3:
continue
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_lt (list, 3)
else:
ite_encode_lt (list, 2)
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_le_2 (list, shiftvarslistmap[str(len(list))])
else:
shift_encode_le_1 (list, shiftvarslistmap[str(len(list))])
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (bvult " + last + " bv3[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (bvule " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
for i in range(1, size):
list = []
list.append (board_field(i, size - 1))
row = i - 1
col = size - 1 - 1
assert row >= 0 and col >= 0
while row >= 0 and col >= 0:
list.append (board_field(row, col))
row -= 1
col -= 1
if (mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N) and len(list) < 3:
continue
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_lt (list, 3)
else:
ite_encode_lt (list, 2)
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_le_2 (list, shiftvarslistmap[str(len(list))])
else:
shift_encode_le_1 (list, shiftvarslistmap[str(len(list))])
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (bvult " + last + " bv3[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (bvule " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
for i in range(1, size - 1):
list = []
list.append (board_field(size - 1, size - 1 - i))
row = size - 1 - 1
col = size - 1 - i - 1
assert row >= 0 and col >= 0
while row >= 0 and col >= 0:
list.append (board_field(row, col))
row -= 1
col -= 1
if (mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N) and len(list) < 3:
continue
if encoding == ITE_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
ite_encode_lt (list, 3)
else:
ite_encode_lt (list, 2)
elif encoding == SHIFTER_ENCODING:
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
shift_encode_le_2 (list, shiftvarslistmap[str(len(list))])
else:
shift_encode_le_1 (list, shiftvarslistmap[str(len(list))])
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_size - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == NO_THREE_IN_LINE_MODE or mode == NO_THREE_IN_LINE_MODE_2NP1 or \
mode == NO_THREE_IN_LINE_MODE_GT2N:
print ("(flet ($e" + str(id) + " (bvult " + last + " bv3[" + \
str(bw_adder) + "]))")
else:
print ("(flet ($e" + str(id) + " (bvule " + last + " bv1[" + \
str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
# generate additional constraints
if mode == QUEENS_MODE_NP1 or mode == QUEENS_MODE_GTN or \
mode == NO_THREE_IN_LINE_MODE_2NP1 or mode == NO_THREE_IN_LINE_MODE_GT2N:
list = []
for i in range(size):
for j in range(size):
list.append (board_field(i, j))
if encoding == ITE_ENCODING:
if mode == QUEENS_MODE_NP1:
ite_encode_eq (list, size + 1)
elif mode == QUEENS_MODE_GTN:
ite_encode_ge (list, size + 1)
elif mode == NO_THREE_IN_LINE_MODE_2NP1:
ite_encode_eq (list, 2 * size + 1)
        else:
            assert mode == NO_THREE_IN_LINE_MODE_GT2N
            ite_encode_ge (list, 2 * size + 1)
elif encoding == SHIFTER_ENCODING:
if mode == QUEENS_MODE_NP1:
shift_encode_eq_k (list, shiftvarslistmap[str(log2(sizesqr))], size + 1)
elif mode == QUEENS_MODE_GTN:
shift_encode_gt_k (list, shiftvarslistmap[str(sizesqr)], \
shiftvarslistmap["1"], size)
elif mode == NO_THREE_IN_LINE_MODE_2NP1:
shift_encode_eq_k (list, shiftvarslistmap[str(log2(sizesqr))], \
2 *size + 1)
else:
assert mode == NO_THREE_IN_LINE_MODE_GT2N
shift_encode_gt_k (list, shiftvarslistmap[str(sizesqr)], \
shiftvarslistmap["1"], 2 * size)
else:
if encoding == SEQ_ADDER_ENCODING:
last, bw_adder = add_seq (list, num_bits_fields - 1)
elif encoding == LOOKUP_ENCODING:
last, bw_adder = add_lookup_8_4 (list)
else:
assert encoding == PAR_ADDER_ENCODING
last, bw_adder = add_par (list, 1)
if mode == QUEENS_MODE_NP1:
print ("(flet ($e" + str(id) + " (= " + last + " bv" + str(size + 1) + \
"[" + str(bw_adder) + "]))")
elif mode == QUEENS_MODE_GTN:
print ("(flet ($e" + str(id) + " (bvugt " + last + " bv" + str(size) + \
"[" + str(bw_adder) + "]))")
elif mode == NO_THREE_IN_LINE_MODE_2NP1:
print ("(flet ($e" + str(id) + " (= " + last + " bv" + \
str(2 * size + 1) + "[" + str(bw_adder) + "]))")
else:
assert mode == NO_THREE_IN_LINE_MODE_GT2N
print ("(flet ($e" + str(id) + " (bvugt " + last + " bv" + \
str(2 * size) + "[" + str(bw_adder) + "]))")
constraints.append ("$e" + str(id))
id += 1
# combine all constraints by AND
assert len(constraints) >= 2
last = constraints[0]
for i in range(1, len(constraints)):
print ("(flet ($e" + str(id) + " (and " + last + " " + constraints[i] + "))")
last = "$e" + str(id)
id += 1
print (last)
pars = ""
for i in range(id):
pars = pars + ")"
print (pars)
if encoding == SHIFTER_ENCODING:
assert shiftvarslistmap != None
for list in shiftvarslistmap.values():
assert len(list) == 0
|
StarcoderdataPython
|
8189313
|
from registry.extensions import db
from registry.list.models import DonationCenter, Medals
from registry.utils import capitalize, format_postal_code
class Batch(db.Model):
__tablename__ = "batches"
id = db.Column(db.Integer, primary_key=True)
donation_center_id = db.Column(db.ForeignKey(DonationCenter.id))
donation_center = db.relationship("DonationCenter")
imported_at = db.Column(db.DateTime, nullable=False)
def __repr__(self):
return f"<Batch({self.id}) from {self.imported_at}>"
class Record(db.Model):
__tablename__ = "records"
id = db.Column(db.Integer, primary_key=True)
batch_id = db.Column(db.ForeignKey(Batch.id, ondelete="CASCADE"), nullable=False)
batch = db.relationship("Batch")
rodne_cislo = db.Column(db.String(10), index=True, nullable=False)
first_name = db.Column(db.String, nullable=False)
last_name = db.Column(db.String, nullable=False)
address = db.Column(db.String, nullable=False)
city = db.Column(db.String, nullable=False)
postal_code = db.Column(db.String(5), nullable=False)
kod_pojistovny = db.Column(db.String(3), nullable=False)
donation_count = db.Column(db.Integer, nullable=False)
def __repr__(self):
return f"<Record({self.id}) {self.rodne_cislo} from Batch {self.batch}>"
@classmethod
def from_list(cls, list):
return cls(
batch_id=list[0],
rodne_cislo=list[1],
first_name=list[2],
last_name=list[3],
address=list[4],
city=list[5],
postal_code=list[6],
kod_pojistovny=list[7],
donation_count=list[8],
)
def as_original(self, donation_count=None):
fields = [
"rodne_cislo",
"first_name",
"last_name",
"address",
"city",
"postal_code",
"kod_pojistovny",
"donation_count",
]
values = [str(getattr(self, field)) for field in fields]
if donation_count:
values[-1] = donation_count
line = ";".join(values)
line += "\r\n"
return line
class IgnoredDonors(db.Model):
__tablename__ = "ignored_donors"
rodne_cislo = db.Column(db.String(10), primary_key=True)
reason = db.Column(db.String, nullable=False)
ignored_since = db.Column(db.DateTime, nullable=False)
class AwardedMedals(db.Model):
__tablename__ = "awarded_medals"
rodne_cislo = db.Column(db.String(10), index=True, nullable=False)
medal_id = db.Column(db.ForeignKey(Medals.id))
medal = db.relationship("Medals")
# NULL means unknown data - imported from the old system
awarded_at = db.Column(db.DateTime, nullable=True)
    __table_args__ = (db.PrimaryKeyConstraint(rodne_cislo, medal_id),)
class DonorsOverview(db.Model):
__tablename__ = "donors_overview"
rodne_cislo = db.Column(db.String(10), primary_key=True)
first_name = db.Column(db.String, nullable=False)
last_name = db.Column(db.String, nullable=False)
address = db.Column(db.String, nullable=False)
city = db.Column(db.String, nullable=False)
postal_code = db.Column(db.String(5), nullable=False)
kod_pojistovny = db.Column(db.String(3), nullable=False)
donation_count_fm = db.Column(db.Integer, nullable=False)
donation_count_fm_bubenik = db.Column(db.Integer, nullable=False)
donation_count_trinec = db.Column(db.Integer, nullable=False)
donation_count_mp = db.Column(db.Integer, nullable=False)
donation_count_manual = db.Column(db.Integer, nullable=False)
donation_count_total = db.Column(db.Integer, nullable=False)
awarded_medal_br = db.Column(db.Boolean, nullable=False)
awarded_medal_st = db.Column(db.Boolean, nullable=False)
awarded_medal_zl = db.Column(db.Boolean, nullable=False)
awarded_medal_kr3 = db.Column(db.Boolean, nullable=False)
awarded_medal_kr2 = db.Column(db.Boolean, nullable=False)
awarded_medal_kr1 = db.Column(db.Boolean, nullable=False)
awarded_medal_plk = db.Column(db.Boolean, nullable=False)
note = db.relationship(
"Note",
uselist=False,
primaryjoin="foreign(DonorsOverview.rodne_cislo) == Note.rodne_cislo",
)
frontend_column_names = {
"rodne_cislo": "Rodné číslo",
"first_name": "Jméno",
"last_name": "Příjmení",
"address": "Adresa",
"city": "Město",
"postal_code": "PSČ",
"kod_pojistovny": "Pojišťovna",
"donations": "Darování Celkem",
"last_award": "Ocenění",
"note": "Pozn.",
}
# Fields for frontend not calculated from multiple columns
basic_fields = [
c
for c in frontend_column_names.keys()
if c not in ("donations", "last_award", "note")
]
def __repr__(self):
return f"<DonorsOverview ({self.rodne_cislo})>"
@classmethod
def get_filter_for_search(cls, search_str):
conditions_all = []
for part in search_str.split():
conditions_all.append([])
for column_name in cls.frontend_column_names.keys():
if hasattr(cls, column_name):
if column_name == "note":
column = Note.note
else:
column = getattr(cls, column_name)
contains = getattr(column, "contains")
conditions_all[-1].append(contains(part, autoescape=True))
return db.and_(*[db.or_(*conditions) for conditions in conditions_all])
@classmethod
def get_order_by_for_column_id(cls, column_id, direction):
column_name = list(cls.frontend_column_names.keys())[column_id]
if hasattr(cls, column_name):
column = getattr(cls, column_name)
return (getattr(column, direction)(),)
elif column_name == "donations":
column_name = "donation_count_total"
column = getattr(cls, column_name)
return (getattr(column, direction)(),)
elif column_name == "last_award":
order_by = []
for medal in Medals.query.order_by(Medals.id.asc()).all():
column = getattr(cls, "awarded_medal_" + medal.slug)
order_by.append(getattr(column, direction)())
return order_by
def dict_for_frontend(self):
# All standard attributes
donor_dict = {}
for name in self.frontend_column_names.keys():
donor_dict[name] = getattr(self, name, None)
# Note is special because note column contains
# Note object but we need to get its text which
# is in Note.note attr.
if donor_dict[name] is not None and name == "note":
donor_dict[name] = donor_dict[name].note
elif donor_dict[name] is not None and name in (
"first_name",
"last_name",
"address",
"city",
):
donor_dict[name] = capitalize(donor_dict[name])
# Highest awarded medal
for medal in Medals.query.order_by(Medals.id.desc()).all():
if getattr(self, "awarded_medal_" + medal.slug):
donor_dict["last_award"] = medal.title
break
else:
donor_dict["last_award"] = "Žádné"
# Dict with all donations which we use on frontend
# to generate tooltip
donor_dict["donations"] = {
dc.slug: {
"count": getattr(self, "donation_count_" + dc.slug),
"name": dc.title,
}
for dc in DonationCenter.query.all()
}
donor_dict["donations"]["total"] = self.donation_count_total
# Format the donor's postal code
donor_dict["postal_code"] = format_postal_code(self.postal_code)
return donor_dict
@classmethod
def refresh_overview(cls, rodne_cislo=None):
if rodne_cislo:
row = cls.query.get(rodne_cislo)
if row is not None:
db.session.delete(row)
record = Record.query.filter(
Record.rodne_cislo == rodne_cislo
).first_or_404()
# Thanks to the lines above, we know that it's safe to create this small
# part of the SQL query manually. Usually it's a bad idea due to possible
# SQL injection, but, we know that rodne_cislo is valid and exists in
# other parts of this database so it should be fine to use it like this.
sql_condition = "records.rodne_cislo = :rodne_cislo AND "
params = {"rodne_cislo": record.rodne_cislo}
else:
cls.query.delete()
sql_condition = ""
params = {}
db.session.commit()
full_query = f"""INSERT INTO "donors_overview"
(
"rodne_cislo",
"first_name",
"last_name",
"address",
"city",
"postal_code",
"kod_pojistovny",
"donation_count_fm",
"donation_count_fm_bubenik",
"donation_count_trinec",
"donation_count_mp",
"donation_count_manual",
"donation_count_total",
"awarded_medal_br",
"awarded_medal_st",
"awarded_medal_zl",
"awarded_medal_kr3",
"awarded_medal_kr2",
"awarded_medal_kr1",
"awarded_medal_plk"
)
SELECT
-- "rodne_cislo" uniquely identifies a person.
"records"."rodne_cislo",
-- Personal data from the person’s most recent batch
-- or from manual overrides.
COALESCE(
"donors_override"."first_name",
"records"."first_name"
),
COALESCE(
"donors_override"."last_name",
"records"."last_name"
),
COALESCE(
"donors_override"."address",
"records"."address"
),
COALESCE(
"donors_override"."city",
"records"."city"
),
COALESCE(
"donors_override"."postal_code",
"records"."postal_code"
),
COALESCE(
"donors_override"."kod_pojistovny",
"records"."kod_pojistovny"
),
-- Total donation counts for each donation center. The value in
-- a record is incremental. Thus retrieving the one from the most
-- recent batch that belongs to the donation center. Coalescing to
-- 0 for cases when there is no record from the donation center.
COALESCE(
(
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
JOIN "donation_centers"
ON "donation_centers"."id" = "batches"."donation_center_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND "donation_centers"."slug" = 'fm'
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
),
0
) AS "donation_count_fm",
COALESCE(
(
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
JOIN "donation_centers"
ON "donation_centers"."id" = "batches"."donation_center_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND "donation_centers"."slug" = 'fm_bubenik'
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
),
0
) AS "donation_count_fm_bubenik",
COALESCE(
(
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
JOIN "donation_centers"
ON "donation_centers"."id" = "batches"."donation_center_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND "donation_centers"."slug" = 'trinec'
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
),
0
) AS "donation_count_trinec",
COALESCE(
(
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
JOIN "donation_centers"
ON "donation_centers"."id" = "batches"."donation_center_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND "donation_centers"."slug" = 'mp'
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
),
0
) AS "donation_count_mp",
COALESCE(
(
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND "batches"."donation_center_id" IS NULL
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
),
0
) AS "donation_count_manual",
-- The grand total of the donation counts. Sums the most recent
-- counts from all the donation centers and the most recent manual
-- donation count without a donation center. Not coalescing this
            -- one, because it is not possible for a person not to have any
-- donation record at all.
(
-- Sum all the respective donation counts including manual
-- entries.
SELECT SUM("donation_count"."donation_count")
FROM (
SELECT (
-- Loads the most recent donation count for the
-- donation center.
SELECT "records"."donation_count"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
WHERE "records"."rodne_cislo" = "recent_records"."rodne_cislo"
AND (
-- NULL values represent manual entries and
-- cannot be compared by =.
"batches"."donation_center_id" =
"donation_center_null"."donation_center_id"
OR (
"batches"."donation_center_id" IS NULL AND
"donation_center_null"."donation_center_id" IS NULL
)
)
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
) AS "donation_count"
FROM (
-- All possible donation centers including NULL
-- for manual entries.
SELECT "donation_centers"."id" AS "donation_center_id"
FROM "donation_centers"
UNION
SELECT NULL AS "donation_centers"
) AS "donation_center_null"
-- Removes donation centers from which the person does
-- not have any records. This removes the need for
-- coalescing the value to 0 before summing.
WHERE "donation_count" IS NOT NULL
) AS "donation_count"
) AS "donation_count_total",
-- Awarded medals checks. Just simply query whether there is a
-- record for the given combination of "rodne_cislo" and "medal".
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'br'
) AS "awarded_medal_br",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'st'
) AS "awarded_medal_st",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'zl'
) AS "awarded_medal_zl",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'kr3'
) AS "awarded_medal_kr3",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'kr2'
) AS "awarded_medal_kr2",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'kr1'
) AS "awarded_medal_kr1",
EXISTS(
SELECT 1
FROM "awarded_medals"
JOIN "medals"
ON "medals"."id" = "awarded_medals"."medal_id"
WHERE "awarded_medals"."rodne_cislo" = "records"."rodne_cislo"
AND "medals"."slug" = 'plk'
) AS "awarded_medal_plk"
FROM (
SELECT
"rodna_cisla"."rodne_cislo",
(
-- Looks up the most recently imported batch for a given
-- person, regardless of the donation center. This is used
-- only to link the most recent personal data as the
-- combination of "rodne_cislo" and "batch" is unique.
SELECT "records"."id"
FROM "records"
JOIN "batches"
ON "batches"."id" = "records"."batch_id"
WHERE "records"."rodne_cislo" = "rodna_cisla"."rodne_cislo"
ORDER BY "batches"."imported_at" DESC,
"records"."donation_count" DESC
LIMIT 1
) AS "record_id"
FROM (
-- The ultimate core. We need all people, not records or
-- batches. People are uniquely identified by their
-- "rodne_cislo".
SELECT DISTINCT "rodne_cislo"
FROM "records"
WHERE {sql_condition} "records"."rodne_cislo" NOT IN (
SELECT "rodne_cislo" FROM "ignored_donors"
)
) AS "rodna_cisla"
) AS "recent_records"
JOIN "records"
ON "records"."id" = "recent_records"."record_id"
LEFT JOIN "donors_override"
ON "donors_override"."rodne_cislo" = "records"."rodne_cislo";
""" # nosec - see above
db.session.execute(full_query, params)
db.session.commit()
class Note(db.Model):
__tablename__ = "notes"
rodne_cislo = db.Column(db.String(10), primary_key=True)
note = db.Column(db.Text)
class DonorsOverride(db.Model):
__tablename__ = "donors_override"
rodne_cislo = db.Column(db.String(10), primary_key=True)
first_name = db.Column(db.String)
last_name = db.Column(db.String)
address = db.Column(db.String)
city = db.Column(db.String)
postal_code = db.Column(db.String(5))
kod_pojistovny = db.Column(db.String(3))
def to_dict(self):
result = {}
for field in DonorsOverview.basic_fields:
if getattr(self, field) is not None:
result[field] = str(getattr(self, field))
else:
result[field] = None
return result
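# Usage sketch (an assumption, not part of the original module): how the
# helpers defined above are typically combined inside a view. The search
# string and rodne cislo below are placeholders.
#
# query = DonorsOverview.query.outerjoin(
#     Note, Note.rodne_cislo == DonorsOverview.rodne_cislo
# ).filter(DonorsOverview.get_filter_for_search("Novak Ostrava"))
# donors = [donor.dict_for_frontend() for donor in query.all()]
#
# # After importing a batch, rebuild one donor's row or the whole overview:
# DonorsOverview.refresh_overview(rodne_cislo="0000000000")
# DonorsOverview.refresh_overview()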
|
StarcoderdataPython
|
5117973
|
<reponame>TadeasPilar/KiKit
from pcbnewTransition import pcbnew, isV6
import tempfile
import re
from dataclasses import dataclass, field
from kikit.drc_ui import ReportLevel
import os
@dataclass
class Violation:
type: str
description: str
rule: str
severity: str
objects: list = field(default_factory=list)
def __str__(self):
head = f"[{self.type}]: {self.description} Severity: {self.severity}\n {self.rule}"
tail = "\n".join([" " + x for x in self.objects])
return "\n".join([head] + [tail])
def readViolations(reportFile):
violations = []
line = reportFile.readline()
while True:
headerMatch = re.match(r'\[(.*)\]: (.*)\n', line)
if headerMatch is None:
break
line = reportFile.readline()
bodyMatch = re.match(r'\s*(.*); Severity: (.*)', line)
if bodyMatch is None:
break
v = Violation(
type = headerMatch.group(1),
description = headerMatch.group(2),
rule = bodyMatch.group(1),
severity = bodyMatch.group(2))
line = reportFile.readline()
while line.startswith(" "):
v.objects.append(line.strip())
line = reportFile.readline()
violations.append(v)
return line, violations
def readReport(reportFile):
report = {}
line = reportFile.readline()
while True:
if len(line) == 0:
break
if re.match(r'\*\* Found \d+ DRC violations \*\*', line):
line, v = readViolations(reportFile)
report["drc"] = v
continue
if re.match(r'\*\* Found \d+ unconnected pads \*\*', line):
line, v = readViolations(reportFile)
report["unconnected"] = v
continue
if re.match(r'\*\* Found \d+ Footprint errors \*\*', line):
line, v = readViolations(reportFile)
report["footprint"] = v
line = reportFile.readline()
return report
def runImpl(board, useMm, strict, level, yieldViolation):
    units = pcbnew.EDA_UNITS_MILLIMETRES if useMm else pcbnew.EDA_UNITS_INCHES
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmpFile:
try:
tmpFile.close()
result = pcbnew.WriteDRCReport(board, tmpFile.name, units, strict)
if not result:
raise RuntimeError("Cannot run DRC: Unspecified KiCAD error")
with open(tmpFile.name) as f:
report = readReport(f)
finally:
tmpFile.close()
os.unlink(tmpFile.name)
failed = False
errorName = {
"drc": "DRC violations",
"unconnected": "unconnected pads",
"footprint": "footprints errors"
}
for k, v in report.items():
if len(v) == 0:
continue
        failedCases = []
for x in v:
thisFailed = False
if level == ReportLevel.warning and x.severity == "warning":
thisFailed = True
if x.severity == "error":
thisFailed = True
if thisFailed:
failedCases.append(x)
failed = failed or thisFailed
if failedCases:
msg = f"** Found {len(failedCases)} {errorName[k]}: **\n"
msg += "\n".join([str(x) for x in failedCases])
yieldViolation(msg)
return failed
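# --- Added usage sketch (not part of the original file) ---
# A hedged example of how runImpl might be driven. The board path is a
# placeholder, and it assumes ReportLevel.error exists alongside the
# ReportLevel.warning value referenced above.
if __name__ == "__main__":
    def printViolation(msg):
        # Callback invoked once per category of failed DRC cases.
        print(msg)

    board = pcbnew.LoadBoard("board.kicad_pcb")  # hypothetical input file
    failed = runImpl(board, useMm=True, strict=False,
                     level=ReportLevel.error, yieldViolation=printViolation)
    print("DRC failed" if failed else "DRC passed")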
|
StarcoderdataPython
|
3255703
|
<reponame>danoliveiradev/PythonExercicios<filename>ex096.py
def area(l, c):
a = l * c
print(f'A área de um terreno {l}x{c} é de {a:.1f}m².')
# Main program
print(f'{"CONTROLE DE TERRENO":^30}')
print('-'*30)
largura = float(input('Largura (m): '))
comprimento = float(input('Comprimento (m): '))
area(largura, comprimento)
|
StarcoderdataPython
|
9765413
|
from django.conf.urls import patterns, url
from views import *
urlpatterns = patterns('',
    url(r'^factura/lista_ventas/$', ListaVentas.as_view(), name='lista_ventas'),
    url(r'^factura/venta$', 'apps.factura.views.facturaCrear',
        name="factura_venta"),
    url(r'^factura/buscar_cliente$', 'apps.factura.views.buscarCliente'),
    url(r'^factura/buscar_producto$', 'apps.factura.views.buscarProducto'),
    url(r'^factura/consultar$', 'apps.factura.views.consultarFactura', name="consultar_factura"),
    url(r'^factura/generar_reporte_factura/$', generar_pdf, name='generar_reporte_factura'),
    url(r'^factura/reporte_ventas/(?P<pk>\d+)/$', 'apps.factura.views.reporteventas', name='reporte_ventas'),
)
|
StarcoderdataPython
|
1622625
|
<reponame>vinthedark/snet-marketplace-service
import json
import uuid
from enum import Enum
import web3
from eth_account.messages import defunct_hash_message
from web3 import Web3
from common.logger import get_logger
logger = get_logger(__name__)
class ContractType(Enum):
REGISTRY = "REGISTRY"
MPE = "MPE"
RFAI = "RFAI"
class BlockChainUtil(object):
def __init__(self, provider_type, provider):
if provider_type == "HTTP_PROVIDER":
self.provider = Web3.HTTPProvider(provider)
elif provider_type == "WS_PROVIDER":
self.provider = web3.providers.WebsocketProvider(provider)
else:
raise Exception("Only HTTP_PROVIDER and WS_PROVIDER provider type are supported.")
self.web3_object = Web3(self.provider)
def load_contract(self, path):
with open(path) as f:
contract = json.load(f)
return contract
def read_contract_address(self, net_id, path, key):
contract = self.load_contract(path)
return Web3.toChecksumAddress(contract[str(net_id)][key])
def contract_instance(self, contract_abi, address):
return self.web3_object.eth.contract(abi=contract_abi, address=address)
def get_contract_instance(self, base_path, contract_name, net_id):
contract_network_path, contract_abi_path = self.get_contract_file_paths(base_path, contract_name)
contract_address = self.read_contract_address(net_id=net_id, path=contract_network_path,
key='address')
contract_abi = self.load_contract(contract_abi_path)
logger.debug(f"contract address is {contract_address}")
contract_instance = self.contract_instance(contract_abi=contract_abi, address=contract_address)
return contract_instance
def generate_signature(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return signature.signature.hex()
def generate_signature_bytes(self, data_types, values, signer_key):
signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
message = web3.Web3.soliditySha3(data_types, values)
signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
return bytes(signature.signature)
def get_nonce(self, address):
""" transaction count includes pending transaction also. """
nonce = self.web3_object.eth.getTransactionCount(address)
return nonce
def sign_transaction_with_private_key(self, private_key, transaction_object):
return self.web3_object.eth.account.signTransaction(transaction_object, private_key).rawTransaction
def create_transaction_object(self, *positional_inputs, method_name, address, contract_path, contract_address_path,
net_id):
nonce = self.get_nonce(address=address)
self.contract = self.load_contract(path=contract_path)
self.contract_address = self.read_contract_address(net_id=net_id, path=contract_address_path, key='address')
self.contract_instance = self.contract_instance(contract_abi=self.contract, address=self.contract_address)
print("gas_price == ", self.web3_object.eth.gasPrice)
print("nonce == ", nonce)
gas_price = 3 * (self.web3_object.eth.gasPrice)
transaction_object = getattr(self.contract_instance.functions, method_name)(
*positional_inputs).buildTransaction({
"from": address,
"nonce": nonce,
"gasPrice": gas_price,
"chainId": net_id
})
return transaction_object
def process_raw_transaction(self, raw_transaction):
return self.web3_object.eth.sendRawTransaction(raw_transaction).hex()
def create_account(self):
account = self.web3_object.eth.account.create(uuid.uuid4().hex)
return account.address, account.privateKey.hex()
def get_current_block_no(self):
return self.web3_object.eth.blockNumber
def get_transaction_receipt_from_blockchain(self, transaction_hash):
return self.web3_object.eth.getTransactionReceipt(transaction_hash)
def get_contract_file_paths(self, base_path, contract_name):
if contract_name == ContractType.REGISTRY.value:
json_file = "Registry.json"
elif contract_name == ContractType.MPE.value:
json_file = "MultiPartyEscrow.json"
elif contract_name == ContractType.RFAI.value:
json_file = "ServiceRequest.json"
else:
raise Exception("Invalid contract Type {}".format(contract_name))
contract_network_path = base_path + "/{}/{}".format("networks", json_file)
contract_abi_path = base_path + "/{}/{}".format("abi", json_file)
return contract_network_path, contract_abi_path
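# --- Added usage sketch (not part of the original file) ---
# A hedged example of how BlockChainUtil might be used. The provider URL,
# contracts base path, and net_id are placeholders; the Registry ABI and
# network JSON files are assumed to exist where get_contract_file_paths()
# expects them.
if __name__ == "__main__":
    util = BlockChainUtil(provider_type="HTTP_PROVIDER",
                          provider="https://ropsten.infura.io/v3/<project-id>")
    print("current block:", util.get_current_block_no())
    registry = util.get_contract_instance(base_path="/path/to/contracts",
                                          contract_name=ContractType.REGISTRY.value,
                                          net_id=3)
    print("registry address:", registry.address)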
|
StarcoderdataPython
|
1826219
|
# flake8: noqa E501
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union
import pytest
from robotcode.language_server.common.lsp_types import (
CallHierarchyClientCapabilities,
ClientCapabilities,
ClientCapabilitiesWindow,
ClientCapabilitiesWorkspace,
ClientCapabilitiesWorkspaceFileOperationsWorkspaceClientCapabilities,
ClientInfo,
CodeActionClientCapabilities,
CodeActionClientCapabilitiesCodeActionLiteralSupport,
CodeActionClientCapabilitiesCodeActionLiteralSupportCodeActionKind,
CodeActionClientCapabilitiesResolveSupport,
CodeLensClientCapabilities,
CodeLensWorkspaceClientCapabilities,
CompletionClientCapabilities,
CompletionClientCapabilitiesCompletionItem,
CompletionClientCapabilitiesCompletionItemInsertTextModeSupport,
CompletionClientCapabilitiesCompletionItemKind,
CompletionClientCapabilitiesCompletionItemResolveSupport,
CompletionClientCapabilitiesCompletionItemTagSupport,
CompletionItemKind,
CompletionItemTag,
DeclarationClientCapabilities,
DefinitionClientCapabilities,
DiagnosticTag,
DidChangeConfigurationClientCapabilities,
DidChangeWatchedFilesClientCapabilities,
DocumentColorClientCapabilities,
DocumentFormattingClientCapabilities,
DocumentHighlightClientCapabilities,
DocumentLinkClientCapabilities,
DocumentOnTypeFormattingClientCapabilities,
DocumentRangeFormattingClientCapabilities,
DocumentSymbolClientCapabilities,
DocumentSymbolClientCapabilitiesSymbolKind,
DocumentSymbolClientCapabilitiesTagSupport,
ExecuteCommandClientCapabilities,
FailureHandlingKind,
FoldingRangeClientCapabilities,
HoverClientCapabilities,
ImplementationClientCapabilities,
InitializeParams,
InsertTextMode,
LinkedEditingRangeClientCapabilities,
MarkupKind,
PrepareSupportDefaultBehavior,
PublishDiagnosticsClientCapabilities,
PublishDiagnosticsClientCapabilitiesTagSupport,
ReferenceClientCapabilities,
RenameClientCapabilities,
ResourceOperationKind,
SelectionRangeClientCapabilities,
SemanticTokensClientCapabilities,
SemanticTokensClientCapabilitiesRequests,
SemanticTokensClientCapabilitiesRequestsFull,
SemanticTokensWorkspaceClientCapabilities,
ShowMessageRequestClientCapabilities,
ShowMessageRequestClientCapabilitiesMessageActionItem,
SignatureHelpClientCapabilities,
SignatureHelpClientCapabilitiesSignatureInformation,
SignatureHelpClientCapabilitiesSignatureInformationParameterInformation,
SymbolKind,
SymbolTag,
TextDocumentClientCapabilities,
TextDocumentSyncClientCapabilities,
TokenFormat,
TraceValue,
TypeDefinitionClientCapabilities,
WorkspaceEditClientCapabilities,
WorkspaceEditClientCapabilitiesChangeAnnotationSupport,
WorkspaceFolder,
WorkspaceSymbolClientCapabilities,
WorkspaceSymbolClientCapabilitiesSymbolKind,
WorkspaceSymbolClientCapabilitiesTagSupport,
)
from robotcode.utils.dataclasses import as_json, from_json, to_camel_case, to_snake_case
class EnumData(Enum):
FIRST = "first"
SECOND = "second"
@pytest.mark.parametrize(
("expr", "expected", "indent", "compact"),
[
(1, "1", None, None),
(True, "true", None, None),
(False, "false", None, None),
("Test", '"Test"', None, None),
([], "[]", None, None),
(["Test"], '["Test"]', None, None),
(["Test", 1], '["Test", 1]', None, None),
({}, "{}", None, None),
({"a": 1}, '{"a": 1}', None, None),
({"a": 1}, '{\n "a": 1\n}', True, None),
({"a": 1, "b": True}, '{\n "a": 1,\n "b": true\n}', True, None),
({"a": 1, "b": True}, '{"a":1,"b":true}', None, True),
((), "[]", None, None),
((1, 2, 3), "[1, 2, 3]", None, None),
(set(), "[]", None, None),
({1, 2}, "[1, 2]", None, None),
([EnumData.FIRST, EnumData.SECOND], '["first", "second"]', None, None),
],
)
def test_encode_simple(expr: Any, expected: str, indent: Optional[bool], compact: Optional[bool]) -> None:
assert as_json(expr, indent, compact) == expected
@dataclass
class SimpleItem:
a: int
b: int
def test_encode_simple_dataclass() -> None:
assert as_json(SimpleItem(1, 2)) == '{"a": 1, "b": 2}'
@dataclass
class ComplexItem:
list_field: List[Any]
dict_field: Dict[Any, Any]
@pytest.mark.parametrize(
("expr", "expected"),
[
(ComplexItem([], {}), '{"list_field": [], "dict_field": {}}'),
(
ComplexItem([1, "2", 3], {"a": "hello", 1: True}),
'{"list_field": [1, "2", 3], "dict_field": {"a": "hello", "1": true}}',
),
],
)
def test_encode_complex_dataclass(expr: Any, expected: str) -> None:
assert as_json(expr) == expected
@dataclass
class ComplexItemWithConfigEncodeCase(ComplexItem):
@classmethod
def _encode_case(cls, s: str) -> str:
return to_camel_case(s)
@classmethod
def _decode_case(cls, s: str) -> str:
return to_snake_case(s)
@pytest.mark.parametrize(
("expr", "expected"),
[
(ComplexItemWithConfigEncodeCase([], {}), '{"listField": [], "dictField": {}}'),
(
ComplexItemWithConfigEncodeCase([1, "2", 3], {"a": "hello", 1: True}),
'{"listField": [1, "2", 3], "dictField": {"a": "hello", "1": true}}',
),
],
)
def test_encode_complex_dataclass_with_config_encode_case(expr: Any, expected: str) -> None:
assert as_json(expr) == expected
@dataclass
class SimpleItemWithOptionalField:
a: Optional[int]
def test_encode_with_optional_field() -> None:
assert as_json(SimpleItemWithOptionalField(1)) == '{"a": 1}'
assert as_json(SimpleItemWithOptionalField(None)) == '{"a": null}'
@dataclass
class SimpleItemWithOptionalFieldAndNoneAsDefaultValue:
a: Optional[int] = None
def test_encode_with_optional_field_and_none_as_default_value() -> None:
assert as_json(SimpleItemWithOptionalFieldAndNoneAsDefaultValue(1)) == '{"a": 1}'
assert as_json(SimpleItemWithOptionalFieldAndNoneAsDefaultValue(None)) == "{}"
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
("1", int, 1),
('"str"', str, "str"),
("1.0", float, 1.0),
("true", bool, True),
("false", bool, False),
('"str"', (str, int), "str"),
("1", (int, str), 1),
("[]", (int, str, List[int]), []),
("[1]", (int, List[int]), [1]),
("1", Any, 1),
("[]", Union[int, str, List[int]], []),
('"first"', EnumData, EnumData.FIRST),
('"second"', EnumData, EnumData.SECOND),
('["first", "second"]', List[EnumData], [EnumData.FIRST, EnumData.SECOND]),
],
)
def test_decode_simple(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
("{}", dict, {}),
('{"a": 1}', dict, {"a": 1}),
('{"a": 1}', Dict[str, int], {"a": 1}),
('{"a": 1, "b": 2}', Dict[str, int], {"a": 1, "b": 2}),
('{"a": 1, "b": null}', Dict[str, Union[int, str, None]], {"a": 1, "b": None}),
('{"a": {}, "b": {"a": 2}}', Dict[str, Dict[str, Any]], {"a": {}, "b": {"a": 2}}),
],
)
def test_decode_dict(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"a": 1, "b": 2}', SimpleItem, SimpleItem(1, 2)),
('{"b": 2, "a": 1}', SimpleItem, SimpleItem(1, 2)),
('{"b": 2, "a": 1}', Optional[SimpleItem], SimpleItem(1, 2)),
],
)
def test_decode_simple_class(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
def test_decode_optional_simple_class() -> None:
assert from_json("null", Optional[SimpleItem]) is None # type: ignore
with pytest.raises(TypeError):
assert from_json("null", SimpleItem) is None
@dataclass
class SimpleItemWithNoFields:
pass
def test_decode_with_no_fields() -> None:
assert from_json("{}", SimpleItemWithNoFields) == SimpleItemWithNoFields()
@dataclass
class SimpleItemWithOnlyOptionalFields:
a: int = 1
b: int = 2
def test_decode_with_only_optional_fields() -> None:
assert from_json("{}", SimpleItemWithOnlyOptionalFields) == SimpleItemWithOnlyOptionalFields()
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
(
'{"listField": [], "dictField": {}}',
ComplexItemWithConfigEncodeCase,
ComplexItemWithConfigEncodeCase([], {}),
),
(
'{"listField": [1,2], "dictField": {"a": 1, "b": "2"}}',
ComplexItemWithConfigEncodeCase,
ComplexItemWithConfigEncodeCase([1, 2], {"a": 1, "b": "2"}),
),
],
)
def test_decode_complex_class_with_encoding(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@dataclass
class SimpleItemWithOptionalFields:
first: int
second: bool = True
third: Optional[str] = None
forth: Optional[float] = None
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"first": 1}', SimpleItemWithOptionalFields, SimpleItemWithOptionalFields(first=1)),
(
'{"first": 1, "third": "Hello"}',
SimpleItemWithOptionalFields,
SimpleItemWithOptionalFields(first=1, third="Hello"),
),
('{"first": 1, "forth": 1.0}', SimpleItemWithOptionalFields, SimpleItemWithOptionalFields(first=1, forth=1.0)),
],
)
def test_decode_simple_item_with_optional_field(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@dataclass
class SimpleItem1:
d: int
e: int
f: int = 1
@dataclass
class ComplexItemWithUnionType:
a_union_field: Union[SimpleItem, SimpleItem1]
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"a_union_field":{"a":1, "b":2}}', ComplexItemWithUnionType, ComplexItemWithUnionType(SimpleItem(1, 2))),
('{"a_union_field":{"d":1, "e":2}}', ComplexItemWithUnionType, ComplexItemWithUnionType(SimpleItem1(1, 2))),
(
'{"a_union_field":{"d":1, "e":2, "f": 3}}',
ComplexItemWithUnionType,
ComplexItemWithUnionType(SimpleItem1(1, 2, 3)),
),
],
)
def test_decode_with_union_and_different_keys(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@dataclass
class SimpleItem2:
a: int
b: int
c: int = 1
@dataclass
class ComplexItemWithUnionTypeWithSameProperties:
a_union_field: Union[SimpleItem, SimpleItem2]
def test_decode_with_union_and_some_same_keys() -> None:
assert from_json(
'{"a_union_field": {"a": 1, "b":2, "c":3}}', ComplexItemWithUnionTypeWithSameProperties
) == ComplexItemWithUnionTypeWithSameProperties(SimpleItem2(1, 2, 3))
def test_decode_with_union_and_same_keys_should_raise_typeerror() -> None:
with pytest.raises(TypeError):
from_json('{"a_union_field": {"a": 1, "b":2}}', ComplexItemWithUnionTypeWithSameProperties)
def test_decode_with_union_and_no_keys_should_raise_typeerror() -> None:
with pytest.raises(TypeError):
from_json('{"a_union_field": {}}', ComplexItemWithUnionTypeWithSameProperties)
def test_decode_with_union_and_no_match_should_raise_typeerror() -> None:
with pytest.raises(TypeError):
from_json('{"a_union_field": {"x": 1, "y":2}}', ComplexItemWithUnionTypeWithSameProperties)
@dataclass
class SimpleItem3:
a: int
b: int
c: int
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"a":1, "b": 2}', (SimpleItem, SimpleItem3), SimpleItem(1, 2)),
('{"a":1, "b": 2, "c": 3}', (SimpleItem, SimpleItem3), SimpleItem3(1, 2, 3)),
],
)
def test_decode_with_some_same_fields(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
def test_decode_with_some_unambigous_fields_should_raise_typeerror() -> None:
with pytest.raises(TypeError):
from_json('{"a":1, "b": 2}', (SimpleItem, SimpleItem2)) # type: ignore
@dataclass
class ComplexItemWithUnionTypeWithSimpleAndComplexTypes:
a_union_field: Union[bool, SimpleItem, SimpleItem1]
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
(
'{"a_union_field": true}',
ComplexItemWithUnionTypeWithSimpleAndComplexTypes,
ComplexItemWithUnionTypeWithSimpleAndComplexTypes(True),
),
(
'{"a_union_field": {"a":1, "b":2}}',
ComplexItemWithUnionTypeWithSimpleAndComplexTypes,
ComplexItemWithUnionTypeWithSimpleAndComplexTypes(SimpleItem(1, 2)),
),
],
)
def test_decode_union_with_simple_and_complex_types(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
def test_decode_union_with_unknown_keys_should_raise_typeerror() -> None:
with pytest.raises(TypeError):
from_json(
'{"a_union_field": {"d":1, "ef":2}}', ComplexItemWithUnionTypeWithSimpleAndComplexTypes
) == ComplexItemWithUnionTypeWithSimpleAndComplexTypes(SimpleItem(1, 2))
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"a":1, "b":2, "c":3}', SimpleItem, SimpleItem(1, 2)),
('{"a":1, "b":2, "c":3}', SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields(1, 2)),
('{"a":1}', SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields(1)),
("{}", SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields()),
("{}", SimpleItemWithNoFields, SimpleItemWithNoFields()),
('{"a": 1}', SimpleItemWithNoFields, SimpleItemWithNoFields()),
('{"a":1, "b":2, "c": 3}', (SimpleItemWithNoFields, SimpleItem), SimpleItem(1, 2)),
],
)
def test_decode_non_strict_should_work(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('{"a":1, "b":2}', SimpleItem, SimpleItem(1, 2)),
('{"a":1, "b":2}', SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields(1, 2)),
('{"a":1}', SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields(1)),
("{}", SimpleItemWithOnlyOptionalFields, SimpleItemWithOnlyOptionalFields()),
("{}", SimpleItemWithNoFields, SimpleItemWithNoFields()),
("{}", (SimpleItemWithNoFields, SimpleItem), SimpleItemWithNoFields()),
('{"a":1, "b":2}', (SimpleItemWithNoFields, SimpleItem), SimpleItem(1, 2)),
],
)
def test_decode_strict_should_work(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type, strict=True) == expected
@pytest.mark.parametrize(
("expr", "type"),
[
('{"a":1, "b": 2, "c": 3}', SimpleItem),
('{"a":1, "b": 2, "c": 3}', SimpleItemWithOnlyOptionalFields),
('{"a":1, "c": 3}', SimpleItemWithOnlyOptionalFields),
('{"c": 3}', SimpleItemWithOnlyOptionalFields),
('{"c": 3}', SimpleItemWithNoFields),
],
)
def test_decode_strict_with_invalid_data_should_raise_typeerror(expr: Any, type: Any) -> None:
with pytest.raises(TypeError):
from_json(expr, type, strict=True)
@pytest.mark.parametrize(
("expr", "type", "expected"),
[
('"test"', Literal["test", "blah", "bluff"], "test"),
('"bluff"', Literal["test", "blah", "bluff"], "bluff"),
('"dada"', (Literal["test", "blah", "bluff"], str), "dada"),
("1", (Literal["test", "blah", "bluff"], int), 1),
],
)
def test_literal_should_work(expr: Any, type: Any, expected: str) -> None:
assert from_json(expr, type) == expected
@pytest.mark.parametrize(
("expr", "type"),
[
('"dada"', Literal["test", "blah", "bluff"]),
('"dada"', (Literal["test", "blah", "bluff"], int)),
],
)
def test_literal_with_invalid_args_should_raise_typerror(expr: Any, type: Any) -> None:
with pytest.raises(TypeError):
from_json(expr, type)
@dataclass
class SimpleItemWithAlias:
a: int = field(metadata={"alias": "a_test"})
def test_encode_decode_with_field_alias_should_work() -> None:
assert from_json('{"a_test": 2}', SimpleItemWithAlias) == SimpleItemWithAlias(2)
assert as_json(SimpleItemWithAlias(2)) == '{"a_test": 2}'
def test_really_complex_data() -> None:
data = """\
{
"processId": 17800,
"clientInfo": {
"name": "Visual Studio Code - Insiders",
"version": "1.62.0-insider"
},
"locale": "de",
"rootPath": "c:\\\\tmp\\\\robottest\\\\dummy\\\\testprj",
"rootUri": "file:///c%3A/tmp/robottest/dummy/testprj",
"capabilities": {
"workspace": {
"applyEdit": true,
"workspaceEdit": {
"documentChanges": true,
"resourceOperations": [
"create",
"rename",
"delete"
],
"failureHandling": "textOnlyTransactional",
"normalizesLineEndings": true,
"changeAnnotationSupport": {
"groupsOnLabel": true
}
},
"didChangeConfiguration": {
"dynamicRegistration": true
},
"didChangeWatchedFiles": {
"dynamicRegistration": true
},
"symbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
},
"tagSupport": {
"valueSet": [
1
]
}
},
"codeLens": {
"refreshSupport": true
},
"executeCommand": {
"dynamicRegistration": true
},
"configuration": true,
"workspaceFolders": true,
"semanticTokens": {
"refreshSupport": true
},
"fileOperations": {
"dynamicRegistration": true,
"didCreate": true,
"didRename": true,
"didDelete": true,
"willCreate": true,
"willRename": true,
"willDelete": true
}
},
"textDocument": {
"publishDiagnostics": {
"relatedInformation": true,
"versionSupport": false,
"tagSupport": {
"valueSet": [
1,
2
]
},
"codeDescriptionSupport": true,
"dataSupport": true
},
"synchronization": {
"dynamicRegistration": true,
"willSave": true,
"willSaveWaitUntil": true,
"didSave": true
},
"completion": {
"dynamicRegistration": true,
"contextSupport": true,
"completionItem": {
"snippetSupport": true,
"commitCharactersSupport": true,
"documentationFormat": [
"markdown",
"plaintext"
],
"deprecatedSupport": true,
"preselectSupport": true,
"tagSupport": {
"valueSet": [
1
]
},
"insertReplaceSupport": true,
"resolveSupport": {
"properties": [
"documentation",
"detail",
"additionalTextEdits"
]
},
"insertTextModeSupport": {
"valueSet": [
1,
2
]
}
},
"completionItemKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
]
}
},
"hover": {
"dynamicRegistration": true,
"contentFormat": [
"markdown",
"plaintext"
]
},
"signatureHelp": {
"dynamicRegistration": true,
"signatureInformation": {
"documentationFormat": [
"markdown",
"plaintext"
],
"parameterInformation": {
"labelOffsetSupport": true
},
"activeParameterSupport": true
},
"contextSupport": true
},
"definition": {
"dynamicRegistration": true,
"linkSupport": true
},
"references": {
"dynamicRegistration": true
},
"documentHighlight": {
"dynamicRegistration": true
},
"documentSymbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26
]
},
"hierarchicalDocumentSymbolSupport": true,
"tagSupport": {
"valueSet": [
1
]
},
"labelSupport": true
},
"codeAction": {
"dynamicRegistration": true,
"isPreferredSupport": true,
"disabledSupport": true,
"dataSupport": true,
"resolveSupport": {
"properties": [
"edit"
]
},
"codeActionLiteralSupport": {
"codeActionKind": {
"valueSet": [
"",
"quickfix",
"refactor",
"refactor.extract",
"refactor.inline",
"refactor.rewrite",
"source",
"source.organizeImports"
]
}
},
"honorsChangeAnnotations": false
},
"codeLens": {
"dynamicRegistration": true
},
"formatting": {
"dynamicRegistration": true
},
"rangeFormatting": {
"dynamicRegistration": true
},
"onTypeFormatting": {
"dynamicRegistration": true
},
"rename": {
"dynamicRegistration": true,
"prepareSupport": true,
"prepareSupportDefaultBehavior": 1,
"honorsChangeAnnotations": true
},
"documentLink": {
"dynamicRegistration": true,
"tooltipSupport": true
},
"typeDefinition": {
"dynamicRegistration": true,
"linkSupport": true
},
"implementation": {
"dynamicRegistration": true,
"linkSupport": true
},
"colorProvider": {
"dynamicRegistration": true
},
"foldingRange": {
"dynamicRegistration": true,
"rangeLimit": 5000,
"lineFoldingOnly": true
},
"declaration": {
"dynamicRegistration": true,
"linkSupport": true
},
"selectionRange": {
"dynamicRegistration": true
},
"callHierarchy": {
"dynamicRegistration": true
},
"semanticTokens": {
"dynamicRegistration": true,
"tokenTypes": [
"namespace",
"type",
"class",
"enum",
"interface",
"struct",
"typeParameter",
"parameter",
"variable",
"property",
"enumMember",
"event",
"function",
"method",
"macro",
"keyword",
"modifier",
"comment",
"string",
"number",
"regexp",
"operator"
],
"tokenModifiers": [
"declaration",
"definition",
"readonly",
"static",
"deprecated",
"abstract",
"async",
"modification",
"documentation",
"defaultLibrary"
],
"formats": [
"relative"
],
"requests": {
"range": true,
"full": {
"delta": true
}
},
"multilineTokenSupport": false,
"overlappingTokenSupport": false
},
"linkedEditingRange": {
"dynamicRegistration": true
}
},
"window": {
"showMessage": {
"messageActionItem": {
"additionalPropertiesSupport": true
}
}
}
},
"initializationOptions": {
"storageUri": "file:///c%3A/Users/daniel/AppData/Roaming/Code%20-%20Insiders/User/workspaceStorage/1ab0e3033b053a024fb7cbf9068380d1/d-biehl.robotcode",
"globalStorageUri": "file:///c%3A/Users/daniel/AppData/Roaming/Code%20-%20Insiders/User/globalStorage/d-biehl.robotcode"
},
"trace": "off",
"workspaceFolders": [
{
"uri": "file:///c%3A/tmp/robottest/dummy/testprj",
"name": "testprj"
}
],
"workDoneToken": "7<PASSWORD>"
}
"""
assert from_json(data, InitializeParams) == InitializeParams(
capabilities=ClientCapabilities(
workspace=ClientCapabilitiesWorkspace(
apply_edit=True,
workspace_edit=WorkspaceEditClientCapabilities(
document_changes=True,
resource_operations=[
ResourceOperationKind.CREATE,
ResourceOperationKind.RENAME,
ResourceOperationKind.DELETE,
],
failure_handling=FailureHandlingKind.TEXT_ONLY_TRANSACTIONAL,
normalizes_line_endings=True,
change_annotation_support=WorkspaceEditClientCapabilitiesChangeAnnotationSupport(
groups_on_label=True
),
),
did_change_configuration=DidChangeConfigurationClientCapabilities(dynamic_registration=True),
did_change_watched_files=DidChangeWatchedFilesClientCapabilities(dynamic_registration=True),
symbol=WorkspaceSymbolClientCapabilities(
dynamic_registration=True,
symbol_kind=WorkspaceSymbolClientCapabilitiesSymbolKind(
value_set=[
SymbolKind.FILE,
SymbolKind.MODULE,
SymbolKind.NAMESPACE,
SymbolKind.PACKAGE,
SymbolKind.CLASS,
SymbolKind.METHOD,
SymbolKind.PROPERTY,
SymbolKind.FIELD,
SymbolKind.CONSTRUCTOR,
SymbolKind.ENUM,
SymbolKind.INTERFACE,
SymbolKind.FUNCTION,
SymbolKind.VARIABLE,
SymbolKind.CONSTANT,
SymbolKind.STRING,
SymbolKind.NUMBER,
SymbolKind.BOOLEAN,
SymbolKind.ARRAY,
SymbolKind.OBJECT,
SymbolKind.KEY,
SymbolKind.NULL,
SymbolKind.ENUMMEMBER,
SymbolKind.STRUCT,
SymbolKind.EVENT,
SymbolKind.OPERATOR,
SymbolKind.TYPEPARAMETER,
]
),
tag_support=WorkspaceSymbolClientCapabilitiesTagSupport(value_set=[SymbolTag.Deprecated]),
),
execute_command=ExecuteCommandClientCapabilities(dynamic_registration=True),
workspace_folders=True,
configuration=True,
semantic_tokens=SemanticTokensWorkspaceClientCapabilities(refresh_support=True),
code_lens=CodeLensWorkspaceClientCapabilities(refresh_support=True),
file_operations=ClientCapabilitiesWorkspaceFileOperationsWorkspaceClientCapabilities(
dynamic_registration=True,
did_create=True,
will_create=True,
did_rename=True,
will_rename=True,
did_delete=True,
will_delete=True,
),
),
text_document=TextDocumentClientCapabilities(
synchronization=TextDocumentSyncClientCapabilities(
dynamic_registration=True, will_save=True, will_save_wait_until=True, did_save=True
),
completion=CompletionClientCapabilities(
dynamic_registration=True,
completion_item=CompletionClientCapabilitiesCompletionItem(
snippet_support=True,
commit_characters_support=True,
documentation_format=[MarkupKind.MARKDOWN, MarkupKind.PLAINTEXT],
deprecated_support=True,
preselect_support=True,
tag_support=CompletionClientCapabilitiesCompletionItemTagSupport(
value_set=[CompletionItemTag.Deprecated]
),
insert_replace_support=True,
resolve_support=CompletionClientCapabilitiesCompletionItemResolveSupport(
properties=["documentation", "detail", "additionalTextEdits"]
),
insert_text_mode_support=CompletionClientCapabilitiesCompletionItemInsertTextModeSupport(
value_set=[InsertTextMode.AS_IS, InsertTextMode.ADJUST_INDENTATION]
),
),
completion_item_kind=CompletionClientCapabilitiesCompletionItemKind(
value_set=[
CompletionItemKind.TEXT,
CompletionItemKind.METHOD,
CompletionItemKind.FUNCTION,
CompletionItemKind.CONSTRUCTOR,
CompletionItemKind.FIELD,
CompletionItemKind.VARIABLE,
CompletionItemKind.CLASS,
CompletionItemKind.INTERFACE,
CompletionItemKind.MODULE,
CompletionItemKind.PROPERTY,
CompletionItemKind.UNIT,
CompletionItemKind.VALUE,
CompletionItemKind.ENUM,
CompletionItemKind.KEYWORD,
CompletionItemKind.SNIPPET,
CompletionItemKind.COLOR,
CompletionItemKind.FILE,
CompletionItemKind.REFERENCE,
CompletionItemKind.FOLDER,
CompletionItemKind.ENUM_MEMBER,
CompletionItemKind.CONSTANT,
CompletionItemKind.STRUCT,
CompletionItemKind.EVENT,
CompletionItemKind.OPERATOR,
CompletionItemKind.TYPE_PARAMETER,
]
),
context_support=True,
),
hover=HoverClientCapabilities(
dynamic_registration=True, content_format=[MarkupKind.MARKDOWN, MarkupKind.PLAINTEXT]
),
signature_help=SignatureHelpClientCapabilities(
dynamic_registration=True,
signature_information=SignatureHelpClientCapabilitiesSignatureInformation(
documentation_format=[MarkupKind.MARKDOWN, MarkupKind.PLAINTEXT],
parameter_information=SignatureHelpClientCapabilitiesSignatureInformationParameterInformation(
label_offset_support=True
),
active_parameter_support=True,
),
context_support=True,
),
declaration=DeclarationClientCapabilities(dynamic_registration=True, link_support=True),
definition=DefinitionClientCapabilities(dynamic_registration=True, link_support=True),
type_definition=TypeDefinitionClientCapabilities(dynamic_registration=True, link_support=True),
implementation=ImplementationClientCapabilities(dynamic_registration=True, link_support=True),
references=ReferenceClientCapabilities(dynamic_registration=True),
document_highlight=DocumentHighlightClientCapabilities(dynamic_registration=True),
document_symbol=DocumentSymbolClientCapabilities(
dynamic_registration=True,
symbol_kind=DocumentSymbolClientCapabilitiesSymbolKind(
value_set=[
SymbolKind.FILE,
SymbolKind.MODULE,
SymbolKind.NAMESPACE,
SymbolKind.PACKAGE,
SymbolKind.CLASS,
SymbolKind.METHOD,
SymbolKind.PROPERTY,
SymbolKind.FIELD,
SymbolKind.CONSTRUCTOR,
SymbolKind.ENUM,
SymbolKind.INTERFACE,
SymbolKind.FUNCTION,
SymbolKind.VARIABLE,
SymbolKind.CONSTANT,
SymbolKind.STRING,
SymbolKind.NUMBER,
SymbolKind.BOOLEAN,
SymbolKind.ARRAY,
SymbolKind.OBJECT,
SymbolKind.KEY,
SymbolKind.NULL,
SymbolKind.ENUMMEMBER,
SymbolKind.STRUCT,
SymbolKind.EVENT,
SymbolKind.OPERATOR,
SymbolKind.TYPEPARAMETER,
]
),
hierarchical_document_symbol_support=True,
tag_support=DocumentSymbolClientCapabilitiesTagSupport(value_set=[SymbolTag.Deprecated]),
label_support=True,
),
code_action=CodeActionClientCapabilities(
dynamic_registration=True,
code_action_literal_support=CodeActionClientCapabilitiesCodeActionLiteralSupport(
code_action_kind=CodeActionClientCapabilitiesCodeActionLiteralSupportCodeActionKind(
value_set=[
"",
"quickfix",
"refactor",
"refactor.extract",
"refactor.inline",
"refactor.rewrite",
"source",
"source.organizeImports",
]
)
),
is_preferred_support=True,
disabled_support=True,
data_support=True,
resolve_support=CodeActionClientCapabilitiesResolveSupport(properties=["edit"]),
honors_change_annotations=False,
),
code_lens=CodeLensClientCapabilities(dynamic_registration=True),
document_link=DocumentLinkClientCapabilities(dynamic_registration=True, tooltip_support=True),
color_provider=DocumentColorClientCapabilities(dynamic_registration=True),
formatting=DocumentFormattingClientCapabilities(dynamic_registration=True),
range_formatting=DocumentRangeFormattingClientCapabilities(dynamic_registration=True),
on_type_formatting=DocumentOnTypeFormattingClientCapabilities(dynamic_registration=True),
rename=RenameClientCapabilities(
dynamic_registration=True,
prepare_support=True,
prepare_support_default_behavior=PrepareSupportDefaultBehavior.Identifier,
honors_change_annotations=True,
),
publish_diagnostics=PublishDiagnosticsClientCapabilities(
related_information=True,
tag_support=PublishDiagnosticsClientCapabilitiesTagSupport(
value_set=[DiagnosticTag.Unnecessary, DiagnosticTag.Deprecated]
),
version_support=False,
code_description_support=True,
data_support=True,
),
folding_range=FoldingRangeClientCapabilities(
dynamic_registration=True, range_limit=5000, line_folding_only=True
),
selection_range=SelectionRangeClientCapabilities(dynamic_registration=True),
linked_editing_range=LinkedEditingRangeClientCapabilities(dynamic_registration=True),
call_hierarchy=CallHierarchyClientCapabilities(dynamic_registration=True),
semantic_tokens=SemanticTokensClientCapabilities(
requests=SemanticTokensClientCapabilitiesRequests(
range=True, full=SemanticTokensClientCapabilitiesRequestsFull(delta=True)
),
token_types=[
"namespace",
"type",
"class",
"enum",
"interface",
"struct",
"typeParameter",
"parameter",
"variable",
"property",
"enumMember",
"event",
"function",
"method",
"macro",
"keyword",
"modifier",
"comment",
"string",
"number",
"regexp",
"operator",
],
token_modifiers=[
"declaration",
"definition",
"readonly",
"static",
"deprecated",
"abstract",
"async",
"modification",
"documentation",
"defaultLibrary",
],
formats=[TokenFormat.Relative],
overlapping_token_support=False,
multiline_token_support=False,
dynamic_registration=True,
),
moniker=None,
),
window=ClientCapabilitiesWindow(
work_done_progress=None,
show_message=ShowMessageRequestClientCapabilities(
message_action_item=ShowMessageRequestClientCapabilitiesMessageActionItem(
additional_properties_support=True
)
),
show_document=None,
),
general=None,
experimental=None,
),
process_id=17800,
client_info=ClientInfo(name="Visual Studio Code - Insiders", version="1.62.0-insider"),
locale="de",
root_path="c:\\tmp\\robottest\\dummy\\testprj",
root_uri="file:///c%3A/tmp/robottest/dummy/testprj",
initialization_options={
"storageUri": "file:///c%3A/Users/daniel/AppData/Roaming/Code%20-%20Insiders/User/workspaceStorage/1ab0e3033b053a024fb7cbf9068380d1/d-biehl.robotcode",
"globalStorageUri": "file:///c%3A/Users/daniel/AppData/Roaming/Code%20-%20Insiders/User/globalStorage/d-biehl.robotcode",
},
trace=TraceValue.OFF,
workspace_folders=[WorkspaceFolder(uri="file:///c%3A/tmp/robottest/dummy/testprj", name="testprj")],
work_done_token="<PASSWORD>",
)
|
StarcoderdataPython
|
1807692
|
from django.urls import path
from .views import (
DashboardView, CandidateListView, CandidateCreateView, CandidateDeleteView, CandidateUpdateView, CandidateLikeView
)
app_name = 'customer'
urlpatterns = [
path('', DashboardView.as_view(), name='dashboard'),
path('job/', CandidateListView.as_view(), name='application-list'),
path('job/<int:pk>/apply/', CandidateCreateView.as_view(), name='application-apply'),
path('job/<int:pk>/like/', CandidateLikeView.as_view(), name='job-like'),
path('job/application/<int:pk>/update/', CandidateUpdateView.as_view(), name='application-update'),
path('job/application/<int:pk>/delete/', CandidateDeleteView.as_view(), name='application-delete'),
]
|
StarcoderdataPython
|
3374582
|
<filename>files2.py
#!/usr/bin/env python
# coding: utf-8
"""
Simple program that iterates through all files, takes the first digit of each
file size, and plots the distribution of those digits as an animated graph.
This version triggers a repaint not at a fixed frame rate, as a game would,
but after a certain number of files have been processed.
"""
# Created: 17.11.20
import pygame
import sys
import os
import random
from pathlib import Path
from time import perf_counter
from collections import defaultdict
# decorator to support static variables in functions
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@static_vars(_paths=None, _last_parent=None)
def next() -> int:
"""
Digit producer. May raise StopIteration when done.
:return: another digit, value 0..9
"""
if not next._paths:
next._paths = Path(os.environ["HOME"]).glob('**/*')
while True:
p = next._paths.__next__()
parent = p.parent
if parent != next._last_parent:
next._last_parent = parent
# print(parent)
s = str(p)
# directories have 96 bytes -> systematic "fraud"
if not p.is_file():
continue
# # many files with lengths like 2*
# if ".Office/Outlook/" in s:
# continue
# if not "/.git/" in s:
# continue
break
n = p.stat().st_size
s = str(n)
r = int(s[0])
# if r == 2 and graph.cnt > 200000 and graph.values[2] > graph.values[1]:
# print(n, p)
return r
"""
without outlook and git files:
n = 9240
0 - 2 = 0.000
1 - 2884 = 0.312
2 - 1557 = 0.169
3 - 990 = 0.107
4 - 1303 = 0.141
5 - 1110 = 0.120
6 - 379 = 0.041
7 - 430 = 0.047
8 - 347 = 0.038
9 - 238 = 0.026
all files:
n = 992493
0 - 7351 = 0.007
1 - 265084 = 0.267
2 - 242976 = 0.245
3 - 120200 = 0.121
4 - 90655 = 0.091
5 - 79231 = 0.080
6 - 63066 = 0.064
7 - 46323 = 0.047
8 - 42172 = 0.042
9 - 35435 = 0.036
"""
def color(n):
# https://www.pygame.org/docs/ref/color.html#pygame.Color
return f"0x{n:06x}"
def random_color() -> pygame.Color:
"""
Generate a random (but bright) color.
:return: the Color
"""
return pygame.Color(random.randint(128, 255), random.randint(128, 255), random.randint(128, 255))
WIDTH = 1000
BARWIDTH = 100 # 10 bars 0..9
HEIGHT = 600
class Graph:
"""
Simple class to maintaion and pait a bar chart with 10 columns.
"""
def __init__(self):
self.cnt = 0
self.values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.pc0 = perf_counter()
def count(self, x: int) -> None:
"""
        Count an occurrence of a digit.
:param x: the digit
"""
self.cnt += 1
self.values[x] += 1
def paint(self) -> pygame.Surface:
"""
Paint this Graph.
:return: a Surface with the painted graph on black background
"""
surface = pygame.Surface((WIDTH, HEIGHT))
surface.fill(pygame.Color(0,0,0))
if self.cnt == 0:
            return surface
for x in range(0, 10):
h = self.values[x] / self.cnt
top = round(HEIGHT * (1 - h), 0)
bar = pygame.Rect(BARWIDTH * x, top, BARWIDTH, 5)
# pygame.draw.rect(surface, pygame.Color(255,255,255), bar)
pygame.draw.rect(surface, random_color(), bar)
return surface
def stat(self):
print()
print("n =", self.cnt)
for i, v in enumerate(self.values):
print(f"{i} - {v:10d} = {v/self.cnt:.3f}")
dpc = perf_counter() - self.pc0
print(f"elapsed ~ {dpc:.1f} s")
class Timer:
def __init__(self):
self.timers = defaultdict(float)
self.counters = defaultdict(int)
def log(self, k, dt):
self.timers[k] += dt
self.counters[k] += 1
# if self.counters[k] == 1:
# print(f"{k} .. {dt}")
def stat(self):
for k in self.timers:
print(f"{k} - {self.timers[k]:.3f} ({self.counters[k]})")
def update(graph, n) -> None:
"""
Process many values into a Graph.
:param graph: the Graph
:param n: process that number of values
"""
pc0 = perf_counter()
for i in range(0, n):
graph.count(next())
TIMER.log("calc", perf_counter() - pc0)
# print(".", end="", flush=True)
if __name__ == "__main__":
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT))
FPS = 1
FramePerSec = pygame.time.Clock()
    INCREMENT = 1000 # iterate that many times per painting of the Graph
pc0 = perf_counter()
GRAPH = Graph()
TIMER = Timer()
done = False
cnt = 0
while not done:
try:
update(GRAPH, INCREMENT)
except StopIteration:
done = True
except KeyboardInterrupt:
done = True
cnt += 1
# print(cnt, end=" ", flush=True)
# print(graph.cnt, end=" ", flush=True) # because it's so tedious to print in a Surface
pc_paint = perf_counter()
surf = GRAPH.paint()
DISPLAYSURF.blit(surf, (0,0))
pygame.display.update()
TIMER.log("paint", perf_counter() - pc_paint)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
GRAPH.stat()
TIMER.stat()
sys.exit()
# FramePerSec.tick(FPS)
# when done
pygame.quit()
GRAPH.stat()
TIMER.stat()
sys.exit()
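# --- Added usage sketch (not part of the original file) ---
# A hedged, pygame-free illustration of the static_vars decorator defined at
# the top of this file: it attaches attributes to the function object, which
# the function then uses as persistent state between calls. Placed after the
# main block, so it only takes effect when the module is imported.
@static_vars(counter=0)
def bump():
    bump.counter += 1
    return bump.counter

# bump() -> 1, bump() -> 2, ...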
|
StarcoderdataPython
|
12864934
|
<reponame>zenranda/proj5-map<filename>flask_map.py
import flask
from flask import render_template
from flask import request
from flask import url_for
import json
import logging
###
# Globals
###
app = flask.Flask(__name__)
import CONFIG
###
# Pages
###
@app.route("/")
@app.route("/index")
@app.route("/map")
def index():
app.logger.debug("Main page entry")
if 'map' not in flask.session:
app.logger.debug("Sending map file")
app.logger.debug("Sending keys...")
with open('SECRETS.py') as key: #sends access token to the page
ent = "" #in theory, sensitive information
for line in key:
while ent == "":
ent = line
flask.session['confidental'] = ent
app.logger.debug("Sending loc data...")
with open('POI.txt') as points:
data = [] #reads the list of points
for line in points:
item = []
line = line.strip()
k = line.split("|")
item.append(k[0]) #puts each part of the point (name, lat, long) into a list
item.append(k[1])
item.append(k[2])
data.append(item) #adds the list with the data to another list
flask.session['points'] = data #sends that list to jinja
return flask.render_template('map.html')
@app.errorhandler(404)
def page_not_found(error):
app.logger.debug("Page not found")
flask.session['linkback'] = flask.url_for("index")
return flask.render_template('page_not_found.html'), 404
#############
#
# Set up to run from cgi-bin script, from
# gunicorn, or stand-alone.
#
app.secret_key = CONFIG.secret_key
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
|
StarcoderdataPython
|
3507162
|
from ReferenceManual import createReferenceManual
from ReferenceManual import printReferenceManual
import ColorPair_test_data as td
if __name__ == '__main__':
td.test_functionalities()
print('Reference Manual')
printReferenceManual(createReferenceManual())
print('Done :)')
|
StarcoderdataPython
|
388572
|
<reponame>shingarov/cle
"""
CLE is an extensible binary loader. Its main goal is to take an executable program and any libraries it depends on and
produce an address space where that program is loaded and ready to run.
The primary interface to CLE is the Loader class.
"""
__version__ = (8, 20, 1, 7)
if bytes is str:
raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.")
import logging
logging.getLogger(name=__name__).addHandler(logging.NullHandler())
# pylint: disable=wildcard-import
from . import utils
from .loader import *
from .memory import *
from .errors import *
from .backends import *
from .backends.tls import *
from .backends.externs import *
from .patched_stream import *
from .gdb import *
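# --- Added usage sketch (not part of the original file) ---
# A hedged example of the Loader interface mentioned in the docstring above.
# The binary path is a placeholder; Loader and the attributes used below are
# provided by the wildcard imports from the loader module.
if __name__ == "__main__":
    ld = Loader("/bin/ls")
    print(ld.main_object)
    print(hex(ld.min_addr), hex(ld.max_addr))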
|
StarcoderdataPython
|
8017080
|
'''
Bet365 D_ Token Fetcher
Author: @ElJaviLuki
'''
import subprocess
import re
PATH = './deobfuscator365/'
def fetch_D_token(bootjs_code: str):
filename = PATH + 'd_fetcher.js'
file = open(filename, "w")
file.write(
"""try {
const jsdom = require("jsdom");
const { JSDOM } = jsdom;
const { window: bootjs } = new JSDOM('', {
url: "https://www.bet365.com",
runScripts: "outside-only"
});
"""
# Fetch D_ token
"""
function fetchHandshakeD_token(bootjsCode){
bootjs.eval(bootjsCode);
var fetchComposedToken = function(window) {
finalQ = '';
var tags = [
"Anchor",
"Audio",
"Body",
"Button",
"Canvas",
"Div",
"Form",
"Heading",
"IFrame",
"Image",
"Input",
"Label",
"Link",
"Media",
"Option",
"Paragraph",
"Pre",
"Select",
"Span",
"Style",
"TableCell",
"TableCol",
"Table",
"TableRow",
"TableSection",
"TextArea",
"Video"
];
for (var index = 0; index < tags.length; index++) {
var thisTag = tags[index]
, winElement = window["HTML" + thisTag + "Element"]
, theCaught = winElement.prototype.Caught;
if (!theCaught)
continue;
var ownPropNames = Object.getOwnPropertyNames(Object.getPrototypeOf(theCaught));
for (var propIndex in ownPropNames) {
var thisPropName = ownPropNames[propIndex]
, protoFromThisCaught = Object.getPrototypeOf(theCaught[thisPropName])
, subOPNs = Object.getOwnPropertyNames(protoFromThisCaught);
for (var subOPNindex in subOPNs) {
var thisSubOPN = subOPNs[subOPNindex];
if (thisSubOPN in Object)
continue;
if (protoFromThisCaught[thisSubOPN] && protoFromThisCaught[thisSubOPN]()) {
var composedToken = protoFromThisCaught[thisSubOPN]();
finalQ = composedToken[0],
initialN = composedToken[1];
break;
}
}
if (finalQ)
break;
}
delete winElement.prototype.Caught;
if (finalQ)
break;
}
return finalQ;
}
var transformNtoken = function(bootjs, URIComponent) {
decURI = decodeURIComponent(URIComponent);
var b64decodedSc = -bootjs.atob(initialN) % 0x40;
finalN = '';
for (var chIndex = 0; chIndex < decURI.length; chIndex++) {
var charCode = decURI.charCodeAt(chIndex);
var fromChar = String.fromCharCode((charCode+b64decodedSc) % 0x100);
finalN += fromChar;
}
return finalN;
}
var initialN = ''
var finalQ = fetchComposedToken(bootjs);
var HandJ = finalQ.split('.');
"""
    # NOTE: The N token can also be fetched by Base64-encoding the evaluated value of
    # the expression 'ns_weblib_util.WebsiteConfig.SERVER_TIME+300'
"""
var finalN = transformNtoken(bootjs, HandJ[0]);
var finalJ = HandJ[1];
var D_token = [finalN, '.', finalJ].join('');
return D_token;
}
"""
# String.raw`···`: avoids string escaping problems.
"""
d_token = fetchHandshakeD_token(String.raw
`
"""
# Cancel some unneeded built-in functions that cause bugs during evaluation.
# Note: if you happen to find another bug that can be fixed by modifying a dependency, override
# that dependency here:
"""
overrideFunction = function() {}
XMLHttpRequest.prototype.open = overrideFunction;
XMLHttpRequest.prototype.send = overrideFunction;
""" + str(bootjs_code) + """
`
)
console.log("<D_TOKEN>" + d_token + "</D_TOKEN>");
} catch (error) {
console.error(error);
} finally {
process.exit();
}"""
)
file.close()
result = subprocess.run(
['node', filename],
text=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if result.returncode == 0:
delimitedToken = re.search("<D_TOKEN>(.*?)</D_TOKEN>", result.stdout).group()
D_token = delimitedToken[len("<D_TOKEN>"):-len("</D_TOKEN>")]
else:
raise Exception('Error during token generation script evaluation or execution: ' + result.stderr)
return D_token
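# --- Added usage sketch (not part of the original file) ---
# A hedged example of driving fetch_D_token(). It assumes Node.js and the
# jsdom package are installed and that the site's boot.js has already been
# saved to the hypothetical path below.
if __name__ == "__main__":
    with open(PATH + 'boot.js', encoding='utf-8') as f:
        bootjs_code = f.read()
    print("D_ token:", fetch_D_token(bootjs_code))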
|
StarcoderdataPython
|
12828890
|
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.db import models
from django.utils import timezone
class AdvancedUserManager(BaseUserManager):
def create_user(self, email, username, password=<PASSWORD>, **extra_fields):
if not email:
raise ValueError("User must have an email")
email = self.normalize_email(email)
user = self.model(email=email, username=username, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password=<PASSWORD>, **extra_fields):
        user = self.create_user(
            email=email, username=username, password=password, **extra_fields
        )
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class AdvancedUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(
max_length=255,
unique=True,
blank=False,
verbose_name="Электронная почта",
)
username = models.CharField(
max_length=255, unique=True, verbose_name="Имя пользователя"
)
first_name = models.CharField(max_length=255, verbose_name="Имя")
last_name = models.CharField(max_length=255, verbose_name="Фамилия")
is_active = models.BooleanField(
default=True, verbose_name="Статус активности"
)
is_staff = models.BooleanField(
default=False, verbose_name="Статус администратора"
)
is_superuser = models.BooleanField(
default=False, verbose_name="Статус суперпользователя"
)
date_joined = models.DateTimeField(
default=timezone.now, verbose_name="Дата регистрации"
)
last_login = models.DateTimeField(
null=True, verbose_name="Последнее посещение"
)
objects = AdvancedUserManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["username", "first_name", "last_name"]
class Meta:
verbose_name = "Пользователь"
verbose_name_plural = "Пользователи"
def get_full_name(self):
return f"{self.first_name} - {self.last_name}"
def get_short_name(self):
return self.username
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
def __str__(self):
return self.email
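# --- Added usage sketch (not part of the original file) ---
# A hedged example of creating accounts through the custom manager, e.g. from
# a Django shell or a data migration. All credentials below are placeholders.
def _create_demo_users():
    user = AdvancedUser.objects.create_user(
        email="user@example.com", username="user", password="change-me",
        first_name="Ivan", last_name="Ivanov")
    admin = AdvancedUser.objects.create_superuser(
        username="admin", email="admin@example.com", password="change-me")
    return user, admin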
|
StarcoderdataPython
|
3229185
|
import os
import duckdb
import pandas as pd
from dagster import Field, check, io_manager
from dagster.seven.temp_dir import get_system_temp_directory
from .parquet_io_manager import PartitionedParquetIOManager
class DuckDBPartitionedParquetIOManager(PartitionedParquetIOManager):
"""Stores data in parquet files and creates duckdb views over those files."""
def handle_output(self, context, obj):
if obj is not None: # if this is a dbt output, then the value will be None
yield from super().handle_output(context, obj)
con = self._connect_duckdb(context)
path = self._get_path(context)
if context.has_asset_partitions:
to_scan = os.path.join(os.path.dirname(path), "*.pq", "*.parquet")
else:
to_scan = path
con.execute("create schema if not exists hackernews;")
con.execute(
f"create or replace view {self._table_path(context)} as "
f"select * from parquet_scan('{to_scan}');"
)
def load_input(self, context):
check.invariant(not context.has_asset_partitions, "Can't load partitioned inputs")
if context.dagster_type.typing_type == pd.DataFrame:
con = self._connect_duckdb(context)
return con.execute(f"SELECT * FROM {self._table_path(context)}").fetchdf()
check.failed(
f"Inputs of type {context.dagster_type} not supported. Please specify a valid type "
"for this input either on the argument of the @asset-decorated function."
)
def _table_path(self, context):
return f"hackernews.{context.asset_key.path[-1]}"
def _connect_duckdb(self, context):
return duckdb.connect(database=context.resource_config["duckdb_path"], read_only=False)
@io_manager(
config_schema={"base_path": Field(str, is_required=False), "duckdb_path": str},
required_resource_keys={"pyspark"},
)
def duckdb_partitioned_parquet_io_manager(init_context):
return DuckDBPartitionedParquetIOManager(
base_path=init_context.resource_config.get("base_path", get_system_temp_directory())
)
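# --- Added standalone sketch (not part of the original file) ---
# A hedged illustration of the core idea the IO manager implements: write a
# DataFrame to parquet, then expose it to DuckDB through a view created with
# parquet_scan(). Paths are placeholders and writing parquet assumes pyarrow
# (or fastparquet) is installed.
def _demo_duckdb_view(tmp_dir="/tmp/hackernews_demo"):
    os.makedirs(tmp_dir, exist_ok=True)
    path = os.path.join(tmp_dir, "stories.parquet")
    pd.DataFrame({"id": [1, 2], "title": ["a", "b"]}).to_parquet(path)
    con = duckdb.connect(os.path.join(tmp_dir, "demo.duckdb"))
    con.execute("create schema if not exists hackernews;")
    con.execute("create or replace view hackernews.stories as "
                f"select * from parquet_scan('{path}');")
    return con.execute("select count(*) from hackernews.stories").fetchall()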
|
StarcoderdataPython
|
381440
|
<gh_stars>0
from data import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
pipe_lr = Pipeline([
('scl', StandardScaler()),
('clf', LogisticRegression(penalty='l2', random_state=0))
])
train_sizes, train_scores, test_scores = \
learning_curve(
estimator=pipe_lr,
X = X_train,
y = y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
cv = 10,
n_jobs=1
)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean, color='blue', marker='o',
markersize=5, label='training accuracy')
plt.fill_between(train_sizes,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color='blue')
plt.plot(train_sizes, test_mean,
color='green', linestyle='--',
marker='s', markersize=5, label='validation accuracy')
plt.fill_between(train_sizes,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='green')
plt.grid()
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.0])
plt.title('learning curve')
plt.show()
from sklearn.model_selection import validation_curve
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
train_scores, test_scores = validation_curve(
estimator=pipe_lr,
X = X_train,
y = y_train,
param_name='clf__C',
param_range = param_range,
cv = 10
)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='blue',
marker='o', markersize=5, label='training accuracy')
plt.fill_between(
param_range,
train_mean + train_std,
train_mean - train_std,
alpha=0.15, color = 'blue'
)
plt.plot(
param_range, test_mean, color='green', linestyle='--',
marker='s', markersize=5, label='validation accuracy'
)
plt.fill_between(
param_range,
test_mean + test_std,
test_mean - test_std,
alpha=0.15, color='green'
)
plt.grid()
plt.xscale('log')
plt.title('validation curve')
plt.legend(loc='lower right')
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.ylim([0.8, 1.0])
plt.show()
|
StarcoderdataPython
|
170567
|
import numpy as np
class RolloutWorker:
def __init__(self, env, policy, cfg, env_params, language_conditioned=False):
self.env = env
self.policy = policy
self.cfg = cfg
self.env_params = env_params
self.language_conditioned = language_conditioned
self.timestep_counter = 0
def generate_rollout(self, train_mode=False, animated=False):
episodes = []
for _ in range(self.cfg.num_rollouts_per_mpi):
ep_obs, ep_actions, ep_success, ep_rewards = [], [], [], []
observation = self.env.reset()
obs = observation['observation']
if self.language_conditioned:
instruction = observation['instruction']
ep_instructions, ep_hinsight_instruction = [], []
else:
ag = observation['achieved_goal']
g = observation['desired_goal']
ep_ag, ep_g = [], []
for _ in range(self.env_params['max_timesteps']):
if self.language_conditioned:
action = self.policy.act(obs.copy(), instruction.copy(), train_mode)
else:
action = self.policy.act(obs.copy(), ag.copy(), g.copy(), train_mode)
if animated:
self.env.render()
observation_new, reward, _, info = self.env.step(action)
self.timestep_counter += 1
obs_new = observation_new['observation']
if self.language_conditioned:
instruction_new = observation_new['instruction']
hindsight_instr = info['hindsight_instruction'] if 'hindsight_instruction' in info.keys(
) else np.zeros_like(instruction_new)
else:
ag_new = observation_new['achieved_goal']
ep_obs.append(obs.copy())
ep_actions.append(action.copy())
ep_rewards.append([reward])
if self.language_conditioned:
ep_instructions.append(instruction.copy())
ep_hinsight_instruction.append(hindsight_instr.copy())
else:
ep_ag.append(ag.copy())
ep_g.append(g.copy())
obs = obs_new
if self.language_conditioned:
instruction = instruction_new
else:
ag = ag_new
ep_success.append(info['is_success'])
ep_obs.append(obs.copy())
if not self.language_conditioned:
ep_ag.append(ag.copy())
episode_data = dict(obs=np.array(ep_obs).copy(),
action=np.array(ep_actions).copy(),
reward=np.array(ep_rewards).copy(),
success=np.array(ep_success).copy(),
timesteps=self.timestep_counter)
if self.language_conditioned:
episode_data['instruction'] = np.array(ep_instructions).copy()
episode_data['hindsight_instruction'] = np.array(ep_hinsight_instruction).copy()
else:
episode_data['g'] = np.array(ep_g).copy()
episode_data['ag'] = np.array(ep_ag).copy()
episodes.append(episode_data)
return episodes
def generate_test_rollout(self, animated=False):
rollout_data = []
for _ in range(self.cfg.n_test_rollouts):
rollout = self.generate_rollout(train_mode=False, animated=animated)
rollout_data.append(rollout)
# only take the last step to calculate success rate
success_rate = np.mean([_rd['success'][-1] for rd in rollout_data for _rd in rd])
rewards = np.sum([_rd['reward'] for rd in rollout_data for _rd in rd], 1).mean()
return success_rate, rewards
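# --- usage sketch (added for illustration; not part of the original module) ---
# The stubs below only document the interfaces RolloutWorker appears to expect:
# a goal-conditioned env whose reset()/step() return dicts with 'observation',
# 'achieved_goal' and 'desired_goal' plus an info['is_success'] flag, and a
# policy exposing act(obs, ag, g, train_mode). Shapes and names are assumptions.
class _StubEnv:
    def reset(self):
        return {'observation': np.zeros(4), 'achieved_goal': np.zeros(2),
                'desired_goal': np.ones(2)}
    def step(self, action):
        obs = {'observation': np.zeros(4), 'achieved_goal': np.zeros(2),
               'desired_goal': np.ones(2)}
        return obs, -1.0, False, {'is_success': 0.0}
    def render(self):
        pass
class _StubPolicy:
    def act(self, obs, ag, g, train_mode):
        return np.zeros(2)
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(num_rollouts_per_mpi=1, n_test_rollouts=2)
    worker = RolloutWorker(_StubEnv(), _StubPolicy(), cfg, {'max_timesteps': 5})
    success_rate, mean_reward = worker.generate_test_rollout()
    print(success_rate, mean_reward)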
|
StarcoderdataPython
|
8063075
|
# repo: atpaino/stocktradinganalysis
#Contains functions that compute a statistic for a single HistoricData item from time
#offset+n through time offset.
import scipy.stats as sts
def variation_wrapper(hd, n=20, offset=0):
"""
Calculates the variation on the closing price of hd from offset:offset+n
"""
return sts.variation(hd.close[offset:offset+n])
def gain(hd, n=20, offset=0):
"""
Calculates the gain over offset:offset+n in hd's closing prices
"""
return ((hd.close[offset] - hd.close[offset+n]) / hd.close[offset+n])
def gain_vs_avg(hd, n=20, offset=0):
"""
Calculates the gain of the closing price at offset vs the moving avg.
"""
return ((hd.close[offset] - sma(hd, offset=offset)) / sma(hd, offset=offset))
def sma(hd, time_period=90, offset=0):
"""
Returns the simple moving average for the stock over the specified period of time.
Note: time_period is used instead of n since typically the time period here being
used is greater than n.
"""
if len(hd.close) >= offset+time_period:
return sts.tmean(hd.close[offset:offset+time_period])
else:
return sts.tmean(hd.close[offset:])
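# Usage sketch (added for illustration): HistoricData is not defined in this
# module, so the namedtuple below merely mimics the .close attribute these
# functions read, with prices ordered most-recent-first as the offset
# arithmetic above assumes.
if __name__ == '__main__':
    from collections import namedtuple
    HistoricData = namedtuple('HistoricData', 'close')
    hd = HistoricData(close=[110.0, 108.0, 105.0, 100.0, 95.0])
    print(gain(hd, n=4))               # (110 - 95) / 95 ~= 0.158
    print(sma(hd, time_period=3))      # mean of the three most recent closes
    print(variation_wrapper(hd, n=5))  # coefficient of variation of all closes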
|
StarcoderdataPython
|
4826152
|
# test/optimization/test_slsqp.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test SLSQP Optimizer """
import unittest
from test.optimization.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from qiskit.optimization import INFINITY
from qiskit.optimization.algorithms import SlsqpOptimizer
from qiskit.optimization.problems import QuadraticProgram
class TestSlsqpOptimizer(QiskitOptimizationTestCase):
"""SLSQP Optimizer Tests. """
def test_slsqp_optimizer(self):
""" Generic SLSQP Optimizer Test. """
problem = QuadraticProgram()
problem.continuous_var(upperbound=4)
problem.continuous_var(upperbound=4)
problem.linear_constraint(linear=[1, 1], sense='=', rhs=2)
problem.minimize(linear=[2, 2], quadratic=[[2, 0.25], [0.25, 0.5]])
# solve problem with SLSQP
slsqp = SlsqpOptimizer(trials=3)
result = slsqp.solve(problem)
self.assertAlmostEqual(result.fval, 5.8750)
def test_slsqp_optimizer_full_output(self):
""" Generic SLSQP Optimizer Test. """
problem = QuadraticProgram()
problem.continuous_var(upperbound=4)
problem.continuous_var(upperbound=4)
problem.linear_constraint(linear=[1, 1], sense='=', rhs=2)
problem.minimize(linear=[2, 2], quadratic=[[2, 0.25], [0.25, 0.5]])
# solve problem with SLSQP
slsqp = SlsqpOptimizer(trials=3, full_output=True)
result = slsqp.solve(problem)
self.assertAlmostEqual(result.fval, 5.8750)
self.assertAlmostEqual(result.fx, 5.8750)
self.assertGreaterEqual(result.its, 1)
self.assertEqual(result.imode, 0)
self.assertIsNotNone(result.smode)
def test_slsqp_unbounded(self):
"""Unbounded test for optimization"""
problem = QuadraticProgram()
problem.continuous_var(name="x")
problem.continuous_var(name="y")
problem.maximize(linear=[2, 0], quadratic=[[-1, 2], [0, -2]])
slsqp = SlsqpOptimizer()
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([2., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
def test_slsqp_unbounded_with_trials(self):
"""Unbounded test for optimization"""
problem = QuadraticProgram()
problem.continuous_var(name="x", lowerbound=-INFINITY, upperbound=INFINITY)
problem.continuous_var(name="y", lowerbound=-INFINITY, upperbound=INFINITY)
problem.maximize(linear=[2, 0], quadratic=[[-1, 2], [0, -2]])
slsqp = SlsqpOptimizer(trials=3)
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([2., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
def test_slsqp_bounded(self):
"""Same as above, but a bounded test"""
problem = QuadraticProgram()
problem.continuous_var(name="x", lowerbound=2.5)
problem.continuous_var(name="y", upperbound=0.5)
problem.maximize(linear=[2, 0], quadratic=[[-1, 2], [0, -2]])
slsqp = SlsqpOptimizer()
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([2.5, 0.5], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(0.75, solution.fval, 3)
def test_slsqp_equality(self):
"""A test with equality constraint"""
problem = QuadraticProgram()
problem.continuous_var(name="x")
problem.continuous_var(name="y")
problem.linear_constraint(linear=[1, -1], sense='=', rhs=0)
problem.maximize(linear=[2, 0], quadratic=[[-1, 2], [0, -2]])
slsqp = SlsqpOptimizer()
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([1., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
def test_slsqp_inequality(self):
"""A test with inequality constraint"""
problem = QuadraticProgram()
problem.continuous_var(name="x")
problem.continuous_var(name="y")
problem.linear_constraint(linear=[1, -1], sense='>=', rhs=1)
problem.maximize(linear=[2, 0], quadratic=[[-1, 2], [0, -2]])
slsqp = SlsqpOptimizer()
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([2., 1.], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(2., solution.fval, 3)
def test_slsqp_optimizer_with_quadratic_constraint(self):
"""A test with equality constraint"""
problem = QuadraticProgram()
problem.continuous_var(upperbound=1)
problem.continuous_var(upperbound=1)
problem.minimize(linear=[1, 1])
linear = [-1, -1]
quadratic = [[1, 0], [0, 1]]
problem.quadratic_constraint(linear=linear, quadratic=quadratic, rhs=-1/2)
slsqp = SlsqpOptimizer()
solution = slsqp.solve(problem)
self.assertIsNotNone(solution)
self.assertIsNotNone(solution.x)
np.testing.assert_almost_equal([0.5, 0.5], solution.x, 3)
self.assertIsNotNone(solution.fval)
np.testing.assert_almost_equal(1., solution.fval, 3)
def test_multistart_properties(self):
"""
Tests properties of MultiStartOptimizer.
Since it is an abstract class, the test is here.
"""
trials = 5
clip = 200.
slsqp = SlsqpOptimizer(trials=trials, clip=clip)
self.assertEqual(trials, slsqp.trials)
self.assertAlmostEqual(clip, slsqp.clip)
trials = 6
clip = 300.
slsqp.trials = trials
slsqp.clip = clip
self.assertEqual(trials, slsqp.trials)
self.assertAlmostEqual(clip, slsqp.clip)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4926745
|
# qdef2d/defects/calc_Eform_corr.py
import os
import argparse
import pandas as pd
def calc(dir_def,xlfile):
"""
Evaluate corrected defect formation energy.
dir_def (str): path to the defect directory containing the excel file
xlfile (str): excel filename to read/save the dataframe from/to
"""
## load list of dataframes from sheets from excel file
df = pd.read_excel(os.path.join(dir_def,xlfile),sheet_name=None)
for q in [qi for qi in df.keys() if qi != 'charge_0']:
## Finally, we can compute the corrected defect formation energy:
## Eform = Eform_uncorr + E_correction
df[q]["E_form_corr"] = df[q].loc[:,'E_form_uncorr'] + df[q].loc[:,'E_corr']
## write the updated excel file
writer = pd.ExcelWriter(os.path.join(dir_def,xlfile))
for q in df.keys():
df[q].to_excel(writer, q, index=False)
writer.save()
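# Illustrative helper (added; not called by the original script): the same
# correction applied to an in-memory frame with made-up numbers, mirroring the
# column names the excel sheets above are expected to carry.
def _demo_correction():
    toy = pd.DataFrame({'E_form_uncorr': [1.20, 2.35], 'E_corr': [0.05, -0.10]})
    toy['E_form_corr'] = toy['E_form_uncorr'] + toy['E_corr']
    return toy  # E_form_corr -> 1.25, 2.25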
if __name__ == '__main__':
## this script can also be run directly from the command line
parser = argparse.ArgumentParser(description='Evaluate corrected defect formation energy.')
parser.add_argument('dir_def',help='path to the defect directory containing the excel file')
parser.add_argument('xlfile',help='excel filename to read/save the dataframe from/to')
## read in the above arguments from command line
args = parser.parse_args()
calc(args.dir_def, args.xlfile)
|
StarcoderdataPython
|
5024110
|
# delfin/drivers/ibm/storwize_svc/ssh_handler.py
# Copyright 2020 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import time
from itertools import islice
import paramiko
import six
from oslo_log import log as logging
from oslo_utils import units
from delfin import exception, utils
from delfin.common import constants, alert_util
from delfin.drivers.ibm.storwize_svc import consts
from delfin.drivers.utils.ssh_client import SSHPool
from delfin.drivers.utils.tools import Tools
LOG = logging.getLogger(__name__)
class SSHHandler(object):
OID_ERR_ID = '1.3.6.1.4.192.168.127.12.4.3'
OID_SEQ_NUMBER = '1.3.6.1.4.192.168.127.12.4.9'
OID_LAST_TIME = '1.3.6.1.4.1.2.6.190.4.10'
OID_OBJ_TYPE = '1.3.6.1.4.1.2.6.190.4.11'
OID_OBJ_NAME = '1.3.6.1.4.192.168.127.12.4.17'
OID_SEVERITY = '1.3.6.1.6.3.1.1.4.1.0'
TRAP_SEVERITY_MAP = {
'1.3.6.1.4.1.2.6.190.1': constants.Severity.CRITICAL,
'1.3.6.1.4.192.168.127.12.2': constants.Severity.WARNING,
'1.3.6.1.4.192.168.127.12.3': constants.Severity.INFORMATIONAL,
}
SEVERITY_MAP = {"warning": "Warning",
"informational": "Informational",
"error": "Major"
}
CONTRL_STATUS_MAP = {"online": constants.ControllerStatus.NORMAL,
"offline": constants.ControllerStatus.OFFLINE,
"service": constants.ControllerStatus.NORMAL,
"flushing": constants.ControllerStatus.UNKNOWN,
"pending": constants.ControllerStatus.UNKNOWN,
"adding": constants.ControllerStatus.UNKNOWN,
"deleting": constants.ControllerStatus.UNKNOWN
}
DISK_PHYSICAL_TYPE = {
'fc': constants.DiskPhysicalType.FC,
'sas_direct': constants.DiskPhysicalType.SAS
}
VOLUME_PERF_METRICS = {
'readIops': 'ro',
'writeIops': 'wo',
'readThroughput': 'rb',
'writeThroughput': 'wb',
'readIoSize': 'rb',
'writeIoSize': 'wb',
'responseTime': 'res_time',
'throughput': 'tb',
'iops': 'to',
'ioSize': 'tb',
'cacheHitRatio': 'hrt',
'readCacheHitRatio': 'rhr',
'writeCacheHitRatio': 'whr'
}
DISK_PERF_METRICS = {
'readIops': 'ro',
'writeIops': 'wo',
'readThroughput': 'rb',
'writeThroughput': 'wb',
'responseTime': 'res_time',
'throughput': 'tb',
'iops': 'to'
}
CONTROLLER_PERF_METRICS = {
'readIops': 'ro',
'writeIops': 'wo',
'readThroughput': 'rb',
'writeThroughput': 'wb',
'responseTime': 'res_time',
'throughput': 'tb',
'iops': 'to'
}
PORT_PERF_METRICS = {
'readIops': 'ro',
'writeIops': 'wo',
'readThroughput': 'rb',
'writeThroughput': 'wb',
'throughput': 'tb',
'responseTime': 'res_time',
'iops': 'to'
}
TARGET_RESOURCE_RELATION = {
constants.ResourceType.DISK: 'mdsk',
constants.ResourceType.VOLUME: 'vdsk',
constants.ResourceType.PORT: 'port',
constants.ResourceType.CONTROLLER: 'node'
}
RESOURCE_PERF_MAP = {
constants.ResourceType.DISK: DISK_PERF_METRICS,
constants.ResourceType.VOLUME: VOLUME_PERF_METRICS,
constants.ResourceType.PORT: PORT_PERF_METRICS,
constants.ResourceType.CONTROLLER: CONTROLLER_PERF_METRICS
}
SECONDS_TO_MS = 1000
ALERT_NOT_FOUND_CODE = 'CMMVC8275E'
BLOCK_SIZE = 512
BYTES_TO_BIT = 8
def __init__(self, **kwargs):
self.ssh_pool = SSHPool(**kwargs)
@staticmethod
def handle_split(split_str, split_char, arr_number):
split_value = ''
if split_str is not None and split_str != '':
tmp_value = split_str.split(split_char, 1)
if arr_number == 1 and len(tmp_value) > 1:
split_value = tmp_value[arr_number].strip()
elif arr_number == 0:
split_value = tmp_value[arr_number].strip()
return split_value
@staticmethod
def parse_alert(alert):
try:
alert_model = dict()
alert_name = SSHHandler.handle_split(alert.get(
SSHHandler.OID_ERR_ID), ':', 1)
error_info = SSHHandler.handle_split(alert.get(
SSHHandler.OID_ERR_ID), ':', 0)
alert_id = SSHHandler.handle_split(error_info, '=', 1)
severity = SSHHandler.TRAP_SEVERITY_MAP.get(
alert.get(SSHHandler.OID_SEVERITY),
constants.Severity.INFORMATIONAL
)
alert_model['alert_id'] = str(alert_id)
alert_model['alert_name'] = alert_name
alert_model['severity'] = severity
alert_model['category'] = constants.Category.FAULT
alert_model['type'] = constants.EventType.EQUIPMENT_ALARM
alert_model['sequence_number'] = SSHHandler. \
handle_split(alert.get(SSHHandler.OID_SEQ_NUMBER), '=', 1)
timestamp = SSHHandler. \
handle_split(alert.get(SSHHandler.OID_LAST_TIME), '=', 1)
time_type = '%a %b %d %H:%M:%S %Y'
occur_time = int(time.mktime(time.strptime(
timestamp,
time_type)))
alert_model['occur_time'] = int(occur_time * SSHHandler.
SECONDS_TO_MS)
alert_model['description'] = alert_name
alert_model['resource_type'] = SSHHandler.handle_split(
alert.get(SSHHandler.OID_OBJ_TYPE), '=', 1)
alert_model['location'] = SSHHandler.handle_split(alert.get(
SSHHandler.OID_OBJ_NAME), '=', 1)
return alert_model
except Exception as e:
LOG.error(e)
msg = ("Failed to build alert model as some attributes missing "
"in alert message:%s.") % (six.text_type(e))
raise exception.InvalidResults(msg)
def login(self):
try:
with self.ssh_pool.item() as ssh:
result = SSHHandler.do_exec('lssystem', ssh)
if 'is not a recognized command' in result:
raise exception.InvalidIpOrPort()
except Exception as e:
LOG.error("Failed to login ibm storwize_svc %s" %
(six.text_type(e)))
raise e
@staticmethod
def do_exec(command_str, ssh):
"""Execute command"""
try:
utils.check_ssh_injection(command_str.split())
if command_str is not None and ssh is not None:
stdin, stdout, stderr = ssh.exec_command(command_str)
                res, err = stdout.read(), stderr.read()
                # avoid shadowing the `re` module imported at the top of this file
                result = (res if res else err).decode()
except paramiko.AuthenticationException as ae:
LOG.error('doexec Authentication error:{}'.format(ae))
raise exception.InvalidUsernameOrPassword()
except Exception as e:
err = six.text_type(e)
LOG.error('doexec InvalidUsernameOrPassword error')
if 'timed out' in err:
raise exception.SSHConnectTimeout()
elif 'No authentication methods available' in err \
or 'Authentication failed' in err:
raise exception.InvalidUsernameOrPassword()
elif 'not a valid RSA private key file' in err:
raise exception.InvalidPrivateKey()
else:
raise exception.SSHException(err)
return result
def exec_ssh_command(self, command):
try:
with self.ssh_pool.item() as ssh:
ssh_info = SSHHandler.do_exec(command, ssh)
return ssh_info
except Exception as e:
msg = "Failed to ssh ibm storwize_svc %s: %s" % \
(command, six.text_type(e))
raise exception.SSHException(msg)
def change_capacity_to_bytes(self, unit):
unit = unit.upper()
if unit == 'TB':
result = units.Ti
elif unit == 'GB':
result = units.Gi
elif unit == 'MB':
result = units.Mi
elif unit == 'KB':
result = units.Ki
else:
result = 1
return int(result)
def parse_string(self, value):
capacity = 0
if value:
if value.isdigit():
capacity = float(value)
else:
unit = value[-2:]
capacity = float(value[:-2]) * int(
self.change_capacity_to_bytes(unit))
return capacity
def get_storage(self):
try:
system_info = self.exec_ssh_command('lssystem')
storage_map = {}
self.handle_detail(system_info, storage_map, split=' ')
serial_number = storage_map.get('id')
status = 'normal' if storage_map.get('statistics_status') == 'on' \
else 'offline'
location = storage_map.get('location')
free_capacity = self.parse_string(storage_map.get(
'total_free_space'))
used_capacity = self.parse_string(storage_map.get(
'total_used_capacity'))
raw_capacity = self.parse_string(storage_map.get(
'total_mdisk_capacity'))
subscribed_capacity = self.parse_string(storage_map.get(
'virtual_capacity'))
firmware_version = ''
if storage_map.get('code_level') is not None:
firmware_version = storage_map.get('code_level').split(' ')[0]
s = {
'name': storage_map.get('name'),
'vendor': 'IBM',
'model': storage_map.get('product_name'),
'status': status,
'serial_number': serial_number,
'firmware_version': firmware_version,
'location': location,
'total_capacity': int(free_capacity + used_capacity),
'raw_capacity': int(raw_capacity),
'subscribed_capacity': int(subscribed_capacity),
'used_capacity': int(used_capacity),
'free_capacity': int(free_capacity)
}
return s
except exception.DelfinException as e:
err_msg = "Failed to get storage: %s" % (six.text_type(e.msg))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
def handle_detail(self, deltail_info, detail_map, split):
detail_arr = deltail_info.split('\n')
for detail in detail_arr:
if detail is not None and detail != '':
strinfo = detail.split(split, 1)
key = strinfo[0]
value = ''
if len(strinfo) > 1:
value = strinfo[1]
detail_map[key] = value
def list_storage_pools(self, storage_id):
try:
pool_list = []
pool_info = self.exec_ssh_command('lsmdiskgrp')
pool_res = pool_info.split('\n')
for i in range(1, len(pool_res)):
if pool_res[i] is None or pool_res[i] == '':
continue
pool_str = ' '.join(pool_res[i].split())
strinfo = pool_str.split(' ')
detail_command = 'lsmdiskgrp %s' % strinfo[0]
deltail_info = self.exec_ssh_command(detail_command)
pool_map = {}
self.handle_detail(deltail_info, pool_map, split=' ')
status = 'normal' if pool_map.get('status') == 'online' \
else 'offline'
total_cap = self.parse_string(pool_map.get('capacity'))
free_cap = self.parse_string(pool_map.get('free_capacity'))
used_cap = self.parse_string(pool_map.get('used_capacity'))
subscribed_capacity = self.parse_string(pool_map.get(
'virtual_capacity'))
p = {
'name': pool_map.get('name'),
'storage_id': storage_id,
'native_storage_pool_id': pool_map.get('id'),
'description': '',
'status': status,
'storage_type': constants.StorageType.BLOCK,
'subscribed_capacity': int(subscribed_capacity),
'total_capacity': int(total_cap),
'used_capacity': int(used_cap),
'free_capacity': int(free_cap)
}
pool_list.append(p)
return pool_list
except exception.DelfinException as e:
err_msg = "Failed to get storage pool: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage pool: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
def list_volumes(self, storage_id):
try:
volume_list = []
volume_info = self.exec_ssh_command('lsvdisk')
volume_res = volume_info.split('\n')
for i in range(1, len(volume_res)):
if volume_res[i] is None or volume_res[i] == '':
continue
volume_str = ' '.join(volume_res[i].split())
strinfo = volume_str.split(' ')
volume_id = strinfo[0]
detail_command = 'lsvdisk -delim : %s' % volume_id
deltail_info = self.exec_ssh_command(detail_command)
volume_map = {}
self.handle_detail(deltail_info, volume_map, split=':')
status = 'normal' if volume_map.get('status') == 'online' \
else 'offline'
volume_type = 'thin' if volume_map.get('se_copy') == 'yes' \
else 'thick'
total_capacity = self.parse_string(volume_map.get('capacity'))
free_capacity = self.parse_string(volume_map.
get('free_capacity'))
used_capacity = self.parse_string(volume_map.
get('used_capacity'))
compressed = True
deduplicated = True
if volume_map.get('compressed_copy') == 'no':
compressed = False
if volume_map.get('deduplicated_copy') == 'no':
deduplicated = False
v = {
'name': volume_map.get('name'),
'storage_id': storage_id,
'description': '',
'status': status,
'native_volume_id': str(volume_map.get('id')),
'native_storage_pool_id': volume_map.get('mdisk_grp_id'),
'wwn': str(volume_map.get('vdisk_UID')),
'type': volume_type,
'total_capacity': int(total_capacity),
'used_capacity': int(used_capacity),
'free_capacity': int(free_capacity),
'compressed': compressed,
'deduplicated': deduplicated
}
volume_list.append(v)
return volume_list
except exception.DelfinException as e:
err_msg = "Failed to get storage volume: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage volume: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
def list_alerts(self, query_para):
try:
alert_list = []
alert_info = self.exec_ssh_command('lseventlog -monitoring yes '
'-message no')
alert_res = alert_info.split('\n')
for i in range(1, len(alert_res)):
if alert_res[i] is None or alert_res[i] == '':
continue
alert_str = ' '.join(alert_res[i].split())
strinfo = alert_str.split(' ', 1)
detail_command = 'lseventlog %s' % strinfo[0]
deltail_info = self.exec_ssh_command(detail_command)
alert_map = {}
self.handle_detail(deltail_info, alert_map, split=' ')
occur_time = int(alert_map.get('last_timestamp_epoch')) * \
self.SECONDS_TO_MS
if not alert_util.is_alert_in_time_range(query_para,
occur_time):
continue
alert_name = alert_map.get('event_id_text', '')
event_id = alert_map.get('event_id')
location = alert_map.get('object_name', '')
resource_type = alert_map.get('object_type', '')
severity = self.SEVERITY_MAP.get(alert_map.
get('notification_type'))
if severity == 'Informational' or severity is None:
continue
alert_model = {
'alert_id': event_id,
'alert_name': alert_name,
'severity': severity,
'category': constants.Category.FAULT,
'type': 'EquipmentAlarm',
'sequence_number': alert_map.get('sequence_number'),
'occur_time': occur_time,
'description': alert_name,
'resource_type': resource_type,
'location': location
}
alert_list.append(alert_model)
return alert_list
except exception.DelfinException as e:
err_msg = "Failed to get storage alert: %s" % (six.text_type(e))
LOG.error(err_msg)
raise e
except Exception as err:
err_msg = "Failed to get storage alert: %s" % (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
def fix_alert(self, alert):
command_line = 'cheventlog -fix %s' % alert
result = self.exec_ssh_command(command_line)
if result:
if self.ALERT_NOT_FOUND_CODE not in result:
raise exception.InvalidResults(six.text_type(result))
LOG.warning("Alert %s doesn't exist.", alert)
def list_controllers(self, storage_id):
try:
controller_list = []
controller_cmd = 'lsnode'
control_info = self.exec_ssh_command(controller_cmd)
if 'command not found' in control_info:
controller_cmd = 'lsnodecanister'
control_info = self.exec_ssh_command(controller_cmd)
control_res = control_info.split('\n')
for i in range(1, len(control_res)):
if control_res[i] is None or control_res[i] == '':
continue
control_str = ' '.join(control_res[i].split())
str_info = control_str.split(' ')
control_id = str_info[0]
detail_command = '%s %s' % (controller_cmd, control_id)
deltail_info = self.exec_ssh_command(detail_command)
control_map = {}
self.handle_detail(deltail_info, control_map, split=' ')
status = SSHHandler.CONTRL_STATUS_MAP.get(
control_map.get('status'),
constants.ControllerStatus.UNKNOWN)
controller_result = {
'name': control_map.get('name'),
'storage_id': storage_id,
'native_controller_id': control_map.get('id'),
'status': status,
'soft_version':
control_map.get('code_level', '').split(' ')[0],
'location': control_map.get('name')
}
controller_list.append(controller_result)
return controller_list
except Exception as err:
err_msg = "Failed to get controller attributes from Storwize: %s"\
% (six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
def list_disks(self, storage_id):
try:
disk_list = []
disk_info = self.exec_ssh_command('lsmdisk')
disk_res = disk_info.split('\n')
for i in range(1, len(disk_res)):
if disk_res[i] is None or disk_res[i] == '':
continue
control_str = ' '.join(disk_res[i].split())
str_info = control_str.split(' ')
disk_id = str_info[0]
detail_command = 'lsmdisk %s' % disk_id
deltail_info = self.exec_ssh_command(detail_command)
disk_map = {}
self.handle_detail(deltail_info, disk_map, split=' ')
status = constants.DiskStatus.NORMAL
if disk_map.get('status') == 'offline':
status = constants.DiskStatus.OFFLINE
physical_type = SSHHandler.DISK_PHYSICAL_TYPE.get(
disk_map.get('fabric_type'),
constants.DiskPhysicalType.UNKNOWN)
location = '%s_%s' % (disk_map.get('controller_name'),
disk_map.get('name'))
disk_result = {
'name': disk_map.get('name'),
'storage_id': storage_id,
'native_disk_id': disk_map.get('id'),
'capacity': int(self.parse_string(
disk_map.get('capacity'))),
'status': status,
'physical_type': physical_type,
'native_disk_group_id': disk_map.get('mdisk_grp_name'),
'location': location
}
disk_list.append(disk_result)
return disk_list
except Exception as err:
err_msg = "Failed to get disk attributes from Storwize: %s" % \
(six.text_type(err))
raise exception.InvalidResults(err_msg)
def get_fc_port(self, storage_id):
port_list = []
fc_info = self.exec_ssh_command('lsportfc')
fc_res = fc_info.split('\n')
for i in range(1, len(fc_res)):
if fc_res[i] is None or fc_res[i] == '':
continue
control_str = ' '.join(fc_res[i].split())
str_info = control_str.split(' ')
port_id = str_info[0]
detail_command = 'lsportfc %s' % port_id
deltail_info = self.exec_ssh_command(detail_command)
port_map = {}
self.handle_detail(deltail_info, port_map, split=' ')
status = constants.PortHealthStatus.NORMAL
conn_status = constants.PortConnectionStatus.CONNECTED
if port_map.get('status') != 'active':
status = constants.PortHealthStatus.ABNORMAL
conn_status = constants.PortConnectionStatus.DISCONNECTED
port_type = constants.PortType.FC
if port_map.get('type') == 'ethernet':
port_type = constants.PortType.ETH
location = '%s_%s' % (port_map.get('node_name'),
port_map.get('id'))
speed = None
if port_map.get('port_speed')[:-2].isdigit():
speed = int(self.handle_port_bps(
port_map.get('port_speed'), 'fc'))
port_result = {
'name': location,
'storage_id': storage_id,
'native_port_id': port_map.get('id'),
'location': location,
'connection_status': conn_status,
'health_status': status,
'type': port_type,
'speed': speed,
'native_parent_id': port_map.get('node_name'),
'wwn': port_map.get('WWPN')
}
port_list.append(port_result)
return port_list
def get_iscsi_port(self, storage_id):
port_list = []
for i in range(1, 3):
port_array = []
port_command = 'lsportip %s' % i
port_info = self.exec_ssh_command(port_command)
port_arr = port_info.split('\n')
port_map = {}
for detail in port_arr:
if detail is not None and detail != '':
strinfo = detail.split(' ', 1)
key = strinfo[0]
value = ''
if len(strinfo) > 1:
value = strinfo[1]
port_map[key] = value
else:
if len(port_map) > 1:
port_array.append(port_map)
port_map = {}
continue
for port in port_array:
if port.get('failover') == 'yes':
continue
status = constants.PortHealthStatus.ABNORMAL
if port.get('state') == 'online':
status = constants.PortHealthStatus.NORMAL
conn_status = constants.PortConnectionStatus.DISCONNECTED
if port.get('link_state') == 'active':
conn_status = constants.PortConnectionStatus.CONNECTED
port_type = constants.PortType.ETH
location = '%s_%s' % (port.get('node_name'),
port.get('id'))
port_result = {
'name': location,
'storage_id': storage_id,
'native_port_id': location,
'location': location,
'connection_status': conn_status,
'health_status': status,
'type': port_type,
'speed': int(self.handle_port_bps(
port.get('speed'), 'eth')),
'native_parent_id': port.get('node_name'),
'mac_address': port.get('MAC'),
'ipv4': port.get('IP_address'),
'ipv4_mask': port.get('mask'),
'ipv6': port.get('IP_address_6')
}
port_list.append(port_result)
return port_list
@staticmethod
def change_speed_to_bytes(unit):
unit = unit.upper()
if unit == 'TB':
result = units.T
elif unit == 'GB':
result = units.G
elif unit == 'MB':
result = units.M
elif unit == 'KB':
result = units.k
else:
result = 1
return int(result)
def handle_port_bps(self, value, port_type):
speed = 0
if value:
if value.isdigit():
speed = float(value)
else:
if port_type == 'fc':
unit = value[-2:]
speed = float(value[:-2]) * int(
self.change_speed_to_bytes(unit))
else:
unit = value[-4:-2]
speed = float(value[:-4]) * int(
self.change_speed_to_bytes(unit))
return speed
def list_ports(self, storage_id):
try:
port_list = []
port_list.extend(self.get_fc_port(storage_id))
port_list.extend(self.get_iscsi_port(storage_id))
return port_list
except Exception as err:
err_msg = "Failed to get ports attributes from Storwize: %s" % \
(six.text_type(err))
raise exception.InvalidResults(err_msg)
@staticmethod
def handle_stats_filename(file_name, file_map):
name_arr = file_name.split('_')
file_type = '%s_%s_%s' % (name_arr[0], name_arr[1], name_arr[2])
file_time = '20%s%s' % (name_arr[3], name_arr[4])
time_pattern = '%Y%m%d%H%M%S'
tools = Tools()
occur_time = tools.time_str_to_timestamp(file_time, time_pattern)
if file_map.get(file_type):
file_map[file_type][occur_time] = file_name
else:
file_map[file_type] = {occur_time: file_name}
def get_stats_filelist(self, file_map):
stats_file_command = 'lsdumps -prefix /dumps/iostats'
file_list = self.exec_ssh_command(stats_file_command)
file_line = file_list.split('\n')
for file in islice(file_line, 1, None):
if file:
file_arr = ' '.join(file.split()).split(' ')
if len(file_arr) > 1:
file_name = file_arr[1]
SSHHandler.handle_stats_filename(file_name, file_map)
for file_stats in file_map:
file_map[file_stats] = sorted(file_map.get(file_stats).items(),
key=lambda x: x[0], reverse=False)
    def package_data(self, storage_id, resource_type, metrics, metric_map):
resource_id = None
resource_name = None
unit = None
for resource_info in metric_map:
if resource_type == constants.ResourceType.PORT:
port_info = self.get_fc_port(storage_id)
if port_info:
for fc_port in port_info:
if resource_info.strip('0x').upper() == fc_port.get(
'wwn').upper():
resource_id = fc_port.get('native_port_id')
resource_name = fc_port.get('name')
break
else:
resource_arr = resource_info.split('_')
resource_id = resource_arr[0]
resource_name = resource_arr[1]
for target in metric_map.get(resource_info):
if resource_type == constants.ResourceType.PORT:
unit = consts.PORT_CAP[target]['unit']
elif resource_type == constants.ResourceType.VOLUME:
unit = consts.VOLUME_CAP[target]['unit']
elif resource_type == constants.ResourceType.DISK:
unit = consts.DISK_CAP[target]['unit']
elif resource_type == constants.ResourceType.CONTROLLER:
unit = consts.CONTROLLER_CAP[target]['unit']
if 'responseTime' == target:
for res_time in metric_map.get(resource_info).get(target):
for iops_time in metric_map.get(resource_info).get(
'iops'):
if res_time == iops_time:
res_value = metric_map.get(resource_info).get(
target).get(res_time)
iops_value = metric_map.get(
resource_info).get('iops').get(iops_time)
res_value = \
res_value / iops_value if iops_value else 0
res_value = round(res_value, 3)
metric_map[resource_info][target][res_time] = \
res_value
break
labels = {
'storage_id': storage_id,
'resource_type': resource_type,
'resource_id': resource_id,
'resource_name': resource_name,
'type': 'RAW',
'unit': unit
}
metric_value = constants.metric_struct(name=target,
labels=labels,
values=metric_map.get(
resource_info).get(
target))
metrics.append(metric_value)
@staticmethod
def count_metric_data(last_data, now_data, interval, target, metric_type,
metric_map, res_id):
if not target:
return
if 'CACHEHITRATIO' not in metric_type.upper():
value = SSHHandler.count_difference(now_data.get(target),
last_data.get(target))
else:
value = now_data.get(
SSHHandler.VOLUME_PERF_METRICS.get(metric_type))
if 'THROUGHPUT' in metric_type.upper():
value = value / interval / units.Mi
elif 'IOSIZE' in metric_type.upper():
value = value / units.Ki
elif 'IOPS' in metric_type.upper() or 'RESPONSETIME' \
in metric_type.upper():
value = value / interval
value = round(value, 3)
if metric_map.get(res_id):
if metric_map.get(res_id).get(metric_type):
if metric_map.get(res_id).get(metric_type).get(
now_data.get('time')):
metric_map[res_id][metric_type][now_data.get('time')] \
+= value
else:
metric_map[res_id][metric_type][now_data.get('time')] \
= value
else:
metric_map[res_id][metric_type] = {now_data.get('time'): value}
else:
metric_map[res_id] = {metric_type: {now_data.get('time'): value}}
@staticmethod
def count_difference(now_value, last_value):
value = 0
if now_value >= last_value:
value = now_value - last_value
else:
value = now_value
return value
@staticmethod
    def handle_volume_cache_hit(now_data, last_data):
rh = SSHHandler.count_difference(now_data.get('rh'),
last_data.get('rh'))
wh = SSHHandler.count_difference(now_data.get('wh'),
last_data.get('wh'))
rht = SSHHandler.count_difference(now_data.get('rht'),
last_data.get('rht'))
wht = SSHHandler.count_difference(now_data.get('wht'),
last_data.get('wht'))
rhr = rh * 100 / rht if rht > 0 else 0
whr = wh * 100 / wht if wht > 0 else 0
hrt = rhr + whr
now_data['rhr'] = rhr
now_data['whr'] = whr
now_data['hrt'] = hrt
def get_date_from_each_file(self, file, metric_map, target_list,
resource_type, last_data):
with self.ssh_pool.item() as ssh:
local_path = '%s/%s' % (
os.path.abspath(os.path.join(os.getcwd())),
consts.LOCAL_FILE_PATH)
file_xml = Tools.get_remote_file_to_xml(
ssh, file[1], local_path,
consts.REMOTE_FILE_PATH)
if not file_xml:
return
for data in file_xml:
if re.sub(u"\\{.*?}", "", data.tag) == \
SSHHandler.TARGET_RESOURCE_RELATION.get(
resource_type):
if resource_type == constants.ResourceType.PORT:
if data.attrib.get('fc_wwpn'):
resource_info = data.attrib.get('fc_wwpn')
else:
continue
elif resource_type == constants. \
ResourceType.CONTROLLER:
resource_info = '%s_%s' % (
int(data.attrib.get('node_id'), 16),
data.attrib.get('id'))
else:
resource_info = '%s_%s' % (data.attrib.get('idx'),
data.attrib.get('id'))
now_data = SSHHandler.package_xml_data(data.attrib,
file[0],
resource_type)
if last_data.get(resource_info):
interval = (int(file[0]) - last_data.get(
resource_info).get('time')) / units.k
if interval <= 0:
break
if resource_type == constants.ResourceType.VOLUME:
                            SSHHandler.handle_volume_cache_hit(
now_data, last_data.get(resource_info))
for target in target_list:
device_target = SSHHandler. \
RESOURCE_PERF_MAP.get(resource_type)
SSHHandler.count_metric_data(
last_data.get(resource_info),
now_data, interval,
device_target.get(target),
target, metric_map, resource_info)
last_data[resource_info] = now_data
else:
last_data[resource_info] = now_data
def get_stats_from_file(self, file_list, metric_map, target_list,
resource_type, start_time, end_time):
if not file_list:
return
find_first_file = False
recent_file = None
last_data = {}
for file in file_list:
if file[0] >= start_time and file[0] <= end_time:
if find_first_file is False:
if recent_file:
self.get_date_from_each_file(recent_file, metric_map,
target_list,
resource_type,
last_data)
self.get_date_from_each_file(file, metric_map, target_list,
resource_type, last_data)
find_first_file = True
else:
self.get_date_from_each_file(file, metric_map, target_list,
resource_type, last_data)
recent_file = file
@staticmethod
def package_xml_data(file_data, file_time, resource_type):
rb = 0
wb = 0
res_time = 0
rh = 0
wh = 0
rht = 0
wht = 0
if resource_type == constants.ResourceType.PORT:
rb = int(file_data.get('cbr')) + int(file_data.get('hbr')) + int(
file_data.get('lnbr')) + int(
file_data.get('rmbr')) * SSHHandler.BYTES_TO_BIT
wb = int(file_data.get('cbt')) + int(file_data.get('hbt')) + int(
file_data.get('lnbt')) + int(
file_data.get('rmbt')) * SSHHandler.BYTES_TO_BIT
ro = int(file_data.get('cer')) + int(file_data.get('her')) + int(
file_data.get('lner')) + int(file_data.get('rmer'))
wo = int(file_data.get('cet')) + int(file_data.get('het')) + int(
file_data.get('lnet')) + int(file_data.get('rmet'))
res_time = int(file_data.get('dtdt', 0)) / units.Ki
else:
if resource_type == constants.ResourceType.VOLUME:
rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
rh = int(file_data.get('ctrhs'))
wh = int(file_data.get('ctwhs'))
rht = int(file_data.get('ctrs'))
wht = int(file_data.get('ctws'))
res_time = int(file_data.get('xl'))
elif resource_type == constants.ResourceType.DISK:
rb = int(file_data.get('rb')) * SSHHandler.BLOCK_SIZE
wb = int(file_data.get('wb')) * SSHHandler.BLOCK_SIZE
res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
elif resource_type == constants.ResourceType.CONTROLLER:
rb = int(file_data.get('rb')) * SSHHandler.BYTES_TO_BIT
wb = int(file_data.get('wb')) * SSHHandler.BYTES_TO_BIT
res_time = int(file_data.get('rq')) + int(file_data.get('wq'))
ro = int(file_data.get('ro'))
wo = int(file_data.get('wo'))
now_data = {
'rb': rb,
'wb': wb,
'ro': ro,
'wo': wo,
'tb': rb + wb,
'to': ro + wo,
'rh': rh,
'wh': wh,
'rht': rht,
'wht': wht,
'res_time': res_time,
'time': int(file_time)
}
return now_data
def get_stats_file_data(self, file_map, res_type, metrics, storage_id,
target_list, start_time, end_time):
metric_map = {}
        for file_type in file_map:
            file_list = file_map.get(file_type)
            if 'Nv' in file_type and res_type == constants.ResourceType.VOLUME:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.VOLUME,
                                         start_time, end_time)
            elif 'Nm' in file_type and res_type == constants.ResourceType.DISK:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.DISK,
                                         start_time, end_time)
            elif 'Nn' in file_type and res_type == constants.ResourceType.PORT:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.PORT,
                                         start_time, end_time)
            elif 'Nn' in file_type and res_type == \
                    constants.ResourceType.CONTROLLER:
                self.get_stats_from_file(file_list, metric_map, target_list,
                                         constants.ResourceType.CONTROLLER,
                                         start_time, end_time)
        self.package_data(storage_id, res_type, metrics, metric_map)
def collect_perf_metrics(self, storage_id, resource_metrics,
start_time, end_time):
metrics = []
file_map = {}
try:
self.get_stats_filelist(file_map)
if resource_metrics.get(constants.ResourceType.VOLUME):
self.get_stats_file_data(
file_map,
constants.ResourceType.VOLUME,
metrics,
storage_id,
resource_metrics.get(constants.ResourceType.VOLUME),
start_time, end_time)
if resource_metrics.get(constants.ResourceType.DISK):
self.get_stats_file_data(
file_map,
constants.ResourceType.DISK,
metrics,
storage_id,
resource_metrics.get(constants.ResourceType.DISK),
start_time, end_time)
if resource_metrics.get(constants.ResourceType.PORT):
self.get_stats_file_data(
file_map,
constants.ResourceType.PORT,
metrics,
storage_id,
resource_metrics.get(constants.ResourceType.PORT),
start_time, end_time)
if resource_metrics.get(constants.ResourceType.CONTROLLER):
self.get_stats_file_data(
file_map,
constants.ResourceType.CONTROLLER,
metrics,
storage_id,
resource_metrics.get(constants.ResourceType.CONTROLLER),
start_time, end_time)
except Exception as err:
err_msg = "Failed to collect metrics from svc: %s" % \
(six.text_type(err))
LOG.error(err_msg)
raise exception.InvalidResults(err_msg)
return metrics
def get_latest_perf_timestamp(self):
latest_time = 0
stats_file_command = 'lsdumps -prefix /dumps/iostats'
file_list = self.exec_ssh_command(stats_file_command)
file_line = file_list.split('\n')
for file in islice(file_line, 1, None):
if file:
file_arr = ' '.join(file.split()).split(' ')
if len(file_arr) > 1:
file_name = file_arr[1]
name_arr = file_name.split('_')
file_time = '20%s%s' % (name_arr[3], name_arr[4])
time_pattern = '%Y%m%d%H%M%S'
tools = Tools()
occur_time = tools.time_str_to_timestamp(
file_time, time_pattern)
if latest_time < occur_time:
latest_time = occur_time
return latest_time
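# Quick illustration (added) of the plain-string helpers above; both are
# staticmethods, so no SSH connection is needed to exercise them. The sample
# line mimics the 'key value' rows returned by lssystem.
if __name__ == '__main__':
    print(SSHHandler.handle_split('total_free_space 10.00TB', ' ', 0))  # total_free_space
    print(SSHHandler.handle_split('total_free_space 10.00TB', ' ', 1))  # 10.00TB
    print(SSHHandler.count_difference(120, 100))  # 20
    print(SSHHandler.count_difference(5, 100))    # 5 (counter assumed to have reset)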
|
StarcoderdataPython
|
3340234
|
# SUAVE/SUAVE-2.5.0/regression/scripts/cnbeta/cnbeta.py
# test_cnbeta.py
# Created: Apr 2014 <NAME>
# Modified: Feb 2017, <NAME>
# Reference: Aircraft Dynamics: from Modeling to Simulation, by <NAME>
import SUAVE
import numpy as np
from SUAVE.Core import Units
from SUAVE.Methods.Flight_Dynamics.Static_Stability.Approximations.Tube_Wing.taw_cnbeta import taw_cnbeta
from SUAVE.Core import (
Data, Container,
)
import sys
sys.path.append('../Vehicles')
def main():
#only do calculation for 747
from Boeing_747 import vehicle_setup, configs_setup
vehicle = vehicle_setup()
configs = configs_setup(vehicle)
Mach = np.array([0.198])
segment = SUAVE.Analyses.Mission.Segments.Segment()
segment.freestream = Data()
segment.freestream.mach_number = Mach
segment.atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
altitude = 0.0 * Units.feet
conditions = segment.atmosphere.compute_values(altitude / Units.km)
segment.a = conditions.speed_of_sound
segment.freestream.density = conditions.density
segment.freestream.dynamic_viscosity = conditions.dynamic_viscosity
segment.freestream.velocity = segment.freestream.mach_number * segment.a
#Method Test
cn_b = taw_cnbeta(vehicle,segment,configs.base)
expected = 0.09596976 # Should be 0.184
error = Data()
error.cn_b_747 = (cn_b-expected)/expected
print(error)
for k,v in list(error.items()):
assert(np.abs(v)<1e-6)
return
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6512322
|
# repo: dliess/capnzero -- scripts/capnp_file.py
import global_types
import subprocess
from common import *
def create_capnp_file_content_str(data, file_we):
the_id = subprocess.check_output(['capnp', 'id']).decode('utf-8').rstrip()
outStr = """\
{};
using Cxx = import "/capnp/c++.capnp";
$Cxx.namespace("capnzero::{}");
""".format(the_id, file_we)
if "capnp-inline" in data:
outStr += data["capnp-inline"]
# Create capnp enum for ServiceId
service_enum_str = "enum ServiceId {\n"
for idx, service_name in enumerate(data["services"]):
service_enum_str += "\t" + create_rpc_service_id_capnp_enum(service_name) + " @" + str(idx) + ";\n"
service_enum_str += "}\n"
outStr += service_enum_str
#create user defined enumerations
schema_enum_str = ""
for enum_name in global_types.enumerations:
schema_enum_str += "enum {} {{\n".format(enum_name)
for enum_element_name, enum_element_number in global_types.enumerations[enum_name].items():
schema_enum_str += "\t{} @{};\n".format(lowerfirst(enum_element_name), enum_element_number)
schema_enum_str +="}\n"
outStr += schema_enum_str
# Create capnp enum for rpc Ids
# convention for enum name: <service_name> + "RpcIds
rpc_enum_strings = []
for service_name in data["services"]:
if "rpc" in data["services"][service_name]:
rpc_enum_string = "enum " + create_rpc_id_enum(service_name) + "{\n"
for idx, rpc_name in enumerate(data["services"][service_name]["rpc"]):
rpc_enum_string += "\t" + rpc_name + " @" + str(idx) + ";\n"
rpc_enum_string += "}\n"
rpc_enum_strings.append(rpc_enum_string)
for rpc_enum in rpc_enum_strings:
outStr += rpc_enum
outStr += "struct RpcCoord {\n"
outStr += "\tserviceId @0 :UInt16;\n"
outStr += "\trpcId @1 :UInt16;\n"
outStr += "}\n"
# Create capnp type for parameter and return types
for service_name in data["services"]:
if "rpc" in data["services"][service_name]:
for rpc_name in data["services"][service_name]["rpc"]:
rpc_info = data["services"][service_name]["rpc"][rpc_name]
if "parameter" in rpc_info and isinstance(rpc_info["parameter"], dict):
parameter_struct_str = "struct {}{{\n".format(create_capnp_rpc_parameter_type_str(service_name, rpc_name))
params = rpc_info["parameter"]
for idx, key in enumerate(params.keys()):
parameter_struct_str += "\t" + key + " @" + str(idx) + " :" + map_descr_type_to_capnp_type(params[key]) + ";\n"
parameter_struct_str += "}\n"
outStr += parameter_struct_str
return_type = rpc_return_type(rpc_info)
if return_type == RPCType.Void:
pass
elif return_type == RPCType.Dict:
return_struct_str = "struct " + create_capnp_rpc_return_type_str(service_name, rpc_name) + " {\n"
members = rpc_info["returns"]
for idx, key in enumerate(members.keys()):
return_struct_str += "\t" + key + " @" + str(idx) + " :" + map_descr_type_to_capnp_type(members[key]) + ";\n"
return_struct_str += "}\n"
outStr += return_struct_str
elif return_type == RPCType.CapnpNative:
pass
elif return_type == RPCType.DirectType:
return_struct_str = "struct " + create_capnp_rpc_return_type_str(service_name, rpc_name) + " {\n"
return_struct_str += "\tretParam @0 :" + map_descr_type_to_capnp_type(rpc_info["returns"]) + ";\n"
return_struct_str += "}\n"
outStr += return_struct_str
if "signal" in data["services"][service_name]:
for signal_name in data["services"][service_name]["signal"]:
signal_info = data["services"][service_name]["signal"][signal_name]
if "parameter" in signal_info and isinstance(signal_info["parameter"], dict):
params = signal_info["parameter"]
parameter_struct_str = "struct {}{{\n".format(create_capnp_signal_param_type_str(service_name, signal_name))
for idx, key in enumerate(params.keys()):
parameter_struct_str += "\t" + key + " @" + str(idx) + " :" + map_descr_type_to_capnp_type(params[key]) + ";\n"
parameter_struct_str += "}\n"
outStr += parameter_struct_str
return outStr
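# Illustrative input (added): the nesting below follows the keys this generator
# reads ("services" -> "rpc"/"signal" -> "parameter"/"returns"); the concrete
# service, method and type names are made up, and the type strings must be ones
# that map_descr_type_to_capnp_type from `common` understands.
_EXAMPLE_DESCR = {
    "services": {
        "LightControl": {
            "rpc": {
                "setBrightness": {"parameter": {"level": "UInt8"}, "returns": "Bool"},
            },
            "signal": {
                "brightnessChanged": {"parameter": {"level": "UInt8"}},
            },
        }
    }
}
# create_capnp_file_content_str(_EXAMPLE_DESCR, "light_control") would then emit
# the ServiceId/rpc-id enums plus the parameter/return structs for this
# description (it shells out to `capnp id`, so the capnp tool must be installed).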
|
StarcoderdataPython
|
1623022
|
# repo: nukui-s/mlens
"""ML-ENSEMBLE
:author: <NAME>
:copyright: 2017-2018
:licence: MIT
Blend Ensemble class. Fully integrable with Scikit-learn.
"""
from __future__ import division
from .base import BaseEnsemble
from ..index import BlendIndex, FullIndex
class BlendEnsemble(BaseEnsemble):
r"""Blend Ensemble class.
The Blend Ensemble is a supervised ensemble closely related to
the :class:`SuperLearner`. It differs in that to estimate the prediction
matrix Z used by the meta learner, it uses a subset of the data to predict
its complement, and the meta learner is fitted on those predictions.
By only fitting every base learner once on a subset
of the full training data, :class:`BlendEnsemble` is a fast ensemble
that can handle very large datasets simply by only using portion of it at
each stage. The cost of this approach is that information is thrown out
at each stage, as one layer will not see the training data used by the
previous layer.
With large data that can be expected to satisfy an i.i.d. assumption, the
:class:`BlendEnsemble` can achieve similar performance to more
    sophisticated ensembles at a fraction of the training time. However, when
    the data is not uniformly distributed or exhibits high variance, the
    :class:`BlendEnsemble` can be a poor choice, as information is lost at
    each stage of fitting.
See Also
--------
:class:`SuperLearner`, :class:`Subsemble`
    .. note:: All parameters can be overridden in the :attr:`add` method unless
        otherwise specified. Notably, the ``backend`` and ``n_jobs`` cannot
        be altered in the :attr:`add` method.
Parameters
----------
test_size : int, float (default = 0.5)
the size of the test set for each layer. This parameter can be
        overridden in the :attr:`add` method if a different test size is desired
for each layer. If a ``float`` is specified, it is presumed to be the
fraction of the available data to be used for training, and so
``0. < test_size < 1.``.
shuffle : bool (default = False)
        whether to shuffle data before processing each layer. This
        parameter can be overridden in the :attr:`add` method if a different
        setting is desired for each layer.
random_state : int (default = None)
random seed for shuffling inputs. Note that the seed here is used to
generate a unique seed for each layer. Can be overridden in the
:attr:`add` method.
scorer : object (default = None)
scoring function. If a function is provided, base estimators will be
scored on the prediction made. The scorer should be a function that
accepts an array of true values and an array of predictions:
``score = f(y_true, y_pred)``. Can be overridden in the :attr:`add` method.
raise_on_exception : bool (default = True)
whether to issue warnings on soft exceptions or raise error.
Examples include lack of layers, bad inputs, and failed fit of an
estimator in a layer. If set to ``False``, warnings are issued instead
but estimation continues unless exception is fatal. Note that this
can result in unexpected behavior unless the exception is anticipated.
array_check : int (default = 2)
level of strictness in checking input arrays.
- ``array_check = 0`` will not check ``X`` or ``y``
- ``array_check = 1`` will check ``X`` and ``y`` for
inconsistencies and warn when format looks suspicious,
but retain original format.
- ``array_check = 2`` will impose Scikit-learn array checks,
which converts ``X`` and ``y`` to numpy arrays and raises
an error if conversion fails.
verbose : int or bool (default = False)
level of verbosity.
* ``verbose = 0`` silent (same as ``verbose = False``)
* ``verbose = 1`` messages at start and finish (same as
``verbose = True``)
* ``verbose = 2`` messages for each layer
If ``verbose >= 50`` prints to ``sys.stdout``, else ``sys.stderr``.
For verbosity in the layers themselves, use ``fit_params``.
n_jobs : int (default = -1)
Degree of parallel processing. Set to -1 for maximum parallelism and
        1 for sequential processing. Cannot be overridden in the :attr:`add` method.
backend : str or object (default = 'threading')
backend infrastructure to use during call to
:class:`mlens.externals.joblib.Parallel`. See Joblib for further
documentation. To set global backend, set ``mlens.config._BACKEND``.
        Cannot be overridden in the :attr:`add` method.
model_selection: bool (default=False)
Whether to use the ensemble in model selection mode. If ``True``,
this will alter the ``transform`` method. When calling ``transform``
on new data, the ensemble will call ``predict``, while calling
``transform`` with the training data reproduces predictions from the
``fit`` call. Hence the ensemble can be used as a pure transformer
in a preprocessing pipeline passed to the :class:`Evaluator`, as
        training folds are faithfully reproduced as during a ``fit`` call and
test folds are transformed with the ``predict`` method.
sample_size: int (default=20)
size of training set sample
(``[min(sample_size, X.size[0]), min(X.size[1], sample_size)]``)
Examples
--------
Instantiate ensembles with no preprocessing: use list of estimators
>>> from mlens.ensemble import BlendEnsemble
>>> from mlens.metrics.metrics import rmse
>>> from sklearn.datasets import load_boston
>>> from sklearn.linear_model import Lasso
>>> from sklearn.svm import SVR
>>>
>>> X, y = load_boston(True)
>>>
>>> ensemble = BlendEnsemble()
>>> ensemble.add([SVR(), ('can name some or all est', Lasso())])
>>> ensemble.add_meta(SVR())
>>>
>>> ensemble.fit(X, y)
>>> preds = ensemble.predict(X)
>>> rmse(y, preds)
7.3337...
Instantiate ensembles with different preprocessing pipelines through dicts.
>>> from mlens.ensemble import BlendEnsemble
>>> from mlens.metrics.metrics import rmse
>>> from sklearn.datasets import load_boston
    >>> from sklearn.preprocessing import MinMaxScaler, StandardScaler
>>> from sklearn.linear_model import Lasso
>>> from sklearn.svm import SVR
>>>
>>> X, y = load_boston(True)
>>>
>>> preprocessing_cases = {'mm': [MinMaxScaler()],
... 'sc': [StandardScaler()]}
>>>
>>> estimators_per_case = {'mm': [SVR()],
... 'sc': [('can name some or all ests', Lasso())]}
>>>
>>> ensemble = BlendEnsemble()
>>> ensemble.add(estimators_per_case, preprocessing_cases).add(SVR(),
... meta=True)
>>>
>>> ensemble.fit(X, y)
>>> preds = ensemble.predict(X)
>>> rmse(y, preds)
8.249013
"""
def __init__(
self, test_size=0.5, shuffle=False, random_state=None, scorer=None,
raise_on_exception=True, array_check=2, verbose=False, n_jobs=-1,
backend=None, model_selection=False, sample_size=20, layers=None):
super(BlendEnsemble, self).__init__(
shuffle=shuffle, random_state=random_state, scorer=scorer,
raise_on_exception=raise_on_exception, array_check=array_check,
verbose=verbose, n_jobs=n_jobs, model_selection=model_selection,
sample_size=sample_size, layers=layers, backend=backend)
self.__initialized__ = 0 # Unlock parameter setting
self.test_size = test_size
self.__initialized__ = 1 # Protect against param resets
def add_meta(self, estimator, **kwargs):
"""Meta Learner.
Compatibility method for adding a meta learner to be used for final
predictions.
Parameters
----------
estimator : instance
estimator instance.
**kwargs : optional
optional keyword arguments.
"""
return self.add(estimators=estimator, meta=True, **kwargs)
def add(self, estimators, preprocessing=None,
proba=False, meta=False, propagate_features=None, **kwargs):
"""Add layer to ensemble.
Parameters
----------
preprocessing: dict of lists or list, optional (default = None)
preprocessing pipelines for given layer. If
the same preprocessing applies to all estimators, ``preprocessing``
should be a list of transformer instances. The list can contain the
instances directly, named tuples of transformers,
or a combination of both. ::
option_1 = [transformer_1, transformer_2]
option_2 = [("trans-1", transformer_1),
("trans-2", transformer_2)]
option_3 = [transformer_1, ("trans-2", transformer_2)]
If different preprocessing pipelines are desired, a dictionary
that maps preprocessing pipelines must be passed. The names of the
preprocessing dictionary must correspond to the names of the
estimator dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
estimators: dict of lists or list or instance
estimators constituting the layer. If preprocessing is none and the
layer is meant to be the meta estimator, it is permissible to pass
a single instantiated estimator. If ``preprocessing`` is
``None`` or ``list``, ``estimators`` should be a ``list``.
The list can either contain estimator instances,
named tuples of estimator instances, or a combination of both. ::
option_1 = [estimator_1, estimator_2]
option_2 = [("est-1", estimator_1), ("est-2", estimator_2)]
option_3 = [estimator_1, ("est-2", estimator_2)]
If different preprocessing pipelines are desired, a dictionary
that maps estimators to preprocessing pipelines must be passed.
            The names of the estimator dictionary must correspond to the
            names of the preprocessing dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
proba : bool (default = False)
Whether to call ``predict_proba`` on base learners.
propagate_features : list, optional
List of column indexes to propagate from the input of
the layer to the output of the layer. Propagated features are
concatenated and stored in the leftmost columns of the output
matrix. The ``propagate_features`` list should define a slice of
the numpy array containing the input data, e.g. ``[0, 1]`` to
propagate the first two columns of the input matrix to the output
matrix.
meta : bool (default = False)
Whether the layer should be treated as the final meta estimator.
**kwargs : optional
optional keyword arguments to instantiate layer with.
Returns
-------
self : instance
ensemble instance with layer instantiated.
"""
if meta:
idx = FullIndex()
else:
c = kwargs.pop('test_size', self.test_size)
idx = BlendIndex(c, raise_on_exception=self.raise_on_exception)
return super(BlendEnsemble, self).add(
estimators=estimators, preprocessing=preprocessing, indexer=idx,
proba=proba, propagate_features=propagate_features, **kwargs)
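# Conceptual sketch (added; plain scikit-learn rather than the mlens machinery):
# the blend idea described in the class docstring -- fit the base learners on
# one split, then fit the meta learner on their predictions for the held-out
# complement. Helper and variable names here are illustrative only.
def _blend_sketch(X, y, base_learners, meta_learner, test_size=0.5):
    import numpy as np
    from sklearn.model_selection import train_test_split
    X_base, X_meta, y_base, y_meta = train_test_split(
        X, y, test_size=test_size, shuffle=False)
    Z = np.column_stack(
        [est.fit(X_base, y_base).predict(X_meta) for est in base_learners])
    meta_learner.fit(Z, y_meta)
    return base_learners, meta_learner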
|
StarcoderdataPython
|
3273655
|
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app import crud, schemas
from app.api import depends
router = APIRouter()
@router.get("/", response_model=List[schemas.CourseTimeslotInDB])
def get_list_of_timeslots(
db: Session = Depends(depends.get_db), skip: int = 0, limit: int = 100
) -> List[Any]:
""" Get all information of all c_timeslots"""
timeslots = crud.course_timeslot.get_multi(db)
return timeslots
@router.get("/{timeslot_id}", response_model=schemas.CourseTimeslotInDB)
def read_timeslot_information(
*, db: Session = Depends(depends.get_db), timeslot_id: int,
) -> Any:
"""
read timeslot info
"""
timeslot = crud.course_timeslot.get(db, id=timeslot_id)
if not timeslot:
raise HTTPException(
status_code=404,
detail="The timeslot with this id does not exist in the system",
)
return timeslot
|
StarcoderdataPython
|
18091
|
# -*- coding: utf-8 -*-
"""
This module defines a connexion app object and configures the API
endpoints based the swagger.yml configuration file.
copyright: © 2019 by <NAME>.
license: MIT, see LICENSE for more details.
"""
import connexion
app = connexion.App(__name__, specification_dir="./")
app.app.url_map.strict_slashes = False
app.add_api("swagger.yml")
if __name__ == "__main__":
# FLASK_ENV=development & FLASK_DEBUG=1 w/ Docker don't seem to enable debug mode.
app.run(debug=True)
|
StarcoderdataPython
|
3450163
|
<filename>jobbing/models_remote/org.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from jobbing.models.base_model_ import Model
from jobbing import util
class Org(Model):
def __init__(self,
org_id:int = None,
org_name:str = None,
org_media_id:int = None): # noqa: E501
self.swagger_types = {
'org_id': int,
'org_name': str,
'org_media_id': int
}
self.attribute_map = {
'org_id': 'org_id',
'org_name': 'org_name',
'org_media_id': 'org_media_id'
}
self._org_id = org_id
self._org_name = org_name
self._org_media_id = org_media_id
@classmethod
def from_dict(cls, dikt) -> 'Org':
return util.deserialize_model(dikt, cls)
@property
def org_id(self) -> int:
return self._org_id
@org_id.setter
def org_id(self, param):
if param is None:
raise ValueError("Invalid value for `org_id`, must not be `None`") # noqa: E501
self._org_id = param
@property
def org_name(self) -> str:
return self._org_name
@org_name.setter
def org_name(self, param):
if param is None:
raise ValueError("Invalid value for `org_name`, must not be `None`") # noqa: E501
self._org_name = param
@property
def org_media_id(self) -> int:
return self._org_media_id
@org_media_id.setter
def org_media_id(self, param):
if param is None:
raise ValueError("Invalid value for `org_media_id`, must not be `None`") # noqa: E501
self._org_media_id = param
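# A small, hedged usage sketch: the generated model is normally built from a dict
# via ``from_dict`` (which relies on jobbing.util.deserialize_model being
# importable); the field values below are made up for illustration.
if __name__ == "__main__":
    org = Org.from_dict({"org_id": 1, "org_name": "Example Org", "org_media_id": 42})
    print(org.org_id, org.org_name, org.org_media_id)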
|
StarcoderdataPython
|
9677327
|
import time
import os
import torch
import math
import sys
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
#
import numpy as np
from matplotlib.patches import Circle, Polygon, Ellipse
from matplotlib.collections import PatchCollection
def calc_linear_covariance(x):
peds = {}
# find all peds and their poses
for step in range(len(x)):
for ped in x[step]:
if ped[0] not in peds:
peds[ped[0]] = {"pose": [], "start_step": step}
peds[ped[0]]["pose"].append(ped[1:])
peds[ped[0]].update({"end_step": step})
    # remove peds with only one pose (iterate over a copy so deleting keys is safe)
    for ped in list(peds):
        if len(peds[ped]["pose"]) < 2:
            del peds[ped]
# find vel
for ped in peds:
peds[ped]["pose"] = np.array(peds[ped]["pose"])
peds[ped]["vel"] = peds[ped]["pose"][1] - peds[ped]["pose"][0]
    # create linear approximation from the first pose and the per-step velocity
    for ped in peds:
        peds[ped]["linear"] = peds[ped]["pose"].copy()
        for step in range(peds[ped]["end_step"]+1-peds[ped]["start_step"]):
            peds[ped]["linear"][step] = peds[ped]["pose"][0] + peds[ped]["vel"]*step
peds[ped]["cov"] = np.abs(peds[ped]["pose"] - peds[ped]["linear"])
return peds
def plot_cov(ped_cov, isok=3):
colors = ["b", "r", "g", "y"]*3
i = 0
fig, ax = plt.subplots(1, 1)
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax.set_xlabel('distance, m')
ax.set_ylabel('distance, m')
ax.set_title("gt covariance")
ax.grid(True)
for ped in ped_cov:
ax.plot(ped_cov[ped]["pose"][:, 0],
ped_cov[ped]["pose"][:, 1],
colors[i]+"o", label="input ped "+str(ped))
ax.plot(ped_cov[ped]["linear"][:, 0],
ped_cov[ped]["linear"][:, 1],
colors[i]+"*", label="linear ped "+str(ped))
for n in range(len(ped_cov[ped]["pose"])):
ax.add_patch(Ellipse(xy=(ped_cov[ped]["pose"][n][0], ped_cov[ped]["pose"][n][1]),
width=ped_cov[ped]["cov"][n][0]*isok,
height=ped_cov[ped]["cov"][n][1]*isok,
alpha=0.1, edgecolor=colors[i], facecolor=colors[i]))
i+=1
ax.legend(loc='best', frameon=False)
plt.pause(2)
plt.close(fig)
class Validator():
def __init__(self, validation_param, sfm, dataloader, do_vis=False):
self.dataloader = dataloader
self.sfm = sfm
self.vp = validation_param
self.dataloader.reset_batch_pointer(valid=True)
plt.ion()
self.norms = []
self.do_vis = do_vis
self.save_data = []
def validate(self):
self.save_data = []
self.dataloader.reset_batch_pointer(valid=True)
log_folder = 1
while os.path.isdir('log/'+str(log_folder)):
log_folder += 1
os.mkdir('log/'+str(log_folder))
w = SummaryWriter('log/'+str(log_folder))
for batch in range(0, 300):
self.dataloader.reset_batch_pointer(valid=True)
x, y, d, numPedsList, PedsList, target_ids = self.dataloader.next_batch()
if np.linalg.norm(x[0][0][0] - x[0][19][0])<1.0:
continue
starting_pose = self.dataloader.get_starting_pose(
PedsList[0][0:1], x[0][0:1])
goals_ = self.dataloader.get_ped_goals(PedsList[0], x[0])
starting_time = self.dataloader.get_starting_time(
PedsList[0], x[0])
ped_cov = calc_linear_covariance(x[0])
plot_cov(ped_cov)
self.vp.update_num_ped(len(starting_pose))
ped_poses = []
# print (x[0][0])
for i, key in enumerate(x[0][0]):
ped_poses.append([x[0][0][i][1], x[0][0][i][2], 0, 0])
self.vp.index_to_id[i] = x[0][0][i][0]
# print (ped_poses)
# print (v.index_to_id)
goals = []
for idx in range(0, len(ped_poses)):
goals.append(goals_[self.vp.index_to_id[idx]])
self.vp.param.input_state = torch.tensor(ped_poses)
self.vp.param.goal = torch.tensor(goals)
stacked_trajectories_for_visualizer = self.vp.param.input_state.clone()
if self.do_vis:
fig, ax = plt.subplots(1, 1)
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
plt.pause(0.01)
ax.set_xlabel('distance, m')
ax.set_ylabel('distance, m')
ax.set_title("prediction visualsiation")
cur_delta_pred = 0
self.save_data.append("new set!")
for i in range(1, 13):
# ADD A PERSON (stack to bottom)
if PedsList[0][i] != list(self.vp.index_to_id.values()):
for ped_id in PedsList[0][i]:
if ped_id not in list(self.vp.index_to_id.values()):
pose = self.dataloader.get_starting_pose(
PedsList[0][i:i+1], x[0][i:i+1])[ped_id]
self.vp.param.input_state = torch.cat(
(self.vp.param.input_state, torch.tensor([[pose[0], pose[1], 0, 0], ])))
self.vp.param.num_ped += 1
self.vp.param.generateMatrices()
ped_goal = goals_[ped_id]
self.vp.param.goal = torch.cat((self.vp.param.goal, torch.tensor(
[[ped_goal[0], ped_goal[1]], ], dtype=self.vp.param.goal.dtype)))
self.vp.index_to_id[self.vp.param.goal.shape[0]-1] = ped_id
# REMOVE PERSONS
rows_to_remove = []
for key in self.vp.index_to_id.keys():
if self.vp.index_to_id[key] not in PedsList[0][i]:
rows_to_remove.append(key)
rows_to_remove.sort(reverse=True)
new_index_to_id = {}
del_counter = len(rows_to_remove)
for j in range(self.vp.param.input_state.shape[0]-1, -1, -1):
if j in rows_to_remove:
self.vp.param.input_state = torch.cat(
(self.vp.param.input_state[0:j, :], self.vp.param.input_state[1+j:, :]))
self.vp.param.goal = torch.cat(
(self.vp.param.goal[0:j, :], self.vp.param.goal[1+j:, :]))
del_counter -= 1
self.vp.param.num_ped -= 1
else:
new_index_to_id[j-del_counter] = self.vp.index_to_id[j]
self.vp.index_to_id = new_index_to_id.copy()
self.vp.param.generateMatrices()
# REMOVE PERSONS END
if self.do_vis:
ax.plot(self.vp.param.input_state[:, 0:1].tolist(), self.vp.param.input_state[:, 1:2].tolist(
), "g*", markersize=3, label="predicted")
ax.plot(torch.tensor(x[0][i-1])[:, 1:2].tolist(), torch.tensor(
x[0][i-1])[:, 2:3].tolist(), "r*", markersize=3, label="GT")
ax.grid(True)
if i == 1:
ax.legend(loc='best', frameon=False)
                    ax.set_title(
                        "prediction visualisation\n cur_delta_pred " + str(cur_delta_pred))
plt.draw()
plt.show()
plt.pause(0.1)
rf, af = self.sfm.calc_forces(self.vp.param.input_state, self.vp.param.goal, self.vp.param.pedestrians_speed,
self.vp.param.robot_speed, self.vp.param.k, self.vp.param.alpha, self.vp.param.ped_radius, self.vp.param.ped_mass, self.vp.param.betta)
F = rf + af
self.vp.param.input_state = self.sfm.pose_propagation(
F, self.vp.param.input_state.clone(), self.vp.DT, self.vp.param.pedestrians_speed, self.vp.param.robot_speed)
stacked_trajectories_for_visualizer = torch.cat(
(stacked_trajectories_for_visualizer, self.vp.param.input_state.clone()))
cur_delta_pred = torch.norm(
self.vp.param.input_state[:, 0:2] - torch.tensor(x[0][i])[:, 1:3], dim=1)
mean_cur_delta_pred = torch.mean(cur_delta_pred)
w.add_scalar("cur_averaged_delta",
mean_cur_delta_pred, batch*100+i)
stroka = "\ncur_delta_pred " + str(cur_delta_pred.tolist())
# print(stroka, end="\r")
self.save_data.append(stroka)
self.norms.append(mean_cur_delta_pred)
if self.do_vis:
plt.close()
w.add_scalar("mean_averaged_delta", torch.mean(
torch.tensor((self.norms))), 0)
w.add_scalar("mean_averaged_delta", torch.mean(
torch.tensor((self.norms))), 1)
def print_result(self):
print(torch.mean(torch.tensor((self.norms))))
def get_result(self):
return torch.mean(torch.tensor((self.norms)))
def save_result(self, filename=None, data=None):
from contextlib import redirect_stdout
if filename is None:
self.dir = os.path.dirname(os.path.abspath(__file__))
filename = self.dir + "/result.txt"
with open(filename, 'w') as file, redirect_stdout(file):
print(self.save_data)
print(torch.mean(torch.tensor((self.norms))))
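# A hedged, synthetic sanity check of calc_linear_covariance (running it still
# requires this module's heavy imports such as torch, tensorboard and matplotlib).
# Each frame is assumed to be a list of [ped_id, x, y] rows, as used in
# validate() above; a pedestrian walking at constant velocity should get a
# near-zero covariance except where it deviates from the straight line.
if __name__ == "__main__":
    frames = [[[1, 0.0, 0.0]],
              [[1, 1.0, 0.0]],
              [[1, 2.0, 0.1]]]
    peds = calc_linear_covariance(frames)
    print(peds[1]["cov"])  # only the last step deviates (by 0.1 in y)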
|
StarcoderdataPython
|
9712042
|
'''
@author: <NAME>
@version: 1.0
=======================
This script creates an oracle module for the training of the parser.
'''
'''
******* ********* *********
******* imports *********
******* ********* *********
'''
from collections import deque
from classes import *
'''
******* ********* *********
******* functions *********
******* ********* *********
'''
def can_left_arc(current_state, gold_arcs):
result = False
# check if buffer_front -> stack_top arc is in the gold set
if (current_state.queue[0], current_state.stack[-1]) in gold_arcs:
result = True
return result
def can_right_arc(current_state, gold_arcs):
result = False
# check if stack_top -> buffer_front arc is in the gold set
if (current_state.stack[-1], current_state.queue[0]) in gold_arcs:
result = True
return result
def can_reduce(current_state, gold_arcs):
stack_top = current_state.stack[-1]
# extract the number of heads assigned to stack_top from the predicted arc set
head_count = len([tup[0] for tup in current_state.arcs if tup[1] == stack_top])
# if no head is assigned return false
if head_count < 1:
return False
has_all_children = False
# extract list of children for stack_top from the gold arc set
gold_depedants = [tup[1] for tup in gold_arcs if tup[0] == stack_top]
#check if stack_top has children
if gold_depedants:
# extract list of children for stack_top from the predicted arc set
depedants = [tup[1] for tup in current_state.arcs if tup[0] == stack_top]
# get count of missing children
missing_children_count = len([child for child in gold_depedants+depedants if (child in gold_depedants) and (child not in depedants)])
if missing_children_count == 0:
has_all_children = True
else:
has_all_children = True
# if has a head and all children return true (if no head, we would have exited the function already)
return has_all_children
def get_oracle_transition(current_state, gold_arcs):
#find the next possible transition from the gold arc set
if can_left_arc(current_state, gold_arcs):
return Transition (1)
elif can_right_arc(current_state, gold_arcs):
return Transition (2)
elif can_reduce(current_state, gold_arcs):
return Transition (3)
else:
return Transition (0)
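# A hedged illustration of the arc convention used above (gold arcs are
# (head, dependent) pairs). The real parser state and Transition classes live in
# the project's ``classes`` module, so the tiny stand-in state below is purely
# hypothetical, and ``from classes import *`` must still resolve for this file
# to import at all.
if __name__ == "__main__":
    class _State(object):
        def __init__(self, stack, queue, arcs):
            self.stack = stack
            self.queue = queue
            self.arcs = arcs

    gold_arcs = {(0, 2), (2, 1)}            # 0 -> 2 and 2 -> 1
    state = _State(stack=[2], queue=[1], arcs=set())
    print(can_left_arc(state, gold_arcs))   # False: buffer front 1 is not the head of stack top 2
    print(can_right_arc(state, gold_arcs))  # True: 2 -> 1 is a gold arc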
|
StarcoderdataPython
|
1696788
|
<filename>Python/minimum-average-difference.py<gh_stars>1-10
# Time: O(n)
# Space: O(1)
# prefix sum
class Solution(object):
def minimumAverageDifference(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = sum(nums)
mn, idx = float("inf"), -1
prefix = 0
for i, x in enumerate(nums):
prefix += x
a = prefix//(i+1)
b = (total-prefix)//(len(nums)-(i+1)) if i+1 < len(nums) else 0
diff = abs(a-b)
if diff < mn:
mn, idx = diff, i
return idx
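# A quick, hedged sanity check using the published example for this problem:
# for [2, 5, 3, 9, 5, 3] the answer is index 3, where the truncated front and
# back averages are both 4.
if __name__ == "__main__":
    print(Solution().minimumAverageDifference([2, 5, 3, 9, 5, 3]))  # expected: 3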
|
StarcoderdataPython
|
3586359
|
<filename>maskrcnn_benchmark/modeling/roi_heads/attribute_head/loss.py<gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.utils import cat
class AttributeHeadLossComputation(object):
"""
Computes the loss for attribute head
"""
def __init__(
self,
loss_weight=0.1,
num_attri_cat=201,
max_num_attri=10,
attribute_sampling=True,
attribute_bgfg_ratio=5,
use_binary_loss=True,
pos_weight=1,
):
self.loss_weight = loss_weight
self.num_attri_cat = num_attri_cat
self.max_num_attri = max_num_attri
self.attribute_sampling = attribute_sampling
self.attribute_bgfg_ratio = attribute_bgfg_ratio
self.use_binary_loss = use_binary_loss
self.pos_weight = pos_weight
def __call__(self, proposals, attri_logits):
"""
        Calculate attribute loss
"""
attributes = cat([proposal.get_field("attributes") for proposal in proposals], dim=0)
assert attributes.shape[0] == attri_logits.shape[0]
# generate attribute targets
attribute_targets, selected_idxs = self.generate_attributes_target(attributes)
attri_logits = attri_logits[selected_idxs]
attribute_targets = attribute_targets[selected_idxs]
attribute_loss = self.attribute_loss(attri_logits, attribute_targets)
return attribute_loss * self.loss_weight
def generate_attributes_target(self, attributes):
"""
        from a list of attribute indices to [1,0,1,0,0,1] form
"""
assert self.max_num_attri == attributes.shape[1]
num_obj = attributes.shape[0]
with_attri_idx = (attributes.sum(-1) > 0).long()
without_attri_idx = 1 - with_attri_idx
num_pos = int(with_attri_idx.sum())
num_neg = int(without_attri_idx.sum())
assert num_pos + num_neg == num_obj
if self.attribute_sampling:
num_neg = min(num_neg, num_pos * self.attribute_bgfg_ratio) if num_pos > 0 else 1
attribute_targets = torch.zeros((num_obj, self.num_attri_cat), device=attributes.device).float()
if not self.use_binary_loss:
attribute_targets[without_attri_idx > 0, 0] = 1.0
pos_idxs = torch.nonzero(with_attri_idx).squeeze(1)
perm = torch.randperm(num_obj - num_pos, device=attributes.device)[:num_neg]
neg_idxs = torch.nonzero(without_attri_idx).squeeze(1)[perm]
selected_idxs = torch.cat((pos_idxs, neg_idxs), dim=0)
assert selected_idxs.shape[0] == num_neg + num_pos
for idx in torch.nonzero(with_attri_idx).squeeze(1).tolist():
for k in range(self.max_num_attri):
att_id = int(attributes[idx, k])
if att_id == 0:
break
else:
attribute_targets[idx, att_id] = 1
return attribute_targets, selected_idxs
def attribute_loss(self, logits, labels):
if self.use_binary_loss:
all_loss = F.binary_cross_entropy_with_logits(logits, labels, pos_weight=torch.FloatTensor([self.pos_weight] * self.num_attri_cat).cuda())
return all_loss
else:
# soft cross entropy
            # cross entropy on attributes deteriorates the box head, even with 0.1 weight (although bottom-up top-down uses cross entropy for attributes)
all_loss = -F.softmax(logits, dim=-1).log()
all_loss = (all_loss * labels).sum(-1) / labels.sum(-1)
return all_loss.mean()
def make_roi_attribute_loss_evaluator(cfg):
loss_evaluator = AttributeHeadLossComputation(
cfg.MODEL.ROI_ATTRIBUTE_HEAD.ATTRIBUTE_LOSS_WEIGHT,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_ATTRIBUTES,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.MAX_ATTRIBUTES,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.ATTRIBUTE_BGFG_SAMPLE,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.ATTRIBUTE_BGFG_RATIO,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.USE_BINARY_LOSS,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.POS_WEIGHT,
)
return loss_evaluator
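# A hedged, CPU-only sketch of generate_attributes_target with toy shapes.
# Importing this module still requires maskrcnn_benchmark; the literal arguments
# below stand in for the cfg.MODEL.ROI_ATTRIBUTE_HEAD values that
# make_roi_attribute_loss_evaluator normally reads.
if __name__ == "__main__":
    comp = AttributeHeadLossComputation(num_attri_cat=5, max_num_attri=3)
    # one object with attributes {1, 2}, one object with no attributes
    attributes = torch.tensor([[1, 2, 0], [0, 0, 0]])
    targets, selected_idxs = comp.generate_attributes_target(attributes)
    print(targets.shape, selected_idxs.tolist())  # torch.Size([2, 5]) and e.g. [0, 1]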
|
StarcoderdataPython
|
6585187
|
# Copyright 2018 SpiderOak, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import click
from ..util import format_json, format_python, close_kernel
def displayhook(value):
if value is None:
return
__builtins__['_'] = None
formatted = None
if isinstance(value, dict) or isinstance(value, list):
try:
formatted = format_json(value)
except Exception:
pass
if formatted is None:
formatted = format_python(value)
click.echo(formatted, nl=False)
__builtins__['_'] = value
def cli():
from ..main import pass_env
from .client import ApiClient
from .cache import Cache
from .cache_synchronizer import CacheSynchronizer
@click.command()
@pass_env
def cli(env):
'''Start a Python REPL with a Kubernetes API client object.'''
try:
import readline
except Exception:
pass
import code
old_displayhook = sys.displayhook
sys.displayhook = displayhook
try:
with ApiClient(env) as api:
context = {
'api': api,
'Cache': Cache,
'CacheSynchronizer': CacheSynchronizer,
}
shell = code.InteractiveConsole(context)
shell.interact()
finally:
sys.displayhook = old_displayhook
close_kernel()
return cli
|
StarcoderdataPython
|
9766747
|
<gh_stars>0
# this file came from https://www.caktusgroup.com/blog/2013/06/26/media-root-and-django-tests/
# used to delete all the media files created after a test run
import os
import shutil
import tempfile
from django.conf import settings
from django.test.runner import DiscoverRunner
class TempMediaMixin(object):
"""
Mixin to create MEDIA_ROOT in temp and tear down when complete.
"""
    def setup_test_environment(self):
        """
        Create temp directory and update MEDIA_ROOT and default storage.
        """
        super(TempMediaMixin, self).setup_test_environment()
        # point MEDIA_ROOT at a throwaway temp directory so test uploads never
        # land in (or later delete) the real media folder
        self._temp_media = tempfile.mkdtemp()
        settings.MEDIA_ROOT = self._temp_media
def teardown_test_environment(self):
"""
Delete temp storage.
"""
super(TempMediaMixin, self).teardown_test_environment()
if os.path.isdir(settings.MEDIA_ROOT):
shutil.rmtree(settings.MEDIA_ROOT)
class TemporaryMediaTestSuiteRunner(TempMediaMixin, DiscoverRunner):
"""
Local test suite runner.
"""
|
StarcoderdataPython
|
1756096
|
<filename>main/py-set-symmetric-difference-operation/py-set-symmetric-difference-operation.py<gh_stars>0
def input_set():
raw_input() # ignore n
return set(map(int, raw_input().split()))
print len(input_set() ^ input_set())
|
StarcoderdataPython
|
11315081
|
<reponame>kzborisov/Juliany-Pizza
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from juliany_pizza.menu.models import Category, Ingredient, Product, Stock
UserModel = get_user_model()
class TestCartViews(TestCase):
def setUp(self):
UserModel.objects.create(username='admin')
category = Category.objects.create(name='django')
Ingredient.objects.create(name='test-ingredient')
product = Product.objects.create(
name='test-product',
active=True,
category=category,
)
product_2 = Product.objects.create(
name='test-product-2',
active=True,
category=category,
)
Stock.objects.create(
size='L',
price=10,
product=product,
)
Stock.objects.create(
size='Xl',
price=2,
product=product_2,
)
# Add data to the session
self.client.post(
reverse('cart add'),
{
'itemId': 1,
'action': 'post'
},
xhr=True,
)
self.client.post(
reverse('cart add'),
{
'itemId': 2,
'action': 'post'
},
xhr=True,
)
def test_cart_url(self):
"""
Test cart summary url.
"""
response = self.client.get(
reverse('cart summary'),
)
self.assertEqual(response.status_code, 200)
def test_cart_add(self):
"""
Test add to cart.
"""
response = self.client.post(
reverse('cart add'),
{
'itemId': 1,
'action': 'post'
},
xhr=True,
)
self.assertEqual(
response.json(),
{
'qty': 3,
'subtotal': '22.00',
}
)
response = self.client.post(
reverse('cart add'),
{
'itemId': 2,
'action': 'post'
},
xhr=True,
)
self.assertEqual(
response.json(),
{
'qty': 4,
'subtotal': '24.00',
}
)
def test_cart_delete(self):
response = self.client.post(
reverse('cart delete'),
{
'itemId': 2,
'action': 'post'
},
xhr=True,
)
self.assertEqual(
response.json(),
{
'subtotal': '10.00',
'qty': 1,
'final_price': '12.00',
}
)
def test_cart_update(self):
response = self.client.post(
reverse('cart update'),
{
'itemId': 2,
'itemQty': 3,
'action': 'post'
},
xhr=True,
)
self.assertEqual(
response.json(),
{
'subtotal': '16.00',
'qty': 4,
'final_price': '18.00',
'item_total_price': '6.00',
}
)
|
StarcoderdataPython
|
6542217
|
# coding=utf-8
"""
The MIT License
Copyright (c) 2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Test to measure the performance of merging and sorting trends
with two different implementations.
"""
from collections import defaultdict
import time
def mergeAndSortTrends(trends):
mergedList = []
for t in trends:
found = False
for m in mergedList:
if t.name == m.name:
m.time += t.time
found = True
break
if not found:
mergedList.append(t)
mergedList = sorted(mergedList, key=lambda trend: trend.time, reverse=True)
results = []
for t in mergedList:
results.append({"name": t.name, "value": t.time})
return results
def groupSumAndSortTrends(trends):
totals = defaultdict(int)
for trend in trends:
totals[trend.name] += trend.time
trends = [{'name': key, 'value': value} for key, value in totals.items()]
return sorted(trends, key=lambda x: x['value'], reverse=True)
class Trend:
def __init__(self, name, time):
self.name = name
self.time = time
# Test starts
trends = []
for i in range(1000000):
trends.append(Trend(str(i % 100), i))
start_time = time.time()
# Method 1
trends = groupSumAndSortTrends(trends)
# Method 2
#trends = mergeAndSortTrends(trends)
print("--- %s seconds ---" % (time.time() - start_time))
print(trends)
|
StarcoderdataPython
|
9696609
|
<filename>test_f_strings.py
import datetime
import decimal
import unittest
from f_strings import f
class TestFStrings(unittest.TestCase):
def test_fred(self):
name = 'Fred'
age = 50
anniversary = datetime.date(1991, 10, 12)
self.assertEqual(
f('My name is {name}, my age next year is {age+1}, '
'my anniversary is {anniversary:%A, %B %d, %Y}.'),
'My name is Fred, my age next year is 51, '
'my anniversary is Saturday, October 12, 1991.')
self.assertEqual(
f('He said his name is {name!r}.'),
"He said his name is 'Fred'.")
def test_decimal(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(
f('result: {value:{width}.{precision}}'),
'result: 12.35')
def test_empty(self):
with self.assertRaises(ValueError):
f('{}')
with self.assertRaises(ValueError):
f('{ }')
|
StarcoderdataPython
|
11384650
|
<reponame>styx-dev/pystyx
import json
from typing import Callable, Dict
from munch import munchify
# Adapted from this response in Stackoverflow
# http://stackoverflow.com/a/19053800/1072990
def _to_camel_case(snake_str):
components = snake_str.split("_")
# We capitalize the first letter of each component except the first one
# with the 'capitalize' method and join them together.
return components[0] + "".join(x.capitalize() if x else "_" for x in components[1:])
class TomlFunction:
_functions: Dict[str, Callable] = {}
@staticmethod
def _parse_functions(functions_toml):
declared_functions = set(functions_toml.functions)
decorated_functions = set(TomlFunction._functions.keys())
extra_declared_functions = declared_functions - decorated_functions
extra_decorated_functions = decorated_functions - declared_functions
if extra_declared_functions:
functions = ", ".join(
sorted(func_name for func_name in extra_declared_functions)
)
msg = f"Found extra functions in functions.styx that were not defined!\nFunction names were: {functions}"
raise TypeError(msg)
if extra_decorated_functions:
functions = ", ".join(
sorted(func_name for func_name in extra_decorated_functions)
)
msg = f"Found extra functions decorated with @toml_function that were not declared in functions.styx!\nFunction names were: {functions}"
raise TypeError(msg)
return TomlFunction._functions
@staticmethod
def parse_functions(functions_toml):
if hasattr(functions_toml, "functions"):
if not isinstance(functions_toml.functions, list):
raise TypeError(
"functions.styx was malformed. 'functions' key must be a list."
)
return TomlFunction._parse_functions(functions_toml)
else:
raise TypeError("functions.styx was malformed. No 'functions' list found.")
class styx_function:
function: Callable
_functions = {}
def __init__(self, function: Callable):
self.function = function
function_name = function.__name__
if function_name in TomlFunction._functions:
raise RuntimeError(
f"Duplicate name found in toml_functions: {function_name}"
)
TomlFunction._functions[function_name] = function
def __call__(self, *args, **kwargs):
return self.function(*args, **kwargs)
@styx_function
def to_camel_case(snake_str):
return _to_camel_case(snake_str)
@styx_function
def parse_json(s):
return munchify(json.loads(s))
@styx_function
def parse_bool(s):
return s.lower() in ("true", "1", "t", "y", "yes")
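# A hedged usage sketch: validate that the three decorated functions above match a
# declared function list. The munchify call stands in for whatever parsing a real
# functions.styx file would produce.
if __name__ == "__main__":
    functions_toml = munchify({"functions": ["to_camel_case", "parse_json", "parse_bool"]})
    registry = TomlFunction.parse_functions(functions_toml)
    print(registry["to_camel_case"]("snake_case_name"))  # snakeCaseName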
|
StarcoderdataPython
|
4838062
|
<filename>neutron_taas/services/taas/drivers/linux/ovs_taas.py<gh_stars>10-100
# Copyright (C) 2015 Ericsson AB
# Copyright (c) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.common import ovs_lib
from neutron.agent.linux import utils
from neutron.conf.agent import common
from neutron_lib import constants as n_consts
from neutron_taas.services.taas.agents.extensions import taas as taas_base
import neutron_taas.services.taas.drivers.linux.ovs_constants \
as taas_ovs_consts
import neutron_taas.services.taas.drivers.linux.ovs_utils as taas_ovs_utils
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
TaaS_DRIVER_NAME = 'Taas OVS driver'
class OVSBridge_tap_extension(ovs_lib.OVSBridge):
def __init__(self, br_name, root_helper):
super(OVSBridge_tap_extension, self).__init__(br_name)
class OvsTaasDriver(taas_base.TaasAgentDriver):
def __init__(self):
super(OvsTaasDriver, self).__init__()
LOG.debug("Initializing Taas OVS Driver")
self.agent_api = None
self.root_helper = common.get_root_helper(cfg.CONF)
def initialize(self):
self.int_br = self.agent_api.request_int_br()
self.tun_br = self.agent_api.request_tun_br()
self.tap_br = OVSBridge_tap_extension('br-tap', self.root_helper)
# Prepare OVS bridges for TaaS
self.setup_ovs_bridges()
# Setup key-value manager for ingress BCMC flows
self.bcmc_kvm = taas_ovs_utils.key_value_mgr(4096)
def periodic_tasks(self, args=None):
#
# Regenerate the flow in br-tun's TAAS_SEND_FLOOD table
# to ensure all existing tunnel ports are included.
#
self.update_tunnel_flood_flow()
def setup_ovs_bridges(self):
#
# br-int : Integration Bridge
# br-tap : Tap Bridge
# br-tun : Tunnel Bridge
#
# Create br-tap
self.tap_br.create()
# Connect br-tap to br-int and br-tun
self.int_br.add_patch_port('patch-int-tap', 'patch-tap-int')
self.tap_br.add_patch_port('patch-tap-int', 'patch-int-tap')
self.tun_br.add_patch_port('patch-tun-tap', 'patch-tap-tun')
self.tap_br.add_patch_port('patch-tap-tun', 'patch-tun-tap')
# Get patch port IDs
patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int')
patch_tap_tun_id = self.tap_br.get_port_ofport('patch-tap-tun')
patch_tun_tap_id = self.tun_br.get_port_ofport('patch-tun-tap')
# Purge all existing Taas flows from br-tap and br-tun
self.tap_br.delete_flows(table=0)
self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC)
self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM)
self.tun_br.delete_flows(table=0,
in_port=patch_tun_tap_id)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_UCAST)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_FLOOD)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_CLASSIFY)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_RESPOND)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_RESPOND)
#
# Configure standard TaaS flows in br-tap
#
self.tap_br.add_flow(table=0,
priority=1,
in_port=patch_tap_int_id,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_RECV_LOC)
self.tap_br.add_flow(table=0,
priority=1,
in_port=patch_tap_tun_id,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_RECV_REM)
self.tap_br.add_flow(table=0,
priority=0,
actions="drop")
self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC,
priority=0,
actions="output:%s" % str(patch_tap_tun_id))
self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM,
priority=0,
actions="drop")
#
# Configure standard Taas flows in br-tun
#
self.tun_br.add_flow(table=0,
priority=1,
in_port=patch_tun_tap_id,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_SEND_UCAST)
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_UCAST,
priority=0,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_SEND_FLOOD)
flow_action = self._create_tunnel_flood_flow_action()
if flow_action != "":
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD,
priority=0,
actions=flow_action)
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
priority=2,
reg0=0,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_DST_CHECK)
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
priority=1,
reg0=1,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_DST_CHECK)
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
priority=1,
reg0=2,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_SRC_CHECK)
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK,
priority=0,
actions="drop")
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK,
priority=0,
actions="drop")
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND,
priority=2,
reg0=0,
actions="output:%s" % str(patch_tun_tap_id))
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND,
priority=1,
reg0=1,
actions=(
"output:%s,"
"move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID"
"[0..11],mod_vlan_vid:2,output:in_port" %
str(patch_tun_tap_id)))
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_RESPOND,
priority=1,
actions=(
"learn(table=%s,hard_timeout=60,"
"priority=1,NXM_OF_VLAN_TCI[0..11],"
"load:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID"
"[0..11],load:0->NXM_OF_VLAN_TCI[0..11],"
"output:NXM_OF_IN_PORT[])" %
taas_ovs_consts.TAAS_SEND_UCAST))
return
def consume_api(self, agent_api):
self.agent_api = agent_api
def create_tap_service(self, tap_service):
taas_id = tap_service['taas_id']
port = tap_service['port']
# Get OVS port id for tap service port
ovs_port = self.int_br.get_vif_port_by_id(port['id'])
ovs_port_id = ovs_port.ofport
# Get VLAN id for tap service port
port_dict = self.int_br.get_port_tag_dict()
port_vlan_id = port_dict[ovs_port.port_name]
# Get patch port IDs
patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')
patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int')
# Add flow(s) in br-int
self.int_br.add_flow(table=0,
priority=25,
in_port=patch_int_tap_id,
dl_vlan=taas_id,
actions="mod_vlan_vid:%s,output:%s" %
(str(port_vlan_id), str(ovs_port_id)))
# Add flow(s) in br-tap
self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC,
priority=1,
dl_vlan=taas_id,
actions="output:in_port")
self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM,
priority=1,
dl_vlan=taas_id,
actions="output:%s" % str(patch_tap_int_id))
# Add flow(s) in br-tun
for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
self.tun_br.add_flow(table=n_consts.TUN_TABLE[tunnel_type],
priority=1,
tun_id=taas_id,
actions=(
"move:NXM_OF_VLAN_TCI[0..11]->"
"NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID"
"[0..11]->NXM_OF_VLAN_TCI[0..11],"
"resubmit(,%s)" %
taas_ovs_consts.TAAS_CLASSIFY))
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK,
priority=1,
tun_id=taas_id,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_DST_RESPOND)
#
# Disable mac-address learning in the Linux bridge to which
# the OVS port is attached (via the veth pair) if the system
# uses OVSHybridIptablesFirewallDriver (Linux bridge & OVS).
# This will effectively turn the bridge into a hub, ensuring
# that all incoming mirrored traffic reaches the tap interface
# (used for attaching a VM to the bridge) irrespective of the
# destination mac addresses in mirrored packets.
#
# Get hybrid plug info
vif_details = port.get('binding:vif_details')
is_hybrid_plug = vif_details.get('ovs_hybrid_plug')
if is_hybrid_plug:
ovs_port_name = ovs_port.port_name
linux_br_name = ovs_port_name.replace('qvo', 'qbr')
utils.execute(['brctl', 'setageing', linux_br_name, 0],
run_as_root=True, privsep_exec=True)
return
def delete_tap_service(self, tap_service):
taas_id = tap_service['taas_id']
# Get patch port ID
patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')
# Delete flow(s) from br-int
self.int_br.delete_flows(table=0,
in_port=patch_int_tap_id,
dl_vlan=taas_id)
# Delete flow(s) from br-tap
self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC,
dl_vlan=taas_id)
self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM,
dl_vlan=taas_id)
# Delete flow(s) from br-tun
for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
self.tun_br.delete_flows(table=n_consts.TUN_TABLE[tunnel_type],
tun_id=taas_id)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK,
tun_id=taas_id)
self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK,
tun_id=taas_id)
return
def create_tap_flow(self, tap_flow):
taas_id = tap_flow['taas_id']
port = tap_flow['port']
direction = tap_flow['tap_flow']['direction']
# Get OVS port id for tap flow port
ovs_port = self.int_br.get_vif_port_by_id(port['id'])
ovs_port_id = ovs_port.ofport
# Get patch port ID
patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')
# Add flow(s) in br-int
if direction == 'OUT' or direction == 'BOTH':
self.int_br.add_flow(table=0,
priority=20,
in_port=ovs_port_id,
actions="normal,mod_vlan_vid:%s,output:%s" %
(str(taas_id), str(patch_int_tap_id)))
if direction == 'IN' or direction == 'BOTH':
port_mac = tap_flow['port_mac']
#
# Note: The ingress side flow (for unicast traffic) should
# include a check for the 'VLAN id of the Neutron
# network the port belongs to' + 'MAC address of the
# port', to comply with the requirement that port MAC
# addresses are unique only within a Neutron network.
# Unfortunately, at the moment there is no clean way
# to implement such a check, given OVS's handling of
# VLAN tags and Neutron's use of the NORMAL action in
# br-int.
#
# We are therefore temporarily disabling the VLAN id
# check until a mechanism is available to implement
# it correctly. The {broad,multi}cast flow, which is
# also dependent on the VLAN id, has been disabled
# for the same reason.
#
# Get VLAN id for tap flow port
# port_dict = self.int_br.get_port_tag_dict()
# port_vlan_id = port_dict[ovs_port.port_name]
self.int_br.add_flow(table=0,
priority=20,
# dl_vlan=port_vlan_id,
dl_dst=port_mac,
actions="normal,mod_vlan_vid:%s,output:%s" %
(str(taas_id), str(patch_int_tap_id)))
# self._add_update_ingress_bcmc_flow(port_vlan_id,
# taas_id,
# patch_int_tap_id)
# Add flow(s) in br-tun
for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
self.tun_br.add_flow(table=n_consts.TUN_TABLE[tunnel_type],
priority=1,
tun_id=taas_id,
actions=(
"move:NXM_OF_VLAN_TCI[0..11]->"
"NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID"
"[0..11]->NXM_OF_VLAN_TCI[0..11],"
"resubmit(,%s)" %
taas_ovs_consts.TAAS_CLASSIFY))
self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK,
priority=1,
tun_id=taas_id,
actions="resubmit(,%s)" %
taas_ovs_consts.TAAS_SRC_RESPOND)
return
def delete_tap_flow(self, tap_flow):
port = tap_flow['port']
direction = tap_flow['tap_flow']['direction']
# Get OVS port id for tap flow port
ovs_port = self.int_br.get_vif_port_by_id(port['id'])
ovs_port_id = ovs_port.ofport
# Delete flow(s) from br-int
if direction == 'OUT' or direction == 'BOTH':
self.int_br.delete_flows(table=0,
in_port=ovs_port_id)
if direction == 'IN' or direction == 'BOTH':
port_mac = tap_flow['port_mac']
#
# The VLAN id related checks have been temporarily disabled.
# Please see comment in create_tap_flow() for details.
#
# taas_id = tap_flow['taas_id']
# Get VLAN id for tap flow port
# port_dict = self.int_br.get_port_tag_dict()
# port_vlan_id = port_dict[ovs_port.port_name]
# Get patch port ID
# patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')
self.int_br.delete_flows(table=0,
# dl_vlan=port_vlan_id,
dl_dst=port_mac)
# self._del_update_ingress_bcmc_flow(port_vlan_id,
# taas_id,
# patch_int_tap_id)
return
def update_tunnel_flood_flow(self):
flow_action = self._create_tunnel_flood_flow_action()
if flow_action != "":
self.tun_br.mod_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD,
actions=flow_action)
def _create_tunnel_flood_flow_action(self):
args = ["ovs-vsctl", "list-ports", "br-tun"]
res = utils.execute(args, run_as_root=True, privsep_exec=True)
port_name_list = res.splitlines()
flow_action = ("move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],"
"mod_vlan_vid:1")
tunnel_ports_exist = False
for port_name in port_name_list:
if (port_name != 'patch-int') and (port_name != 'patch-tun-tap'):
flow_action += (",output:%d" %
self.tun_br.get_port_ofport(port_name))
tunnel_ports_exist = True
if tunnel_ports_exist:
return flow_action
else:
return ""
def _create_ingress_bcmc_flow_action(self, taas_id_list, out_port_id):
flow_action = "normal"
for taas_id in taas_id_list:
flow_action += (",mod_vlan_vid:%d,output:%d" %
(taas_id, out_port_id))
return flow_action
#
# Adds or updates a special flow in br-int to mirror (duplicate and
# redirect to 'out_port_id') all ingress broadcast/multicast traffic,
# associated with a VLAN, to possibly multiple tap service instances.
#
def _add_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
# Add a tap service instance affiliation with VLAN
self.bcmc_kvm.affiliate(vlan_id, taas_id)
# Find all tap service instances affiliated with VLAN
taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)
#
# Add/update flow to mirror ingress BCMC traffic, associated
# with VLAN, to all affiliated tap-service instances.
#
flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
out_port_id)
self.int_br.add_flow(table=0,
priority=20,
dl_vlan=vlan_id,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions=flow_action)
return
#
# Removes or updates a special flow in br-int to mirror (duplicate
# and redirect to 'out_port_id') all ingress broadcast/multicast
# traffic, associated with a VLAN, to possibly multiple tap-service
# instances.
#
def _del_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
# Remove a tap-service instance affiliation with VLAN
self.bcmc_kvm.unaffiliate(vlan_id, taas_id)
# Find all tap-service instances affiliated with VLAN
taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)
#
# If there are tap service instances affiliated with VLAN, update
# the flow to mirror ingress BCMC traffic, associated with VLAN,
# to all of them. Otherwise, remove the flow.
#
if taas_id_list:
flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
out_port_id)
self.int_br.add_flow(table=0,
priority=20,
dl_vlan=vlan_id,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions=flow_action)
else:
self.int_br.delete_flows(table=0,
dl_vlan=vlan_id,
dl_dst=("01:00:00:00:00:00/"
"01:00:00:00:00:00"))
return
|
StarcoderdataPython
|
5096024
|
<reponame>MateusPsi/erp-Core-N400<filename>N400 ERP CORE_1.py
#!/usr/bin/env python
# coding: utf-8
# ---
# title: ERP Core N400 preprocessing in MNE-Python
# date: 2021-02-25
# image:
# preview_only: true
# tags:
# - Python
# - EEG
# - Preprocessing
# categories:
# - Python
# - EEG
# - English
# summary: "Replicating Matlab's ERP CORE pipeline in MNE-Python."
# copyright_license:
# enable: true
#
# ---
# <NAME> is one of the greatest authorities in EEG data processing and analysis. His lab has launched the ERP CORE database, showing how to preprocess data and obtain some of the most studied ERP waveforms in cognitive neuroscience.
#
# Luck's pipelines are available as Matlab scripts to be run with the [EEGLAB](https://sccn.ucsd.edu/eeglab/index.php) and [ERPLAB](https://erpinfo.org/erplab) toolboxes. Here, I adapt the N400 ERP pipeline to [MNE-Python](https://mne.tools/stable/index.html), an open-source alternative for conducting EEG analyses. The goal is to show how MNE-Python works while replicating the pipeline proposed in ERP CORE, not to teach EEG preprocessing, but I hope this material can help people considering switching their EEG analyses from MATLAB to Python, as well as newcomers.
#
# I will walk through each of the N400 ERP CORE scripts. In this first part, I will cover all the preprocessing steps. I'll first demonstrate each step on the data of a single participant and finish with a loop that runs all subjects. ERP CORE data and scripts can be found [here](https://osf.io/thsqg/) at OSF.
# ## Load libraries
#
# Before we start, we load the libraries we will use, libraries are similar to MATLAB's toolboxes. The most important is MNE-Python, of course. But we also load useful libraries to work with data in general: NumPy and Pandas.
# In[1]:
# set working directory
import os
os.chdir('D:\\EEGdata\\Erp CORE\\N400')
import mne
# import some methods directly so we can call them by name
from mne.io import read_raw_eeglab, read_raw
from mne.channels import read_dig_polhemus_isotrak, read_custom_montage
from mne.preprocessing import read_ica
import numpy as np
import pandas as pd
# graphics definitions for jupyter lab
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
get_ipython().run_line_magic('gui', 'qt')
# ## Individual-Subject EEG and ERP Processing Procedures
# ### Script 1: load, reference, downsample, montage and filter
# To start: load the data, identify events (or "triggers"), downsample the data to 256 Hz, re-reference to the average of P9 and P10 (near the mastoids) and create HEOG/VEOG channels.
#
# In MNE, events are loaded as annotations. Annotations are identifiers associated with the data. They can be useful to show experiment sections in plots and to mark noisy segments in the data.
# In[2]:
subj = 1
raw_name = '{0}\\{0}_N400.set'.format(subj)
raw = read_raw_eeglab(raw_name, preload = True)
#Annotations. Uncomment if you want to see the structure
#set(raw.annotations.description)
#set(raw.annotations.duration)
#raw.annotations.onset
#Shift the stimulus event codes forward in time to account for the LCD monitor delay
#(26 ms on our monitor, as measured with a photosensor)
raw.annotations.onset = raw.annotations.onset+.026
# Create events, we'll need them later to make our epochs.
events, event_dict = mne.events_from_annotations(raw)
raw.events = events
#Downsample from the recorded sampling rate of 1024 Hz to 256 Hz to speed data processing
raw, events = raw.resample(256, events = events)
#Rereference to the average of P9 and P10
raw = raw.set_eeg_reference(['P9','P10'])
#Create HEOG channel...
heog_info = mne.create_info(['HEOG'], 256, "eog")
heog_data = raw['HEOG_left'][0]-raw['HEOG_right'][0]
heog_raw = mne.io.RawArray(heog_data, heog_info)
#...and VEOG
veog_info = mne.create_info(['VEOG'], 256, "eog")
veog_data = raw['VEOG_lower'][0]-raw['FP2'][0]
veog_raw = mne.io.RawArray(veog_data, veog_info)
#Append them to the data
raw.add_channels([heog_raw, veog_raw],True)
# Let's take a look at the data at this stage:
# In[3]:
raw.plot(start = 14) # 'start' here is just to get to a not so messy data period
# The original script sets electrode positions with a function that checks which channels are present in the data and maps them to a layout with all possible positions. We will have to do the same by hand, since that is not how things work in MNE. Usually, the data will already provide the right montage or you'll be able to use one of the many montages available in MNE ([documentation here](https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html)).
#
# Here, we create a montage with the `make_dig_montage`. The steps are:
#
# 1. load a montage with all possible positions given by ERP CORE
# 2. correct the names of Fp channels
# 3. make a dictionary of channel positions getting them from the montage
# 4. create the final montage with the dictionary and the fiducial positions from the original montage
# 5. add montage to the raw data object
# In[18]:
# 1 - load montage with all possible positions
montage = read_custom_montage('standard_10_5_cap385.elp')
# 2 - correct FP names
raw.rename_channels(dict(FP1 = 'Fp1', FP2 = 'Fp2'))
# 3 - make dict of channel positions
ch_positions = dict()
for ch in raw.ch_names:
if not (ch in ['VEOG_lower', 'HEOG_right', 'HEOG_left']):
ch_index = montage.ch_names.index(ch)+3
ch_positions.update({ch : montage.dig[ch_index]['r']})
# 4 - create montage with really occuring channels in our data
montage = mne.channels.make_dig_montage(ch_positions,
nasion = montage.dig[1]['r'],
lpa = montage.dig[0]['r'],
rpa = montage.dig[2]['r'])
# 5 add it to the raw object
raw.set_montage(montage, on_missing='ignore')
# Let's take a look at how it turned out:
# In[36]:
fig, ax = plt.subplots(figsize = (4,3))
raw.plot_sensors(show_names = True, show = False, axes = ax)
fig
# Everything seems to be in its right place. The only exceptions are the EOG channels, which appear at the side because they do not have a mapped position. That is no problem, since we won't use them for any topographic operations.
#
# The only thing left to do in this first stage of preprocessing is to filter the data with a high-pass filter.
# In[20]:
raw.filter(l_freq = 0.1, h_freq = None)
# At the end of this first script, data looks like this:
# In[21]:
raw.plot(start = 14)
# Run for all subjects and save an intermediate file for each:
# In[ ]:
# Set path you want to save data
raw_path = "D:/EEGdata/Erp CORE/N400/mne/raw/"
for subj in range(1,41):
raw_name = '{0}\\{0}_N400.set'.format(subj)
raw = read_raw_eeglab(raw_name, preload = True)
#Shift the stimulus event codes forward in time to account for the LCD monitor delay
#(26 ms on our monitor, as measured with a photosensor)
raw.annotations.onset = raw.annotations.onset+.026
# Create events, we'll need them to make our epochs.
events, event_dict = mne.events_from_annotations(raw)
raw.events = events
#Downsample from the recorded sampling rate of 1024 Hz to 256 Hz to speed data processing
raw, events = raw.resample(256, events = events)
#Rereference to the average of P9 and P10
raw = raw.set_eeg_reference(['P9','P10'])
#Create HEOG channel...
heog_info = mne.create_info(['HEOG'], 256, "eog")
heog_data = raw['HEOG_left'][0]-raw['HEOG_right'][0]
heog_raw = mne.io.RawArray(heog_data, heog_info)
    #...and VEOG
veog_info = mne.create_info(['VEOG'], 256, "eog")
veog_data = raw['VEOG_lower'][0]-raw['FP2'][0]
veog_raw = mne.io.RawArray(veog_data, veog_info)
#Append them to the data
raw.add_channels([heog_raw, veog_raw],True)
#Create and set montage
montage = read_custom_montage('standard_10_5_cap385.elp')
ch_positions = dict()
raw.rename_channels(dict(FP1 = 'Fp1', FP2 = 'Fp2'))
for ch in raw.ch_names:
if not (ch in ['VEOG_lower', 'HEOG_right', 'HEOG_left']):
ch_index = montage.ch_names.index(ch)+3
ch_positions.update({ch : montage.dig[ch_index]['r']})
montage = mne.channels.make_dig_montage(ch_positions,
nasion = montage.dig[1]['r'],
lpa = montage.dig[0]['r'],
rpa = montage.dig[2]['r'])
raw.set_montage(montage, on_missing='ignore')
raw.filter(0.1, None, n_jobs = 6)
raw_name = "N400_ERP_CORE_{}-raw.fif".format(subj)
#raw.save(raw_path+raw_name, overwrite=True)
# In the next post, we will get to the artifact rejection steps of preprocessing. Stay tuned!
|
StarcoderdataPython
|
8157739
|
import os
import re
import time
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from nba_api.stats.endpoints import leaguestandings
scatter_vals = ['Team', 'Average Age', 'Wins', 'Losses', 'Pythagorean Wins', 'Pythagorean Losses',
'Margin of Victory', 'Strength of Schedule', 'Simple Rating System', 'Offensive Rating',
'Defensive Rating', 'Net Rating', 'Pace', 'Free Throw Attempt Rate', '3 Point Attempt Rate',
'True Shooting Percentage', 'Effective Field Goal Percentage', 'Turnover Percentage',
'Offensive Rebound Percentage', 'Free Throws Per Field Goal Attempt',
'Effective Field Goal Percentage Allowed', 'Opponent Turnover Percentage',
'Defensive Rebound Pecentage', 'Opponent Free Throws Per Field Goal Attempt', 'Attendance',
'Attendance Per Game']
def scatter_data(season):
html = requests.get(f'http://www.basketball-reference.com/leagues/NBA_{int(season) + 1}.html').content
time.sleep(1)
cleaned_soup = BeautifulSoup(re.sub(rb"<!--|-->",rb"", html), features='lxml')
misc_table = cleaned_soup.find('table', {'id':'advanced-team'})
df = pd.read_html(str(misc_table))[0]
df.columns = df.columns.get_level_values(1)
df['Team'] = df['Team'].apply(lambda x: x if x[-1] != '*' else x[:-1])
df = df.drop(['Rk', 'Arena', 'Unnamed: 27_level_1', 'Unnamed: 17_level_1', 'Unnamed: 22_level_1'], axis=1).copy()
df.columns = scatter_vals
df = df[df['Team'] != 'League Average']
df[['Wins', 'Losses']] = df[['Wins', 'Losses']].astype(int)
return df
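# Hedged usage sketch (needs network access plus the requests/bs4/lxml/pandas
# stack, and basketball-reference's table layout may change over time):
if __name__ == "__main__":
    df = scatter_data('2019')  # fetches the NBA_2020.html advanced team table
    print(df[['Team', 'Wins', 'Losses', 'Net Rating']].head())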
|
StarcoderdataPython
|
4935570
|
import pextant.backend_app.events.event_definitions as event_definitions
import socket
import selectors
import traceback
from pextant.backend_app.app_component import AppComponent
from pextant.backend_app.events.event_dispatcher import EventDispatcher
from pextant.backend_app.client_server.client_data_stream_handler import ClientDataStreamHandler, SocketClosedException
class Server(AppComponent):
"""A simple server used for accepting client connections and handling subsequent communication"""
'''=======================================
FIELDS
======================================='''
# consts
CONNECTION_ACCEPT_SERVER_DATA = "SERVER_SOCKET"
# properties
@property
def is_listening(self):
return self.server_socket is not None
'''=======================================
STARTUP/SHUTDOWN
======================================='''
def __init__(self, host_name, host_port, manager):
super().__init__(manager)
# create selector (object for handling socket events on multiple socket connections)
self.selector = selectors.DefaultSelector()
# store server information, reference to socket
self.server_address = (host_name, host_port)
self.server_socket = None
# store information about all clients that connect
self.connected_client_handlers = {}
def close(self):
super().close()
self.stop_listening()
self.selector.close()
def start_listening(self):
# if we don't already have an active listening socket
if self.server_socket is None:
# create the server socket
self.server_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind(self.server_address)
self.server_socket.listen()
print(f"listening for clients at {self.server_address[0]}:{self.server_address[1]}")
# register with selector (read only - only job is to accept connections)
self.selector.register(self.server_socket, selectors.EVENT_READ, data=Server.CONNECTION_ACCEPT_SERVER_DATA)
def stop_listening(self):
# close all client connections
self._close_all_client_sockets()
# if we have a connection accept socket
if self.server_socket:
# shut it down
self.selector.unregister(self.server_socket)
self.server_socket.close()
self.server_socket = None
print("listening socket closed")
'''=======================================
UPDATES
======================================='''
def update(self, delta_time):
super().update(delta_time)
# if we have no server socket, do nothing
if self.server_socket is None:
return
# get all events that are currently ready
events = self.selector.select(timeout=0)
for key, mask in events:
# check to see if socket is our connection server
if key.data == Server.CONNECTION_ACCEPT_SERVER_DATA:
# all this thing does is accept connections
self._accept_pending_connection()
# otherwise... (one of our connected, peer-to-peer sockets)
else:
# have the event handler process the event
client_socket = key.fileobj
client_event_handler = self.connected_client_handlers[client_socket]
try:
client_event_handler.process_events(mask)
# if client socket closes, just close on our end
except SocketClosedException as e:
print("SocketClosedException:", e)
self._close_client_socket(client_socket)
# some other exception - print it out
except Exception as e: # RuntimeError or ValueError
print(
"main: error: exception for",
f"{client_event_handler.address}:\n{traceback.format_exc()}",
)
self._close_client_socket(client_socket)
'''=======================================
CONNECTED CLIENTS
======================================='''
def send_message_to_client(self, client_socket, msg):
# if client is in list of connected
if client_socket in self.connected_client_handlers:
# get handler and send a message
client_event_handler = self.connected_client_handlers[client_socket]
client_event_handler.enqueue_message(msg)
def send_message_to_all_clients(self, msg):
# for each connected client
for client_socket in self.connected_client_handlers.keys():
# send the message
self.send_message_to_client(client_socket, msg)
def _accept_pending_connection(self):
# accept connection
client_socket, address = self.server_socket.accept()
# register with our selector
client_event_handler = ClientDataStreamHandler(self.selector, client_socket, address)
events = selectors.EVENT_READ # | selectors.EVENT_WRITE
self.selector.register(client_socket, events)
# add to container of connected clients
self.connected_client_handlers[client_socket] = client_event_handler
# dispatch event
EventDispatcher.instance().trigger_event(event_definitions.CLIENT_CONNECTED, client_socket, address)
def _close_client_socket(self, client_socket):
# close the handler (will close socket) and remove from container
if client_socket in self.connected_client_handlers:
client_event_handler = self.connected_client_handlers[client_socket]
del self.connected_client_handlers[client_socket]
client_event_handler.close()
def _close_all_client_sockets(self):
# close all handlers
for client_event_handler in self.connected_client_handlers.values():
client_event_handler.close()
# clear the container
self.connected_client_handlers.clear()
|
StarcoderdataPython
|
4950531
|
<filename>Advanced/Exercises/Multi_Dimentional_Lists_Exercise_1/2_diagonal_difference.py
# Write a program that finds the difference between the sums of the square matrix diagonals (absolute value).
# On the first line, you will receive an integer N - the size of a square matrix.
# The following N lines hold the values for each column - N numbers separated by a single space.
# Print the absolute difference between the primary and the secondary diagonal sums
result = 0
size = int(input())
for i in range(size):
row = [int(x) for x in input().split()]
    result += row[i] - row[-i - 1]
print(abs(result))
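# The same computation restated as a reusable function with a worked example
# (the sample matrix is illustrative): primary diagonal 11 + 5 - 12 = 4,
# secondary diagonal 4 + 5 + 10 = 19, so the absolute difference is 15.
def diagonal_difference(matrix):
    n = len(matrix)
    return abs(sum(matrix[i][i] - matrix[i][n - 1 - i] for i in range(n)))

assert diagonal_difference([[11, 2, 4], [4, 5, 6], [10, 8, -12]]) == 15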
|
StarcoderdataPython
|
319643
|
# example of a very simple Python script
# you can run this from inside TextPad: http://www.atug.com/andypatterns/textpad_and_python.htm
# you first need to install Python for Windows of course: http://www.python.org/
# for Python reference and help: http://safari.oreilly.com/JVXSL.asp
# this imports useful Python libraries
import re,os,sys,string,time
top_dir = 'y:/best_study_ever/analysis/'
subjects_list = [1,2,3,4,5,12,234]
batch_file_list = ['prestats', 'stats', 'poststats']
for subject in subjects_list:
    # this appends the text 'subjectN' to the top directory, formatting the subject number N to have leading 0s
    subject_dir = top_dir + 'subject%03d' % subject
# change to subject directory
os.chdir(subject_dir)
# now run all three batch files
for batch_file in batch_file_list:
batch_file_name = batch_file+'.bat'
# run the batch file
os.system(batch_file_name)
|
StarcoderdataPython
|
11360967
|
# -*- coding: utf-8 -*-
"""Raspa input plugin."""
import os
from shutil import copytree
from aiida.orm import Dict, FolderData, List, RemoteData, SinglefileData
from aiida.common import CalcInfo, CodeInfo, InputValidationError
#from aiida.cmdline.utils import echo
from aiida.engine import CalcJob
from aiida.plugins import DataFactory
from aiida_raspa.utils import RaspaInput
# data objects
CifData = DataFactory('cif') # pylint: disable=invalid-name
class RaspaCalculation(CalcJob):
"""This is a RaspaCalculation, subclass of CalcJob, to prepare input for RASPA code.
For information on RASPA, refer to: https://github.com/iraspa/raspa2.
"""
# Defaults
INPUT_FILE = 'simulation.input'
OUTPUT_FOLDER = 'Output'
RESTART_FOLDER = 'Restart'
PROJECT_NAME = 'aiida'
DEFAULT_PARSER = 'raspa'
@classmethod
def define(cls, spec):
super().define(spec)
#Input parameters
spec.input('parameters', valid_type=Dict, required=True, help='Input parameters')
spec.input_namespace('framework', valid_type=CifData, required=False, dynamic=True, help='Input framework(s)')
spec.input_namespace('block_pocket',
valid_type=SinglefileData,
required=False,
dynamic=True,
help='Zeo++ block pocket file')
spec.input_namespace('file',
valid_type=SinglefileData,
required=False,
dynamic=True,
help='Additional input file(s)')
spec.input('settings', valid_type=Dict, required=False, help='Additional input parameters')
spec.input('parent_folder',
valid_type=RemoteData,
required=False,
                   help='Remote folder used to continue the same simulation starting from the binary restarts.')
spec.input('retrieved_parent_folder',
valid_type=FolderData,
required=False,
                   help='To use an old calculation as a starting point for a new one.')
spec.inputs['metadata']['options']['parser_name'].default = cls.DEFAULT_PARSER
spec.inputs['metadata']['options']['resources'].default = {
'num_machines': 1,
'num_mpiprocs_per_machine': 1,
'num_cores_per_mpiproc': 1,
}
spec.inputs['metadata']['options']['withmpi'].default = False
# Output parameters
spec.output('output_parameters', valid_type=Dict, required=True, help="The results of a calculation")
spec.output('warnings', valid_type=List, required=False, help="Warnings that appeared during the calculation")
# Exit codes
spec.exit_code(100,
'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.')
spec.exit_code(101, 'ERROR_NO_OUTPUT_FILE', message='The retrieved folder does not contain an output file.')
spec.exit_code(102,
'ERROR_SIMULATION_DID_NOT_START',
message='The output does not contain "Starting simulation".')
spec.exit_code(500, 'TIMEOUT', message='The calculation could not be completed due to the lack of time.')
# Default output node
spec.default_output_node = 'output_parameters'
# --------------------------------------------------------------------------
# pylint: disable = too-many-locals
def prepare_for_submission(self, folder):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param folder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
"""
# create calc info
calcinfo = CalcInfo()
calcinfo.remote_copy_list = []
calcinfo.local_copy_list = []
# initialize input parameters
inp = RaspaInput(self.inputs.parameters.get_dict())
# keep order of systems in the extras
self.node.set_extra('system_order', inp.system_order)
# handle framework(s) and/or box(es)
if "System" in inp.params:
self._handle_system_section(inp.params["System"], folder)
# handle restart
if 'retrieved_parent_folder' in self.inputs:
self._handle_retrieved_parent_folder(inp, folder)
inp.params['GeneralSettings']['RestartFile'] = True
# handle binary restart
if 'parent_folder' in self.inputs:
inp.params['GeneralSettings']['ContinueAfterCrash'] = True
calcinfo.remote_copy_list.append((self.inputs.parent_folder.computer.uuid,
os.path.join(self.inputs.parent_folder.get_remote_path(),
'CrashRestart'), 'CrashRestart'))
# get settings
if 'settings' in self.inputs:
settings = self.inputs.settings.get_dict()
else:
settings = {}
# write raspa input file
with open(folder.get_abs_path(self.INPUT_FILE), "w") as fobj:
fobj.write(inp.render())
# create code info
codeinfo = CodeInfo()
codeinfo.cmdline_params = settings.pop('cmdline', []) + [self.INPUT_FILE]
codeinfo.code_uuid = self.inputs.code.uuid
        calcinfo.uuid = self.uuid
        calcinfo.cmdline_params = codeinfo.cmdline_params
        calcinfo.stdin_name = self.INPUT_FILE
#calcinfo.stdout_name = self.OUTPUT_FILE
calcinfo.codes_info = [codeinfo]
# file lists
if 'file' in self.inputs:
for fobj in self.inputs.file.values():
calcinfo.local_copy_list.append((fobj.uuid, fobj.filename, fobj.filename))
# block pockets
if 'block_pocket' in self.inputs:
for name, fobj in self.inputs.block_pocket.items():
calcinfo.local_copy_list.append((fobj.uuid, fobj.filename, name + '.block'))
calcinfo.retrieve_list = [self.OUTPUT_FOLDER, self.RESTART_FOLDER]
calcinfo.retrieve_list += settings.pop('additional_retrieve_list', [])
# check for left over settings
if settings:
raise InputValidationError("The following keys have been found " +
"in the settings input node {}, ".format(self.pk) + "but were not understood: " +
",".join(list(settings.keys())))
return calcinfo
def _handle_system_section(self, system_dict, folder):
"""Handle framework(s) and/or box(es)."""
for name, sparams in system_dict.items():
if sparams["type"] == "Framework":
try:
self.inputs.framework[name].export(folder.get_abs_path(name + '.cif'), fileformat='cif')
except KeyError:
raise InputValidationError(
"You specified '{}' framework in the input dictionary, but did not provide the input "
"framework with the same name".format(name))
def _handle_retrieved_parent_folder(self, inp, folder):
"""Enable restart from the retrieved folder."""
if "Restart" not in self.inputs.retrieved_parent_folder._repository.list_object_names(): # pylint: disable=protected-access
raise InputValidationError("Restart was requested but the restart "
"folder was not found in the previos calculation.")
dest_folder = folder.get_abs_path("RestartInitial")
# we first copy the whole restart folder
copytree(
os.path.join(self.inputs.retrieved_parent_folder._repository._get_base_folder().abspath, "Restart"), # pylint: disable=protected-access
dest_folder)
# once this is done, we rename the files to match temperature, pressure and number of unit cells
for i_system, system_name in enumerate(inp.system_order):
system = inp.params["System"][system_name]
current_folder = folder.get_abs_path("RestartInitial/System_{}".format(i_system))
content = os.listdir(current_folder)
if len(content) != 1:
raise InputValidationError("Restart folder should contain 1 file only, got {}".format(len(content)))
old_fname = content[0]
if system["type"] == "Box":
system_or_box = "Box"
(n_x, n_y, n_z) = (1, 1, 1)
if 'ExternalPressure' not in system:
system['ExternalPressure'] = 0
elif system["type"] == "Framework":
system_or_box = system_name
try:
(n_x, n_y, n_z) = tuple(map(int, system['UnitCells'].split()))
except KeyError:
(n_x, n_y, n_z) = 1, 1, 1
external_pressure = system['ExternalPressure'] if 'ExternalPressure' in system else 0
new_fname = "restart_{:s}_{:d}.{:d}.{:d}_{:f}_{:g}".format(system_or_box, n_x, n_y, n_z,
system['ExternalTemperature'], external_pressure)
os.rename(os.path.join(current_folder, old_fname), os.path.join(current_folder, new_fname))
|
StarcoderdataPython
|
6414970
|
<filename>probs/continuous/gamma.py<gh_stars>0
from dataclasses import dataclass
from scipy.stats import gamma # type: ignore[import]
from probs.continuous.rv import ContinuousRV
@dataclass(eq=False)
class Gamma(ContinuousRV):
"""
The gamma distribution is a two-parameter family of continuous probability
distributions. The exponential distribution, Erlang distribution, and
chi-squared distribution are special cases of the gamma distribution.
The parameterization with k and θ appears to be more common in econometrics
and certain other applied fields, where for example the gamma distribution
is frequently used to model waiting times.
The parameterization with α and β is more common in Bayesian statistics,
where the gamma distribution is used as a conjugate prior distribution for
various types of inverse scale (rate) parameters, such as the λ (rate) of
an exponential distribution or of a Poisson distribution.
https://en.wikipedia.org/wiki/Gamma_distribution
"""
alpha: float = 1
beta: float = 1
def __post_init__(self) -> None:
        if self.alpha <= 0 or self.beta <= 0:
            raise ValueError("α and β must be greater than 0.")
def __str__(self) -> str:
return f"Gamma(α={self.alpha}, β={self.beta})"
def median(self) -> float:
# No simple closed form
raise NotImplementedError
def mode(self) -> float:
return (self.alpha - 1) / self.beta
def expectation(self) -> float:
return self.alpha / self.beta
def variance(self) -> float:
return self.alpha / self.beta ** 2
    def pdf(self, x: float) -> float:
        # shape-rate parameterization: scipy's gamma uses scale = 1 / β
        return float(gamma.pdf(x, self.alpha, scale=1 / self.beta))
    def cdf(self, x: float) -> float:
        # shape-rate parameterization: scipy's gamma uses scale = 1 / β
        return float(gamma.cdf(x, self.alpha, scale=1 / self.beta))
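# Minimal usage sketch (illustrative; assumes ContinuousRV adds no required
# constructor arguments beyond the alpha/beta fields declared above):
if __name__ == "__main__":
    g = Gamma(alpha=2.0, beta=3.0)
    print(g)                # Gamma(α=2.0, β=3.0)
    print(g.expectation())  # α/β ≈ 0.6667
    print(g.variance())     # α/β² ≈ 0.2222
    print(g.pdf(0.5))       # density at x = 0.5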
|
StarcoderdataPython
|
1800438
|
"""
Simple example showing how to control a GPIO pin from the ULP coprocessor.
The GPIO port is configured to be attached to the RTC module, and then set
to OUTPUT mode. To avoid re-initializing the GPIO on every wakeup, a magic
token gets set in memory.
After every change of state, the ULP is put back to sleep again until the
next wakeup. The ULP wakes up every 500ms to change the state of the GPIO
pin. An LED attached to the GPIO pin would toggle on and off every 500ms.
The end of the python script has a loop to show the value of the magic token
and the current state, so you can confirm the magic token gets set and watch
the state value changing. If the loop is stopped (Ctrl-C), the LED attached
to the GPIO pin continues to blink, because the ULP runs independently from
the main processor.
"""
from esp32 import ULP
from machine import mem32
from esp32_ulp import src_to_binary
source = """\
# constants from:
# https://github.com/espressif/esp-idf/blob/1cb31e5/components/soc/esp32/include/soc/soc.h
#define DR_REG_RTCIO_BASE 0x3ff48400
# constants from:
# https://github.com/espressif/esp-idf/blob/1cb31e5/components/soc/esp32/include/soc/rtc_io_reg.h
#define RTC_IO_TOUCH_PAD2_REG (DR_REG_RTCIO_BASE + 0x9c)
#define RTC_IO_TOUCH_PAD2_MUX_SEL_M (BIT(19))
#define RTC_GPIO_OUT_REG (DR_REG_RTCIO_BASE + 0x0)
#define RTC_GPIO_ENABLE_W1TS_REG (DR_REG_RTCIO_BASE + 0x10)
#define RTC_GPIO_ENABLE_W1TC_REG (DR_REG_RTCIO_BASE + 0x14)
#define RTC_GPIO_ENABLE_W1TS_S 14
#define RTC_GPIO_ENABLE_W1TC_S 14
#define RTC_GPIO_OUT_DATA_S 14
# constants from:
# https://github.com/espressif/esp-idf/blob/1cb31e5/components/soc/esp32/include/soc/rtc_io_channel.h
#define RTCIO_GPIO2_CHANNEL 12
# When accessed from the RTC module (ULP) GPIOs need to be addressed by their channel number
.set gpio, RTCIO_GPIO2_CHANNEL
.set token, 0xcafe # magic token
.text
magic: .long 0
state: .long 0
.global entry
entry:
# load magic flag
move r0, magic
ld r1, r0, 0
# test if we have initialised already
sub r1, r1, token
jump after_init, eq # jump if magic == token (note: "eq" means the last instruction (sub) resulted in 0)
init:
# connect GPIO to ULP (0: GPIO connected to digital GPIO module, 1: GPIO connected to analog RTC module)
WRITE_RTC_REG(RTC_IO_TOUCH_PAD2_REG, RTC_IO_TOUCH_PAD2_MUX_SEL_M, 1, 1);
# GPIO shall be output, not input
WRITE_RTC_REG(RTC_GPIO_OUT_REG, RTC_GPIO_OUT_DATA_S + gpio, 1, 1);
# store that we're done with initialisation
move r0, magic
move r1, token
st r1, r0, 0
after_init:
move r1, state
ld r0, r1, 0
move r2, 1
sub r0, r2, r0 # toggle state
st r0, r1, 0 # store updated state
jumpr on, 0, gt # if r0 (state) > 0, jump to 'on'
jump off # else jump to 'off'
on:
# turn on led (set GPIO)
WRITE_RTC_REG(RTC_GPIO_ENABLE_W1TS_REG, RTC_GPIO_ENABLE_W1TS_S + gpio, 1, 1)
jump exit
off:
# turn off led (clear GPIO)
WRITE_RTC_REG(RTC_GPIO_ENABLE_W1TC_REG, RTC_GPIO_ENABLE_W1TC_S + gpio, 1, 1)
jump exit
exit:
halt # go back to sleep until next wakeup period
"""
binary = src_to_binary(source)
load_addr, entry_addr = 0, 8
ULP_MEM_BASE = 0x50000000
ULP_DATA_MASK = 0xffff # ULP data is only in lower 16 bits
ulp = ULP()
ulp.set_wakeup_period(0, 500000) # use timer0, wakeup after 500000usec (0.5s)
ulp.load_binary(load_addr, binary)
ulp.run(entry_addr)
while True:
print(hex(mem32[ULP_MEM_BASE + load_addr] & ULP_DATA_MASK), # magic token
hex(mem32[ULP_MEM_BASE + load_addr + 4] & ULP_DATA_MASK) # current state
)
|
StarcoderdataPython
|
9727915
|
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('',views.password_reset,name='password_reset'),
path('send_password_reset_mail',views.send_password_reset_mail,name='send_password_reset_mail'),
]
|
StarcoderdataPython
|
11348602
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import onnx
from onnx import shape_inference
import warnings
from onnx_tf.backend import prepare
import numpy as np
# %%
def stride_print(input):
tensor = input.flatten().tolist()
length = len(tensor)
size = 20
stride = length//size
if stride == 0:
stride = 1
size = length // stride
nums = []
for i in range(0, size):
item = tensor[i * stride]
# nums.append(str(i * stride) + ": " + str(item))
nums.append(str(item))
print(nums)
# for i in range(0, size):
# item = tensor[i * stride]
# print ("{} ".format(item),end="")
# %%
diff_threshold = 0.05
def compare(input):
stride_print(input)
tensor = input.flatten().tolist()
length = len(tensor)
size = 20
stride = length//size
if stride == 0:
stride = 1
size = length // stride
nums = []
for i in range(0, size):
item = tensor[i * stride]
# nums.append(str(i * stride) + ": " + str(item))
nums.append(item)
for i in range(0, size):
right_v = nums[i]
paddle_v = float(input_paddle[i])
        if abs(right_v - paddle_v) > diff_threshold:
print("err at {} {} {} ".format(i, right_v, paddle_v))
# %%
model = onnx.load("v18_7_6_2_leakyReLU_rgb_mask_test_t2.onnx")
onnx.checker.check_model(model)
inferred_model = shape_inference.infer_shapes(model)
# %%
model.graph.output.extend(inferred_model.graph.value_info)
# %%
warnings.filterwarnings('ignore')
tfm = prepare(model)
# input = np.fromfile('input', dtype=np.float32).reshape(1, 3, 256, 256)
input = np.loadtxt('./input_1_3_256_256',
dtype=np.float32).reshape(1, 3, 256, 256)
res = tfm.run(input)
# %%
input_paddle = "0.53125 0.549316 0.558594 0.677246 0.470703 0.634766 0.540039 0.566406 0.495605 0.597168 0.602539 0.480957 0.448486 0.553711 0.474365 0.612793 0.609863 0.518555 0.617188 0.505371 0.504395".split(
" ")
compare(res["mask"])
# %%
input_paddle = "0.245117 -0.222656 0.0887451 0.803711 0.639648 0.0995483 0.807129 -0.224609 -0.267578 0.33667 0.372559 -0.353516 0.343262 0.549805 0.344971 0.503906 0.152466 -0.0531616 0.0315247 -0.0397034 -0.218262".split(
" ")
compare(res["rgb"])
|
StarcoderdataPython
|
8107705
|
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
from matplotlib.pyplot import figure
def make_line_plot(save_to_folder: str, list_of_data_points, xlabel, ylabel, title, add_avg_line=False, sigma=0, all_xticks=False, custom_width=False, width=0):
if custom_width:
figure(num=None, figsize=(width, 6), dpi=300, facecolor='w', edgecolor='k')
else:
figure(num=None, figsize=(8, 6), dpi=300, facecolor='w', edgecolor='k')
if add_avg_line:
x = range(1, len(list_of_data_points) + 1)
y = gaussian_filter1d(list_of_data_points, sigma=sigma)
plt.plot(x, y, '--')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
x2 = range(1, len(list_of_data_points) + 1)
y2 = list_of_data_points
if all_xticks:
plt.xticks(x2)
plt.plot(x2, y2)
plt.grid()
file_name = str(title + '.jpg').replace(' ', '_')
plt.savefig(save_to_folder + file_name, dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, pad_inches=1, metadata=None)
return file_name
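# Example call (illustrative; the target folder must already exist and end with a path separator):
#     make_line_plot('plots/', [1.2, 3.4, 2.8, 4.1, 3.9], 'Epoch', 'Loss',
#                    'Training loss', add_avg_line=True, sigma=2)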
|
StarcoderdataPython
|
1984550
|
# encoding=utf-8
import tensorflow as tf
import numpy as np
time_steps = 12
channel_size = 3
embedding_size = 64
embedding_fn_size = 312
filter_num = 8
filter_sizes = [1, 3, 5]
threshold = 0.5
class CnnModel(object):
def __init__(self, init_learning_rate, decay_steps, decay_rate):
weights = {
'wc1': tf.Variable(tf.truncated_normal([filter_sizes[0], channel_size, filter_num], stddev=0.1)),
'wc2': tf.Variable(
tf.truncated_normal([filter_sizes[1], channel_size, filter_num], stddev=0.1)),
'wc3': tf.Variable(
tf.truncated_normal([filter_sizes[2], channel_size, filter_num], stddev=0.1))
}
biases = {
'bc1': tf.Variable(tf.truncated_normal([filter_num], stddev=0.1)),
'bc2': tf.Variable(tf.truncated_normal([filter_num], stddev=0.1)),
'bc3': tf.Variable(tf.truncated_normal([filter_num], stddev=0.1))
}
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(init_learning_rate, global_step, decay_steps, decay_rate,
staircase=True)
# define placeholder
self.x = tf.placeholder(tf.float32, [None, channel_size, time_steps])
x_emb = tf.transpose(self.x, [0, 2, 1]) # [None,time_steps,embedding_size]
self.e = tf.placeholder(tf.float32, [None, embedding_size])
self.y = tf.placeholder(tf.int32, [None, 1])
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
ones = tf.ones_like(self.y)
zeros = tf.zeros_like(self.y)
with tf.name_scope("FN_Part"):
output_fn = tf.layers.dense(self.e, embedding_fn_size, activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1))
with tf.name_scope("CNN_Part"):
x_convs = self.multi_conv(x_emb, weights, biases)
x_convs = tf.reshape(x_convs, [-1, 3 * filter_num])
with tf.name_scope("Output_Part"):
concate_v = tf.concat([output_fn, x_convs], axis=1)
weight_last = tf.Variable(
tf.truncated_normal([3 * filter_num + embedding_fn_size, 1]) * np.sqrt(2. / (3 * filter_num)))
bias_last = tf.Variable(tf.truncated_normal([1], stddev=0.1))
concate_v = tf.nn.dropout(concate_v, self.dropout_keep_prob)
logits_cnn = tf.matmul(concate_v, weight_last) + bias_last
self.loss_cnn = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(self.y, tf.float32), logits=logits_cnn))
self.optimizer_cnn = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss_cnn,
global_step=global_step)
self.logits_pred = tf.nn.sigmoid(logits_cnn)
self.prediction_cnn = tf.cast(tf.where(tf.greater(self.logits_pred, threshold), ones, zeros), tf.int32)
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.prediction_cnn, self.y), tf.float32))
    def conv1d(self, x, W, b):
x = tf.reshape(x, shape=[-1, time_steps, channel_size])
x = tf.nn.conv1d(x, W, 1, padding='SAME')
x = tf.nn.bias_add(x, b)
# shape=(n,time_steps,filter_num)
h = tf.nn.relu(x)
pooled = tf.reduce_max(h, axis=1)
return pooled
def multi_conv(self, x, weights, biases):
# Convolution Layer
conv1 = self.conv1d(x, weights['wc1'], biases['bc1'])
conv2 = self.conv1d(x, weights['wc2'], biases['bc2'])
conv3 = self.conv1d(x, weights['wc3'], biases['bc3'])
# n*time_steps*(3*filter_num)
convs = tf.concat([conv1, conv2, conv3], 1)
return convs
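# Hypothetical training-step sketch (TF1 style; batch_x, batch_e and batch_y are
# numpy arrays shaped to match the placeholders above and are not defined here):
#     model = CnnModel(init_learning_rate=1e-3, decay_steps=1000, decay_rate=0.9)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         _, loss = sess.run([model.optimizer_cnn, model.loss_cnn],
#                            feed_dict={model.x: batch_x, model.e: batch_e,
#                                       model.y: batch_y, model.dropout_keep_prob: 0.5})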
|
StarcoderdataPython
|
6635672
|
<filename>molmodmt/sequence.py<gh_stars>0
from molmodmt import convert as _convert
from molmodmt import select as _select
def sequence_alignment(ref_item=None, item=None, engine='biopython', prettyprint=False,
prettyprint_alignment_index = 0, **kwards):
alignment = None
if engine=='biopython':
# from ensembler.modeling.align_target_template
# (https://github.com/choderalab/ensembler/blob/master/ensembler/modeling.py)
from Bio.pairwise2 import align as _biopython_align
from Bio.SubsMat import MatrixInfo as _biopython_MatrixInfo
tmp_ref_seq=_convert(ref_item,'biopython.Seq')
tmp_seq=_convert(item,'biopython.Seq')
matrix = getattr(_biopython_MatrixInfo,'gonnet')
gap_open=-10
gap_extend=-0.5
alignment = _biopython_align.globalds(tmp_ref_seq, tmp_seq, matrix, gap_open, gap_extend)
del(_biopython_MatrixInfo,_biopython_align,matrix,gap_open,gap_extend,tmp_ref_seq,tmp_seq)
elif engine=='modeller':
raise NotImplementedError
else:
raise NotImplementedError
if prettyprint:
textredbold = '\033[1;31;48m' # Red bold text
        textbluebold = '\033[1;34;48m' # Blue bold text
endcolor = '\033[m' # reset color
# Color guide in: http://ozzmaker.com/add-colour-to-text-in-python/
aln = alignment
align = prettyprint_alignment_index
seq1 =""
seq2 =""
for r in range(len(aln[align][0])):
res1 = aln[align][0][r]
res2 = aln[align][1][r]
if res1 == res2:
seq1+=res1
seq2+=res2
elif res1 == '-':
seq1+=res1
seq2+=textbluebold+res2+endcolor
elif res2 == '-':
seq1+=textbluebold+res1+endcolor
seq2+=res2
else:
seq1+=textredbold+res1+endcolor
seq2+=textredbold+res2+endcolor
print(seq1)
print()
print(seq2)
pass
else:
return alignment
def structure_alignment(ref_item=None, item=None,
ref_selection_alignment='all', selection_alignment='all',
ref_selection_rmsd=None, selection_rmsd='backbone',
ref_frame_index=0, frame_indices='all',
parallel=True,
engine_sequence_alignment = 'biopython',
engine_least_rmsd_fit = 'mdtraj',
syntaxis='mdtraj'):
from .rmsd import least_rmsd_fit as _least_rmsd_fit
from .multitool import extract as _extract
from .multitool import select as _select
if ref_selection_rmsd is None:
ref_selection_rmsd = selection_rmsd
if engine_sequence_alignment == 'biopython':
is_all_ref_item = False
        if type(ref_selection_alignment) == str:
            if ref_selection_alignment == 'all':
                is_all_ref_item = True
if is_all_ref_item:
tmp_ref_item = ref_item
else:
atomslist_ref_alignment = _select(ref_item, ref_selection_alignment)
tmp_ref_item = _extract(ref_item, atomslist_ref_alignment)
is_all_item = False
        if type(selection_alignment) == str:
            if selection_alignment == 'all':
                is_all_item = True
if is_all_item:
tmp_item = item
else:
atomslist_alignment = _select(item, selection_alignment)
tmp_item = _extract(item, atomslist_alignment)
idty, intersection_atoms = sequence_identity(tmp_ref_item,tmp_item,
intersection_set="atoms", engine='biopython')
if not is_all_ref_item:
intersection_atoms[0]=atomslist_ref_alignment[intersection_atoms[0]]
if not is_all_item:
intersection_atoms[1]=atomslist_alignment[intersection_atoms[1]]
is_all_ref_item = False
        if type(ref_selection_rmsd) == str:
            if ref_selection_rmsd == 'all':
                is_all_ref_item = True
is_all_item = False
        if type(selection_rmsd) == str:
            if selection_rmsd == 'all':
                is_all_item = True
if is_all_ref_item:
ref_item_selection = 'index '+" ".join(map(str,intersection_atoms[0]))
else:
ref_item_selection = ref_selection_rmsd+' and index '+\
" ".join(map(str,intersection_atoms[0]))
if is_all_item:
item_selection = 'index '+" ".join(map(str,intersection_atoms[1]))
else:
item_selection = selection_rmsd+' and index '+" ".join(map(str,intersection_atoms[1]))
if engine_least_rmsd_fit == 'mdtraj':
return _least_rmsd_fit(ref_item=ref_item, item=item,
ref_selection=ref_item_selection, selection=item_selection,
ref_frame_index=ref_frame_index, frame_indices=frame_indices,
engine=engine_least_rmsd_fit,
parallel=parallel, syntaxis=syntaxis)
else:
raise NotImplementedError
else:
raise NotImplementedError
def sequence_identity(ref_item=None, item=None, intersection_set=None, engine='biopython', **kwards):
if engine=='biopython':
# This is code from ensembler.modeling.calculate_seq_identity
# This should be implemented here importing the function but there is a conflict installing
# ensembler: ensembler is only available for python 2.7
# (https://github.com/choderalab/ensembler/blob/master/ensembler/modeling.py)
aln = sequence_alignment(ref_item, item, engine)
len_shorter_seq = min([len(aln[0][0].replace('-', '')), len(aln[0][1].replace('-', ''))])
seq_id = 0
intersect_1=[]
intersect_2=[]
ii_1 = 0
ii_2 = 0
for r in range(len(aln[0][0])):
res1 = aln[0][0][r]
res2 = aln[0][1][r]
if res1 == res2:
seq_id += 1
intersect_1.append(ii_1)
intersect_2.append(ii_2)
if res1 != '-':
ii_1+=1
if res2 != '-':
ii_2+=1
seq_id = 100 * float(seq_id) / float(len_shorter_seq)
if intersection_set=='residues':
from .multitool import convert as _convert
tmp_item = _convert(ref_item).topology
set_1 = [tmp_item.residue(ii).resSeq for ii in intersect_1]
tmp_item = _convert(item).topology
set_2 = [tmp_item.residue(ii).resSeq for ii in intersect_2]
del(_convert,tmp_item)
return seq_id, [set_1, set_2]
elif intersection_set=='atoms':
from .multitool import convert as _convert
set_1 = []
tmp_item = _convert(ref_item).topology
for tmp_residue in [tmp_item.residue(ii) for ii in intersect_1]:
for tmp_atom in tmp_residue.atoms:
set_1.append(tmp_atom.index)
tmp_item = _convert(item).topology
set_2 = []
for tmp_residue in [tmp_item.residue(ii) for ii in intersect_2]:
for tmp_atom in tmp_residue.atoms:
set_2.append(tmp_atom.index)
del(_convert,tmp_item,tmp_residue,tmp_atom)
return seq_id, [set_1, set_2]
else:
return seq_id
else:
raise NotImplementedError
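# Illustrative usage (hypothetical molecular-model objects; anything molmodmt's
# convert() can turn into a biopython.Seq should work as input here):
#     aln = sequence_alignment(ref_model, model, engine='biopython')
#     identity, (ref_atoms, atoms) = sequence_identity(ref_model, model,
#                                                      intersection_set='atoms')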
|
StarcoderdataPython
|
1678057
|
<filename>Matlab and Python Scripts/PyAppNotes/PyAppNotes/AN24_02.py
# AN24_02 -- FMCW Basics
import Class.Adf24Tx2Rx4 as Adf24Tx2Rx4
from numpy import *
# (1) Connect to Radarbook
# (2) Enable Supply
# (3) Configure RX
# (4) Configure TX
# (5) Start Measurements
# (6) Configure calculation of range profile
#--------------------------------------------------------------------------
# Setup Connection
#--------------------------------------------------------------------------
Brd = Adf24Tx2Rx4.Adf24Tx2Rx4()
Brd.BrdRst()
#--------------------------------------------------------------------------
# Software Version
#--------------------------------------------------------------------------
Brd.BrdDispSwVers()
#--------------------------------------------------------------------------
# Configure Receiver
#--------------------------------------------------------------------------
Brd.RfRxEna()
TxPwr = 100
NrFrms = 4
#--------------------------------------------------------------------------
# Configure Transmitter (Antenna 0 - 3, Pwr 0 - 31)
#--------------------------------------------------------------------------
Brd.RfTxEna(1, TxPwr)
#--------------------------------------------------------------------------
# Configure Up-Chirp
#--------------------------------------------------------------------------
dCfg = {
"fs" : 1.0e6,
"fStrt" : 24.0e9,
"fStop" : 24.25e9,
"TRampUp" : 260/1.0e6,
"Tp" : 300/1.0e6,
"N" : 256,
"StrtIdx" : 0,
"StopIdx" : 2,
"MimoEna" : 0
}
Brd.RfMeas('Adi', dCfg)
fStrt = Brd.Adf_Pll.fStrt
fStop = Brd.Adf_Pll.fStop
TRampUp = Brd.Adf_Pll.TRampUp
DataTx1 = zeros((256*dCfg["StopIdx"]*4, int(NrFrms)))
for Cycles in range(0, int(NrFrms)):
Data = Brd.BrdGetData()
print("FrmCntr: ", Data[0,:])
del Brd
|
StarcoderdataPython
|
6549551
|
<reponame>Addovej/spotiplays<filename>src/conf/__init__.py
from functools import lru_cache
from .settings import Settings
@lru_cache
def get_settings() -> Settings:
return Settings()
settings = get_settings()
__all__ = (
'settings',
)
|
StarcoderdataPython
|
8018478
|
<gh_stars>1-10
'''
Author: <NAME>
Date: 2021-10-02 15:56:57
LastEditTime: 2021-10-06 10:04:04
LastEditors: <NAME>
Description:
FilePath: /INF/INF101/TD/1.3.14.py
'''
n = float(input("Saissez un nombre n"))
tmp = 0
def trouve_nombre_or(tolerance):
table = []
count = 0
table.append(1)
table.append(1)
while True:
tmp = table[count + 1] + table[count]
table.append(tmp)
nombre_or = table[len(table) - 2] / table[len(table) - 1]
print(nombre_or)
        if tolerance > abs(nombre_or - 0.618033988749895):
return table, nombre_or
else:
count += 1
table, nombre_or = trouve_nombre_or(n)
print("Derniere Nombre: %s" % table[len(table) - 1])
print(table)
print("Nombre d'Or: %s" % nombre_or)
|
StarcoderdataPython
|
1811567
|
<filename>python/sum-and-prod.py
import numpy
n,m=map(int,input().split())
a=numpy.zeros((n,m),int)
for i in range(n):
    a[i] = numpy.array(input().split(), int)
print(numpy.prod(numpy.sum(a,axis=0),axis=None))
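# Worked example (illustrative): for input
#     2 2
#     1 2
#     3 4
# the column sums are [4, 6] and their product 24 is printed.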
|
StarcoderdataPython
|
4811611
|
<reponame>eyalle/python_course<gh_stars>0
import sys, random, time
sys.path.append("../../")
from exercises.data import get_data
data = get_data(5)
users = [{'name': items['name']} for items in data]
hobbies = [
'football',
'strongman',
'mario',
'tekken-3',
'beach',
'soccer',
'bacon',
'pool',
'dodge ball'
]
hobbies2 = [
'star wars',
'star trek',
'mario cart',
'draw',
'manga artist',
'ogre slayer',
'porn star',
'high five guy!',
'idan hakimi'
]
consolidated_hobbies = list(set(hobbies + hobbies2))
print(consolidated_hobbies)
def add_random_hobby(users):
for user in users:
user['hobby'] = random.choice(hobbies)
time.sleep(1)
def count_hobbies(users):
    # count over the full hobby pool so second hobbies (from hobbies2) are valid keys
    count = {hobby: 0 for hobby in consolidated_hobbies}
    for user in users:
        if user.get('hobby'):
            count[user['hobby']] += 1
        if user.get('hobby2'):
            count[user['hobby2']] += 1
    for hobby in count:
        print(hobby, count[hobby])
    time.sleep(1)
def add_second_hobby(users):
for user in users:
hobby2 = random.choice(hobbies)
if (hobby2 == user['hobby']):
hobby2 = random.choice(hobbies2)
user['hobby2'] = hobby2
print(f'{users}')
time.sleep(1)
if __name__ == "__main__":
print('\n\nwelcome, adding random hobby')
add_random_hobby(users)
print('\ncounting hobbies..')
count_hobbies(users)
print('\n\nadding second hobby')
add_second_hobby(users)
    print('\n\ncounting hobbies, now with second hobby..')
count_hobbies(users)
|
StarcoderdataPython
|
12823853
|
from phactori import *
from paraview.simple import *
import sys
#utilities for doing various data grid/geometric operations in parallel
#in particular:
#GetListOfGridPointsNearestListOfPointsV5
# takes a list of geometric points, and goes through all processors and all
# blocks and finds the data grid point which is nearest to each point in the list
#GetListOfCellTestPointsNearestListOfPointsV5
# takes a list of geometric points, and goes through all processors and all
# blocks and all cells in each block and for each geometric point finds the
# cell bounding box center which is nearest to each point in the list
#phactori_combine_to_single_python_file_subpiece_begin_1
def GetGridPointsClosestToPointsInBlockV5(recursionObject, inInputCsData, inParameters):
if PhactoriDbg(100):
myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 entered\n")
numCells = inInputCsData.GetNumberOfCells()
numPoints = inInputCsData.GetNumberOfPoints()
if (numCells == 0) or (numPoints == 0):
#no cells here
if PhactoriDbg(100):
myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 returning (no cells or no points)\n")
return
if PhactoriDbg(100):
myDebugPrint3(str(inParameters.testPointList) + "\n")
myDebugPrint3(str(inParameters.distSqrdList) + "\n")
pointsArray = inInputCsData.GetPoints()
gridPtXyz = [0.0, 0.0, 0.0]
for gridPtNdx in range(0,numPoints):
pointsArray.GetPoint(gridPtNdx, gridPtXyz)
for ptndx, oneTestPt in enumerate(inParameters.testPointList):
testDist = vecDistanceSquared(oneTestPt, gridPtXyz)
if testDist < inParameters.distSqrdList[ptndx]:
inParameters.closestList[ptndx] = list(gridPtXyz)
inParameters.distSqrdList[ptndx] = testDist
if PhactoriDbg(100):
myDebugPrint3(str(inParameters.testPointList) + "\n")
myDebugPrint3(str(inParameters.distSqrdList) + "\n")
if PhactoriDbg(100):
myDebugPrint3("after this block:\n")
for ii, oneGridPoint in enumerate(inParameters.closestList):
myDebugPrint3(str(ii) + ": " + \
str(inParameters.distSqrdList[ii]) + "\n" + \
str(inParameters.testPointList[ii]) + "\n" + str(oneGridPoint))
myDebugPrint3("\n")
if PhactoriDbg(100):
myDebugPrint3("GetGridPointsClosestToPointsInBlockV5 returning\n")
def GetCellsClosestToPointsInBlockV5(recursionObject, inInputCsData, inParameters):
if PhactoriDbg(100):
myDebugPrint3("GetCellsClosestToPointsInBlock entered\n")
numCells = inInputCsData.GetNumberOfCells()
numPoints = inInputCsData.GetNumberOfPoints()
if (numCells == 0) or (numPoints == 0):
#no cells here
if PhactoriDbg(100):
myDebugPrint3("GetCellsClosestToPointsInBlock returning (no cells or no points)\n")
return
if PhactoriDbg(100):
myDebugPrint3(str(inParameters.testPointList) + "\n")
myDebugPrint3(str(inParameters.distSqrdList) + "\n")
for cellIndex in range(0,numCells):
oneCell = inInputCsData.GetCell(cellIndex)
cellTestPoint = GetCellTestPoint(oneCell)
for ptndx, oneTestPt in enumerate(inParameters.testPointList):
testDist = vecDistanceSquared(oneTestPt, cellTestPoint)
if testDist < inParameters.distSqrdList[ptndx]:
inParameters.closestList[ptndx] = cellTestPoint
inParameters.distSqrdList[ptndx] = testDist
if PhactoriDbg(100):
myDebugPrint3(str(inParameters.testPointList) + "\n")
myDebugPrint3(str(inParameters.distSqrdList) + "\n")
if PhactoriDbg(100):
myDebugPrint3("after this block:\n")
for ii, oneCellPoint in enumerate(inParameters.closestList):
myDebugPrint3(str(ii) + ": " + \
str(inParameters.distSqrdList[ii]) + "\n" + \
str(inParameters.testPointList[ii]) + "\n" + str(oneCellPoint))
myDebugPrint3("\n")
if PhactoriDbg(100):
myDebugPrint3("GetCellsClosestToPointsInBlock returning\n")
class GetCellsClosestToPointsInBlockRecursionParamsV5:
def __init__(self):
self.testPointList = []
self.distSqrdList = []
self.closestList = []
def InitializeWithPointList(self, inTestPointList):
self.testPointList = inTestPointList
numTestPoints = len(inTestPointList)
for ii in range(0, numTestPoints):
self.distSqrdList.append(sys.float_info.max)
self.closestList.append(None)
def GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, inTestPointList):
if PhactoriDbg(100):
myDebugPrint3("GetCellsClosestToPointsOnThisProcessFromParaViewFilter entered\n")
recursionObj = PhactoriParaviewMultiBlockRecursionControl()
recursionObj.mParameters = GetCellsClosestToPointsInBlockRecursionParamsV5()
recursionObj.mParameters.InitializeWithPointList(inTestPointList)
recursionObj.mOperationToDoPerBlock = GetCellsClosestToPointsInBlockV5
PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(recursionObj, inInputFilter)
if PhactoriDbg(100):
myDebugPrint3("GetCellsClosestToPointsOnThisProcessFromParaViewFilter returning\n")
return recursionObj.mParameters.closestList, recursionObj.mParameters.distSqrdList
def GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, inTestPointList):
if PhactoriDbg(100):
myDebugPrint3("GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5 entered\n")
recursionObj = PhactoriParaviewMultiBlockRecursionControl()
recursionObj.mParameters = GetCellsClosestToPointsInBlockRecursionParamsV5()
recursionObj.mParameters.InitializeWithPointList(inTestPointList)
recursionObj.mOperationToDoPerBlock = GetGridPointsClosestToPointsInBlockV5
PhactoriRecusivelyDoMethodPerBlockFromParaViewFilter(recursionObj, inInputFilter)
if PhactoriDbg(100):
myDebugPrint3("GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5 returning\n")
return recursionObj.mParameters.closestList, recursionObj.mParameters.distSqrdList
def GetPidWithLeastValueListV5(inLocalDistSqrdList):
myPid = int(SmartGetLocalProcessId())
globalDistSqrdList = UseReduceOnFloatList(inLocalDistSqrdList, 1)
localPidList = []
numItems = len(inLocalDistSqrdList)
for ndx in range(0,numItems):
if globalDistSqrdList[ndx] == inLocalDistSqrdList[ndx]:
localPidList.append(myPid)
else:
localPidList.append(-1)
pidWithDataList = UseReduceOnIntegerList(localPidList, 0)
return pidWithDataList, globalDistSqrdList
def UseMpiToGetGlobalCellPointsClosestV5(inInputFilter, inLocalCellPointList, inLocalDistSqrdList):
if PhactoriDbg(100):
myDebugPrint3("PhactoriSegmentCellSampler3.UseMpiToGetGlobalCellPointsClosest entered\n", 100)
if PhactoriDbg(100):
myDebugPrint3("inLocalCellPointList:\n" + str(inLocalCellPointList) + "\ninLocalDistSqrdList:\n" + str(inLocalDistSqrdList) + "\n")
pidWithDataList, globalDistSqrdList = GetPidWithLeastValueListV5(inLocalDistSqrdList)
if PhactoriDbg(100):
myDebugPrint3("pidWithDataList:\n" + str(pidWithDataList) + "\nglobalDistSqrdList:\n" + str(globalDistSqrdList) + "\n")
#convert cell point list to array of doubles and ints, use mpi reduce to share
#the values, then convert back to cell point list
serializeFloatArray = []
serializeIntArray = []
#convert cell point list to array of doubles
cellPointFloatArray = []
myPid = SmartGetLocalProcessId()
for ii, oneCellPoint in enumerate(inLocalCellPointList):
if pidWithDataList[ii] == myPid:
cellPointFloatArray.append(oneCellPoint[0])
cellPointFloatArray.append(oneCellPoint[1])
cellPointFloatArray.append(oneCellPoint[2])
else:
cellPointFloatArray.append(0.0)
cellPointFloatArray.append(0.0)
cellPointFloatArray.append(0.0)
#use mpi reduce to spread array correctly
globalCellPointFloatArray = UseReduceOnFloatList(cellPointFloatArray, 2)
#now create return global cell point list from arrays
numCells = len(inLocalCellPointList)
returnGlobalCellPointList = []
for ii in range(0,numCells):
myndx = ii*3
oneCellPoint = [globalCellPointFloatArray[myndx],
globalCellPointFloatArray[myndx+1],
globalCellPointFloatArray[myndx+2]]
returnGlobalCellPointList.append(oneCellPoint)
if PhactoriDbg(100):
myDebugPrint3("returnGlobalCellPointList:\n" + str(returnGlobalCellPointList) + "\n")
if PhactoriDbg(100):
myDebugPrint3("PhactoriSegmentCellSampler3.UseMpiToGetGlobalCellPointsClosest returning\n", 100)
return returnGlobalCellPointList, globalDistSqrdList
def GetListOfCellTestPointsNearestListOfPointsV5(inInputFilter, pointList):
"""for each point in the list, find the cell test point (e.g. center of
cell bounding box) which is nearest the test point. Use MPI to work
in parallel"""
thisProcessNearestCellPointList, thisProcDistSqrdList = \
GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
nearestCellList, distanceList = UseMpiToGetGlobalCellPointsClosestV5(
inInputFilter, thisProcessNearestCellPointList, thisProcDistSqrdList)
return nearestCellList
def GetListOfGridPointsNearestListOfPointsV5(inInputFilter, pointList):
"""for each point in the list, find the point in the data grid
which is nearest the test point. Use MPI to work
in parallel"""
thisProcessNearestGridPointList, thisProcDistSqrdList = \
GetGridPointsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
nearestCellList, distanceList = UseMpiToGetGlobalCellPointsClosestV5(
inInputFilter, thisProcessNearestGridPointList, thisProcDistSqrdList)
return nearestCellList
#phactori_combine_to_single_python_file_subpiece_end_1
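# Illustrative call pattern (hypothetical filter and points; assumes this runs inside
# a Phactori/ParaView in-situ pipeline where the MPI reduce helpers are available):
#     testPoints = [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]
#     nearestCellPts = GetListOfCellTestPointsNearestListOfPointsV5(someParaViewFilter, testPoints)
#     nearestGridPts = GetListOfGridPointsNearestListOfPointsV5(someParaViewFilter, testPoints)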
|
StarcoderdataPython
|
3523114
|
import warnings
import numpy as np
from sklearn.preprocessing import StandardScaler
class SkLearnMixerHelper:
def _input_encoded_columns(self, target_column, when_data_source):
"""
:param when_data_source: is a DataSource object
:return: numpy.nd array input encoded values
"""
input_encoded = None
for column in self.feature_columns[target_column]:
if input_encoded is None:
input_encoded = self._encoded_data([column], when_data_source)
else:
input_encoded = np.append(input_encoded, self._encoded_data([column], when_data_source), axis=1)
return StandardScaler().fit_transform(input_encoded)
def _output_encoded_columns(self, column, data_source):
"""
:param data_source: is a DataSource object
:return: numpy.nd array output encoded values
"""
output_encoded_column = self._encoded_data([column], data_source)
self.output_encoders = data_source.encoders
return output_encoded_column
def _extract_features(self, data_source, model_class, output_encoded_column):
"""
:param data_source: is a DataSource object
:param model_class: type of model to be fitted
:param output_encoded_column: target variable encoded values
:return: numpy.nd array: important input encoded columns
"""
input_encoded_columns = None
feature_columns = []
for column in self.input_column_names:
input_encoded_column = self._encoded_data([column], data_source)
input_encoded_column = StandardScaler().fit_transform(input_encoded_column)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = model_class.fit(StandardScaler().fit_transform(input_encoded_column), output_encoded_column)
score = model.score(input_encoded_column, output_encoded_column)
self.feature_models[column] = model
self.feature_importance[column] = score
if score > self.score_threshold:
feature_columns.append(column)
if input_encoded_columns is None:
input_encoded_columns = input_encoded_column
else:
input_encoded_columns = np.append(input_encoded_columns, input_encoded_column, axis=1)
return StandardScaler().fit_transform(input_encoded_columns), feature_columns
@staticmethod
def _encoded_data(features, data_source):
"""
:param features: list of column names
:param data_source: input data
:return encoded_data: numpy.nd array encoded values
"""
for cnt, column in enumerate(features):
if cnt == 0:
encoded_data = data_source.get_encoded_column_data(column).numpy()
else:
encoded_data = np.append(encoded_data, data_source.get_encoded_column_data(column).numpy(), axis=1)
return encoded_data
def _decoded_data(self, features, data_source, encoded_data):
"""
:param features: list : columns to be decoded
:param data_source: is a DataSource object
:param encoded_data: encoded data
:return: decoded data
"""
for column in features:
encoders = self.output_encoders.get(column, None)
if encoders is None:
decoded_data = data_source.get_decoded_column_data(column, encoded_data)
else:
decoded_data = encoders.decode(encoded_data)
return decoded_data
def _determine_model_class(self, column, data_source):
"""
:param column: name of the column
:param data_source: is a DataSource object
:return: model: Model to be considered for fitting data
"""
data_type = data_source.get_column_config(column)['type']
from lightwood.mixers.sk_learn.feature import FeatureFactory
if data_type is not None:
feature = FeatureFactory.create_feature(data_source.get_column_config(column))
model = feature.get_model_class(self.classifier_class, self.regression_class)
return model
|
StarcoderdataPython
|
8075658
|
<gh_stars>1-10
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import mailslurp_client
from mailslurp_client.api.inbox_forwarder_controller_api import InboxForwarderControllerApi # noqa: E501
from mailslurp_client.rest import ApiException
class TestInboxForwarderControllerApi(unittest.TestCase):
"""InboxForwarderControllerApi unit test stubs"""
def setUp(self):
self.api = mailslurp_client.api.inbox_forwarder_controller_api.InboxForwarderControllerApi() # noqa: E501
def tearDown(self):
pass
def test_create_new_inbox_forwarder(self):
"""Test case for create_new_inbox_forwarder
Create an inbox forwarder # noqa: E501
"""
pass
def test_delete_inbox_forwarder(self):
"""Test case for delete_inbox_forwarder
Delete an inbox forwarder # noqa: E501
"""
pass
def test_delete_inbox_forwarders(self):
"""Test case for delete_inbox_forwarders
Delete inbox forwarders # noqa: E501
"""
pass
def test_get_inbox_forwarder(self):
"""Test case for get_inbox_forwarder
Get an inbox forwarder # noqa: E501
"""
pass
def test_get_inbox_forwarders(self):
"""Test case for get_inbox_forwarders
List inbox forwarders # noqa: E501
"""
pass
def test_test_inbox_forwarder(self):
"""Test case for test_inbox_forwarder
Test an inbox forwarder # noqa: E501
"""
pass
def test_test_inbox_forwarders_for_inbox(self):
"""Test case for test_inbox_forwarders_for_inbox
Test inbox forwarders for inbox # noqa: E501
"""
pass
def test_test_new_inbox_forwarder(self):
"""Test case for test_new_inbox_forwarder
Test new inbox forwarder # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3284294
|
<filename>utils.py
import torch
import torch.nn as nn
import numpy as np
from scipy.interpolate import interp1d
import os
import sys
import random
import config
def upgrade_resolution(arr, scale):
x = np.arange(0, arr.shape[0])
f = interp1d(x, arr, kind='linear', axis=0, fill_value='extrapolate')
scale_x = np.arange(0, arr.shape[0], 1 / scale)
up_scale = f(scale_x)
return up_scale
def get_proposal_oic(tList, wtcam, final_score, c_pred, scale, v_len, sampling_frames, num_segments, _lambda=0.25, gamma=0.2):
t_factor = float(16 * v_len) / (scale * num_segments * sampling_frames)
temp = []
for i in range(len(tList)):
c_temp = []
temp_list = np.array(tList[i])[0]
if temp_list.any():
grouped_temp_list = grouping(temp_list)
for j in range(len(grouped_temp_list)):
inner_score = np.mean(wtcam[grouped_temp_list[j], i, 0])
len_proposal = len(grouped_temp_list[j])
outer_s = max(0, int(grouped_temp_list[j][0] - _lambda * len_proposal))
outer_e = min(int(wtcam.shape[0] - 1), int(grouped_temp_list[j][-1] + _lambda * len_proposal))
outer_temp_list = list(range(outer_s, int(grouped_temp_list[j][0]))) + list(range(int(grouped_temp_list[j][-1] + 1), outer_e + 1))
if len(outer_temp_list) == 0:
outer_score = 0
else:
outer_score = np.mean(wtcam[outer_temp_list, i, 0])
c_score = inner_score - outer_score + gamma * final_score[c_pred[i]]
t_start = grouped_temp_list[j][0] * t_factor
t_end = (grouped_temp_list[j][-1] + 1) * t_factor
c_temp.append([c_pred[i], c_score, t_start, t_end])
temp.append(c_temp)
return temp
def result2json(result):
result_file = []
for i in range(len(result)):
line = {'label': config.class_dict[result[i][0]], 'score': result[i][1],
'segment': [result[i][2], result[i][3]]}
result_file.append(line)
return result_file
def grouping(arr):
    """Split a sorted index array into runs of consecutive values,
    e.g. [1, 2, 3, 7, 8] -> [array([1, 2, 3]), array([7, 8])]."""
    return np.split(arr, np.where(np.diff(arr) != 1)[0] + 1)
def save_best_record_thumos(test_info, file_path):
fo = open(file_path, "w")
fo.write("Step: {}\n".format(test_info["step"][-1]))
fo.write("Test_acc: {:.4f}\n".format(test_info["test_acc"][-1]))
fo.write("average_mAP[0.1:0.7]: {:.4f}\n".format(test_info["average_mAP[0.1:0.7]"][-1]))
fo.write("average_mAP[0.1:0.5]: {:.4f}\n".format(test_info["average_mAP[0.1:0.5]"][-1]))
fo.write("average_mAP[0.3:0.7]: {:.4f}\n".format(test_info["average_mAP[0.3:0.7]"][-1]))
tIoU_thresh = np.linspace(0.1, 0.7, 7)
for i in range(len(tIoU_thresh)):
fo.write("mAP@{:.1f}: {:.4f}\n".format(tIoU_thresh[i], test_info["mAP@{:.1f}".format(tIoU_thresh[i])][-1]))
fo.close()
def minmax_norm(act_map, min_val=None, max_val=None):
if min_val is None or max_val is None:
relu = nn.ReLU()
max_val = relu(torch.max(act_map, dim=1)[0])
min_val = relu(torch.min(act_map, dim=1)[0])
delta = max_val - min_val
delta[delta <= 0] = 1
ret = (act_map - min_val) / delta.detach()
ret[ret > 1] = 1
ret[ret < 0] = 0
return ret
def nms(proposals, thresh):
proposals = np.array(proposals)
x1 = proposals[:, 2]
x2 = proposals[:, 3]
scores = proposals[:, 1]
areas = x2 - x1 + 1
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(proposals[i].tolist())
xx1 = np.maximum(x1[i], x1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
inter = np.maximum(0.0, xx2 - xx1 + 1)
iou = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(iou < thresh)[0]
order = order[inds + 1]
return keep
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
torch.backends.cudnn.deterministic=True
torch.backends.cudnn.benchmark=False
def save_config(config, file_path):
fo = open(file_path, "w")
fo.write("Configurtaions:\n")
fo.write(str(config))
fo.close()
def feature_sampling(features, start, end, num_divide):
step = (end - start) / num_divide
feature_lst = torch.zeros((num_divide, features.shape[1])).cuda()
for i in range(num_divide):
start_point = int(start + step * i)
end_point = int(start + step * (i+1))
if start_point >= end_point:
end_point += 1
sample_id = np.random.randint(start_point, end_point)
feature_lst[i] = features[sample_id]
return feature_lst.mean(dim=0)
def get_oic_score(cas_sigmoid_fuse, start, end, delta=0.25):
length = end - start + 1
inner_score = torch.mean(cas_sigmoid_fuse[start:end+1])
outer_s = max(0, int(start - delta * length))
outer_e = min(int(cas_sigmoid_fuse.shape[0] - 1), int(end + delta * length))
outer_seg = list(range(outer_s, start)) + list(range(end + 1, outer_e + 1))
if len(outer_seg) == 0:
outer_score = 0
else:
outer_score = torch.mean(cas_sigmoid_fuse[outer_seg])
return inner_score - outer_score
def select_seed(cas_sigmoid_fuse, point_anno):
point_anno_agnostic = point_anno.max(dim=2)[0]
bkg_seed = torch.zeros_like(point_anno_agnostic)
act_seed = point_anno.clone().detach()
act_thresh = 0.1
bkg_thresh = 0.95
bkg_score = cas_sigmoid_fuse[:,:,-1]
for b in range(point_anno.shape[0]):
act_idx = torch.nonzero(point_anno_agnostic[b]).squeeze(1)
""" most left """
if act_idx[0] > 0:
bkg_score_tmp = bkg_score[b,:act_idx[0]]
idx_tmp = bkg_seed[b,:act_idx[0]]
idx_tmp[bkg_score_tmp >= bkg_thresh] = 1
if idx_tmp.sum() >= 1:
start_index = idx_tmp.nonzero().squeeze(1)[-1]
idx_tmp[:start_index] = 1
else:
max_index = bkg_score_tmp.argmax(dim=0)
idx_tmp[:max_index+1] = 1
""" pseudo action point selection """
for j in range(act_idx[0] - 1, -1, -1):
if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
act_seed[b, j] = act_seed[b, act_idx[0]]
else:
break
""" most right """
if act_idx[-1] < (point_anno.shape[1] - 1):
bkg_score_tmp = bkg_score[b,act_idx[-1]+1:]
idx_tmp = bkg_seed[b,act_idx[-1]+1:]
idx_tmp[bkg_score_tmp >= bkg_thresh] = 1
if idx_tmp.sum() >= 1:
start_index = idx_tmp.nonzero().squeeze(1)[0]
idx_tmp[start_index:] = 1
else:
max_index = bkg_score_tmp.argmax(dim=0)
idx_tmp[max_index:] = 1
""" pseudo action point selection """
for j in range(act_idx[-1] + 1, point_anno.shape[1]):
if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
act_seed[b, j] = act_seed[b, act_idx[-1]]
else:
break
""" between two instances """
for i in range(len(act_idx) - 1):
if act_idx[i+1] - act_idx[i] <= 1:
continue
bkg_score_tmp = bkg_score[b,act_idx[i]+1:act_idx[i+1]]
idx_tmp = bkg_seed[b,act_idx[i]+1:act_idx[i+1]]
idx_tmp[bkg_score_tmp >= bkg_thresh] = 1
if idx_tmp.sum() >= 2:
start_index = idx_tmp.nonzero().squeeze(1)[0]
end_index = idx_tmp.nonzero().squeeze(1)[-1]
idx_tmp[start_index+1:end_index] = 1
else:
max_index = bkg_score_tmp.argmax(dim=0)
idx_tmp[max_index] = 1
""" pseudo action point selection """
for j in range(act_idx[i] + 1, act_idx[i+1]):
if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
act_seed[b, j] = act_seed[b, act_idx[i]]
else:
break
for j in range(act_idx[i+1] - 1, act_idx[i], -1):
if bkg_score[b][j] <= act_thresh and bkg_seed[b][j] < 1:
act_seed[b, j] = act_seed[b, act_idx[i+1]]
else:
break
return act_seed, bkg_seed
|
StarcoderdataPython
|
64999
|
#$Id$
class Category:
"""This class is used to create object for category."""
def __init__(self):
"""Initialize parameters for Category."""
self.id = ""
self.name = ""
def set_id(self, id):
"""Set id.
Args:
id(str): Id.
"""
self.id = id
def get_id(self):
"""Get id.
Returns:
str: Id.
"""
return self.id
def set_name(self, name):
"""Set name.
Args:
name(str): name.
"""
self.name = name
def get_name(self):
"""Get name.
Returns:
str: name.
"""
return self.name
|
StarcoderdataPython
|
1703828
|
from __future__ import print_function
import re
import tempfile
from .base import Base
from .hash import HASH_ALGORITHM
from .signature import SIGN_ALGORITHM
_DIRTY_PATH = re.compile(r'(?:^|/)(\.\.?)(?:/|$)')
class Downloader(Base):
def _validate_entry_path(self, path):
if path.startswith('/'):
raise Exception(
'Unexpected absolute entry in manifest: {}'.format(path))
match = _DIRTY_PATH.search(path)
if match:
if match.group(1) == '.':
raise Exception(
'Manifest entry contains redundant \'./\' segments: {}'
.format(path))
raise Exception(
'Manifest entry contains disallowed \'../\' segments: {}'
.format(path))
def _validate_manifest(self, manifest):
'''Validates the manifest.
Returns list of (hash, mode, path)-tuples for the file entries in the
manifest.
'''
manifest.seek(0)
for hash_algorithm in manifest:
break
else:
raise Exception('Manifest is empty')
if hash_algorithm[:-1].decode() != HASH_ALGORITHM:
raise Exception(
'Unexpected hashing algorithm in manifest: {}'
.format(hash_algorithm[:-1]))
entries = []
for entry in manifest:
if entry == b'---\n':
break
entries.append(entry)
else:
raise Exception('Manifest is not trusted: missing signature')
for sign_algorithm in manifest:
break
else:
raise Exception('Manifest is not trusted: missing signature')
if sign_algorithm[:-1].decode() != SIGN_ALGORITHM:
raise Exception(
'Unexpected signing algorithm in manifest: {}'
.format(sign_algorithm[:-1]))
manifest_hash = self.get_hash([hash_algorithm] + entries)
signature = b''.join(list(manifest))
if not self.verify(manifest_hash, signature):
raise Exception(
'Manifest is not trusted: signature verification failed')
result = []
for entry in entries:
content_hash, mode, path = entry[:-1].split(b' ', 2)
content_hash = content_hash.decode()
mode = int(mode, 8)
path = path.decode()
self._validate_entry_path(path)
result.append((content_hash, mode, path))
return result
def download(self):
with tempfile.SpooledTemporaryFile(max_size=1024*1024) as manifest:
self.download_manifest(manifest)
entries = self._validate_manifest(manifest)
self.ensure_directory(self.directory)
for content_hash, mode, path in entries:
self.download_file(path, mode=mode, content_hash=content_hash)
run = download
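# Manifest layout as reconstructed from _validate_manifest above (field values illustrative):
#     <hash algorithm name>\n
#     <content hash> <octal mode> <relative path>\n      (one line per file entry)
#     ---\n
#     <signing algorithm name>\n
#     <signature bytes>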
|
StarcoderdataPython
|
1881308
|
<filename>auction_api/api/bidder_admin.py
import pymongo
from werkzeug.wrappers.response import Response
import utils.globales as globales
from bson.json_util import dumps
def get_requests_by_status(status):
if status not in ["0","1"]:
return Response("Invalid Status",400)
mongo_cli = pymongo.MongoClient(globales.mongodb_uri)
db = mongo_cli[globales.mongodb_db]
col = db['bidders']
cur = col.find({"status" : int(status)})
result = dumps(cur)
cur.close()
return Response(result,200)
def delete_pending_requests(id_number):
pass
def approve_pending_requests(id_number):
pass
|
StarcoderdataPython
|
1952711
|
numbers = [int(s) for s in input().split(', ')]
def get_positive_numbers(numbers):
return [str(s) for s in numbers if s >= 0]
def get_negative_numbers(numbers):
return [str(s) for s in numbers if s < 0]
def get_odd_numbers(numbers):
return [str(s) for s in numbers if s % 2 != 0]
def get_even_numbers(numbers):
return [str(s) for s in numbers if s % 2 == 0]
def print_result(positive_numbers, negative_numbers, odd_numbers, even_numbers):
print(f'Positive: {", ".join(positive_numbers)}')
print(f'Negative: {", ".join(negative_numbers)}')
print(f'Even: {", ".join(even_numbers)}')
print(f'Odd: {", ".join(odd_numbers)}')
positive_numbers = get_positive_numbers(numbers)
negative_numbers = get_negative_numbers(numbers)
odd_numbers = get_odd_numbers(numbers)
even_numbers = get_even_numbers(numbers)
print_result(positive_numbers, negative_numbers, odd_numbers, even_numbers)
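# Worked example (illustrative): for the input "1, -2, 3, -4" the script prints
#     Positive: 1, 3
#     Negative: -2, -4
#     Even: -2, -4
#     Odd: 1, 3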
|
StarcoderdataPython
|
11205921
|
import datetime as dt
from pandas_datareader.data import DataReader
from .download_splits_dividends import download_splits_dividends
def download_stock(symbol, start_date=dt.datetime(1990, 1, 1)):
# Download assets.
stock = DataReader(symbol, "yahoo", start_date)
stock.rename(columns={"Adj Close": "Adj. Close"}, inplace=True)
# Compute the adjusted prices.
ratio = stock["Adj. Close"] / stock["Close"]
stock["Adj. Open"] = stock["Open"] * ratio
stock["Adj. High"] = stock["High"] * ratio
stock["Adj. Low"] = stock["Low"] * ratio
stock["Adj. Volume"] = stock["Volume"] * ratio
stock["Split Ratio"] = 1.0
stock["Ex-Dividend"] = 0.0
# Fetch the dividends and splits for this stock. Notice that we restrict the
# dates to lie in the appropriate range.
ds = download_splits_dividends(symbol)
# Store dividend data.
if "DIVIDEND" in ds.index:
divs = ds.ix[["DIVIDEND"]].set_index("datetime")
idx = divs.index.intersection(stock.index)
stock.ix[idx, "Ex-Dividend"] = [
float(x) for x in divs.ix[idx, "adjustment"]
]
# Store stock split data.
if "SPLIT" in ds.index:
splits = ds.ix[["SPLIT"]].set_index("datetime")
splits["adjustment"] = [
float(x.split(":")[0]) / float(x.split(":")[1])
for x in splits["adjustment"]
]
idx = splits.index.intersection(stock.index)
stock.ix[idx, "Split Ratio"] = splits.ix[idx, "adjustment"]
return stock
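# Example call (illustrative; requires network access and a working "yahoo" data source):
#     aapl = download_stock("AAPL", start_date=dt.datetime(2015, 1, 1))
#     aapl[["Adj. Close", "Ex-Dividend", "Split Ratio"]].tail()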
|
StarcoderdataPython
|
3552544
|
<reponame>ghanigreen/pytest_code
import textwrap
from math import sqrt
from pytest import approx
def magnitude(x, y):
return sqrt(x * x + y * y)
def test_simple_math():
assert abs(0.1 + 0.2) - 0.3 < 0.0001
def test_simple_math2():
assert (-0.1 - 0.2) + 0.3 < 0.0001
def test_approx_simple():
assert 0.1 + 0.2 == approx(0.3)
def test_approx_list():
assert [0.1 + 1.2, 0.2 + 0.8] == approx([1.3, 1.0])
def test_approx_dict():
values = {"v1": 0.1 + 1.2, "v2": 0.2 + 0.8}
assert values == approx(dict(v1=1.3, v2=1.0))
def test_approx_simple_fail():
assert 0.1 + 0.2 == approx(0.35)
def test_approx_list_fail():
assert [0.1 + 1.2, 0.2 + 0.8] == approx([1.3, 1.1])
def test_approx_numpy():
import numpy as np
values = np.array([0.1, 0.2]) + np.array([1.2, 0.8])
assert values == approx(np.array([1.3, 1.0]))
def test_magnitude_plain():
assert abs(magnitude(8.0, 20.0) - 21.540659) < 0.00001
def test_magnitude():
assert magnitude(8.0, 20.0) == approx(21.540659)
def get_default_health(class_name):
assert class_name == "warrior"
return 80
def test_default_health():
health = get_default_health("warrior")
assert health == 95
def get_default_player_class():
return "warrior"
def test_default_player_class():
x = get_default_player_class()
assert x == "sorcerer"
def get_short_class_description(class_name):
assert class_name == "warrior"
return "A battle-hardened veteran, favors heavy armor and weapons."
def test_warrior_short_description():
desc = get_short_class_description("warrior")
assert (
desc
== "A battle-hardened veteran, can equip heavy armor and weapons."
)
def get_long_class_description(class_name):
assert class_name == "warrior"
return textwrap.dedent(
"""\
A seasoned veteran of many battles. High Strength and Dexterity
allow to yield heavy armor and weapons, as well as carry
more equipment while keeping a light roll. Weak in magic.
"""
)
def test_warrior_long_description():
desc = get_long_class_description("warrior")
assert (
desc
== textwrap.dedent(
"""\
A seasoned veteran of many battles. Strength and Dexterity
allow to yield heavy armor and weapons, as well as carry
more equipment. Weak in magic.
"""
)
)
def get_starting_equipment(class_name):
assert class_name == "warrior"
return ["long sword", "warrior set", "shield"]
def test_get_starting_equiment():
expected = ["long sword", "shield"]
assert get_starting_equipment("warrior") == expected
def test_long_list():
x = [str(i) for i in range(100)]
y = [str(i) for i in range(0, 100, 2)]
assert x == y
def get_classes_starting_health():
return {"warrior": 85, "sorcerer": 55, "knight": 95}
def test_starting_health():
expected = {"warrior": 85, "sorcerer": 50}
assert get_classes_starting_health() == expected
def get_player_classes():
return {"warrior", "knight", "sorcerer"}
def test_player_classes():
assert get_player_classes() == {"warrior", "sorcerer"}
|
StarcoderdataPython
|