id | text | dataset_id
---|---|---
1639987 | import pytest
from ciphers import vignere
def test_encrypt():
plaintext ="ATTACKATDAWN"
key = "LEMON"
assert vignere.encrypt(plaintext, key) == "LXFOPVEFRNHR"
def test_decrypt():
ciphertext ="LXFOPVEFRNHR"
key = "LEMON"
assert vignere.decrypt(ciphertext, key) == "ATTACKATDAWN"
def test_encrypt_1():
"""
Test w/ keylength of 1, should be identical to a shift of 1
"""
plaintext = "ABCDEFG"
key = "B"
assert vignere.encrypt(plaintext, key) == "BCDEFGH"
def test_decrypt_1():
"""
Test w/ keylength of 1, should be identical to a shift of 1
"""
plaintext = "BCDEFGH"
key = "B"
assert vignere.decrypt(plaintext, key) == "ABCDEFG"
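# A minimal sketch of the ciphers.vignere module that these tests assume
# (hypothetical, reconstructed from the expected values above): the key
# advances only over alphabetic characters, output is uppercase, and a
# non-alphabetic key raises ValueError. Decrypt mirrors encrypt with sign -1.
#
#   def _shift(ch, k, sign):
#       return chr((ord(ch) - ord("A") + sign * (ord(k) - ord("A"))) % 26 + ord("A"))
#
#   def encrypt(plaintext, key):
#       if not key.isalpha():
#           raise ValueError("key must be alphabetic")
#       key, out, i = key.upper(), [], 0
#       for ch in plaintext.upper():
#           if ch.isalpha():
#               out.append(_shift(ch, key[i % len(key)], +1))
#               i += 1
#           else:
#               out.append(ch)
#       return "".join(out)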
def test_non_alpha_text():
"""
"""
plaintext = "ATTACK AT DAWN"
key = "LEMON"
assert vignere.encrypt(plaintext, key) == "LXFOPV EF RNHR"
assert vignere.decrypt("LXFOPV EF RNHR", key) == plaintext
def test_non_alpha_key():
with pytest.raises(ValueError):
vignere.encrypt("ATTACKATDAWN", "LEMON MELON")
def test_non_cap_text():
"""
"""
plaintext = "attackatdawn"
key = "LEMON"
assert vignere.encrypt(plaintext, key) == "LXFOPVEFRNHR"
assert vignere.decrypt("LXFOPVEFRNHR".lower(), key) == plaintext.upper()
def test_non_cap_key():
"""
"""
plaintext = "ATTACKATDAWN"
key = "lemon"
assert vignere.encrypt(plaintext, key) == "LXFOPVEFRNHR"
assert vignere.decrypt("LXFOPVEFRNHR".lower(), key) == plaintext | StarcoderdataPython |
1776891 | #!/usr/bin/env python
import os
import sys
import unittest
from random import randint
from concurrent.futures import ProcessPoolExecutor, as_completed
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
from getm.concurrent import SharedBufferArray
def _write(sb_name, chunk_id, chunk_content):
with SharedBufferArray(sb_name) as sb:
sb[chunk_id][:] = chunk_content
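# Each worker process re-attaches to the same shared memory block by name and
# writes its chunk in place, so the parent can read the assembled buffer back
# without copying data through the process pool.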
class TestSharedBufferArray(unittest.TestCase):
def test_foo(self):
num_chunks, chunk_size = 4, 5
expected = b"".join([os.urandom(chunk_size) for _ in range(num_chunks)])
with SharedBufferArray(chunk_size=chunk_size, num_chunks=num_chunks, create=True) as sb:
with ProcessPoolExecutor(max_workers=num_chunks) as e:
futures = [None] * num_chunks
for i in range(num_chunks):
chunk = expected[i * chunk_size: (i + 1) * chunk_size]
futures[i] = e.submit(_write, sb.name, i, chunk)
for f in as_completed(futures):
f.result()
self.assertEqual(expected, bytes(sb._shared_memory.buf[:num_chunks * chunk_size]))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1696304 | <gh_stars>0
import sys
import time

import torch
import cv2
from flask import Response  # assumed: `server` (the Flask app) is defined elsewhere in the original project
def time_synchronized():
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()
class VideoCamera(object):
def __init__(self):
global res;
self.video = cv2.VideoCapture(sys.argv[1])
res = f"{int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))} x {int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))}"
def __del__(self):
self.video.release()
cv2.destroyAllWindows()
    def get_frame(self):
        global fps
        t1 = time_synchronized()
        success, image = self.video.read()
        if success:
            # approximate FPS from the time taken to read one frame
            fps = f"{int(1. / (time_synchronized() - t1))}"
            ret, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()
        else:
            return "Video is Completed !!!"
def gen(camera):
fps = 0.0
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@server.route('/video_feed')
def video_feed():
return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
# ---------------------------------------------------------------------------------------------------#
| StarcoderdataPython |
3205166 | # -*- coding: UTF-8 -*-
import M2Crypto
from Crypto.PublicKey import RSA
import base64
import sys
# Encrypt with the private key
def pri_encrypt(msg, file_name):
    rsa_pri = M2Crypto.RSA.load_key(file_name)
    output = ''
    while msg:
        # PKCS#1 v1.5 padding costs 11 bytes, so a 1024-bit (128-byte) key
        # can encrypt at most 117 bytes per block
        input = msg[:117]
        msg = msg[117:]
        out = rsa_pri.private_encrypt(input, M2Crypto.RSA.pkcs1_padding)  # encrypt
        output = output + out
    output64 = output.encode('base64')
    print('Ciphertext:\n%s' % output64)
    return output64
# Encrypt with the public key
def pub_encrypt(msg, file_name):
    rsa_pub = M2Crypto.RSA.load_pub_key(file_name)
    output = ''
    while msg:
        input = msg[:117]
        msg = msg[117:]
        out = rsa_pub.public_encrypt(input, M2Crypto.RSA.pkcs1_padding)  # encrypt
        output = output + out
    output64 = output.encode('base64')
    print('Ciphertext:\n%s' % output64)
    return output64
# Decrypt with the public key
def pub_decrypt_with_pubkeyfile(msg, file_name):
    rsa_pub = M2Crypto.RSA.load_pub_key(file_name)
    pub_decrypt(msg, rsa_pub)
def pub_decrypt(msg, rsa_pub):
    ctxt_pri = msg.decode("base64")  # first base64-decode the string
    maxlength = 128
    output = ''
    while ctxt_pri:
        input = ctxt_pri[:128]
        ctxt_pri = ctxt_pri[128:]
        out = rsa_pub.public_decrypt(input, M2Crypto.RSA.pkcs1_padding)  # decrypt
        output = output + out
    print('Plaintext: %s' % output)
# Decrypt with the private key
def pri_decrypt_with_prikeyfile(msg, file_name):
    rsa_pri = M2Crypto.RSA.load_key(file_name)
    pri_decrypt(msg, rsa_pri)
def pri_decrypt(msg, rsa_pri):
    ctxt_pri = msg.decode("base64")  # first base64-decode the string
    maxlength = 128
    output = ''
    while ctxt_pri:
        input = ctxt_pri[:128]
        ctxt_pri = ctxt_pri[128:]
        out = rsa_pri.private_decrypt(input, M2Crypto.RSA.pkcs1_padding)  # decrypt
        output = output + out
    print('Plaintext: %s' % output)
################################################################################
if __name__ == "__main__":
prikey_file = '/home/kiya/Desktop/rsa_private_key.pem'
pubkey_file = '/home/kiya/Desktop/rsa_public_key.pem'
msg = 'hello'
pub_encrypt(msg,pubkey_file)
# primsg = pri_encrypt(msg, prikey_file)
# pub_decrypt_with_pubkeyfile(primsg, pubkey_file)
| StarcoderdataPython |
3328037 | <gh_stars>0
import json
from data_visualysis import DataModeler
def main():
filename = 'covid_turkey.json'
with open(filename, 'r') as f_obj:
contents = json.load(f_obj)
daily_results = []
tests_list = []
cases_list = []
deaths_list = []
healed_list = []
result_lists = [tests_list, cases_list, deaths_list, healed_list]
for daily_result in contents.values():
tests_list.append(daily_result['Tests'])
cases_list.append(daily_result['Cases'])
deaths_list.append(daily_result['Deaths'])
healed_list.append(daily_result['Healed'])
data_modeler = DataModeler(dataset=[1])
    titles = ["Tests", "Cases", "Deaths", "Healed"]
    for title, result_list in zip(titles, result_lists):
        data_modeler.dataset = result_list
        data_modeler.calculate.dataset = result_list
        print("\nNow analyzing", title, "results:\n")
        data_modeler.show_results(True)
        data_modeler.line_graph(title=title)
new_dataset = []
for test, case in zip(tests_list, cases_list):
new_dataset.append(test / case)
data_modeler.dataset = new_dataset
data_modeler.line_graph(title="Test/Case Per Day")
if __name__ == '__main__':
main()
# write_covid_results() | StarcoderdataPython |
3329677 | # -*- coding: utf-8 -*-
__version__ = "2.11.2" # version bump; deviates from divio/master which is at 1.11.0
| StarcoderdataPython |
179193 | #
# Explicit model for potential drop across a lithium metal electrode
#
from .base_ohm import BaseModel
class LithiumMetalExplicit(BaseModel):
"""Explicit model for potential drop across a lithium metal electrode.
Parameters
----------
param : parameter class
The parameters to use for this submodel
options : dict, optional
A dictionary of options to be passed to the model.
**Extends:** :class:`pybamm.electrode.ohm.BaseModel`
"""
def __init__(self, param, options=None):
super().__init__(param, "Negative", options=options)
def get_coupled_variables(self, variables):
param = self.param
i_boundary_cc = variables["Current collector current density"]
T_n = variables["Negative current collector temperature"]
l_n = param.l_n
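        # Ohm's law across the metal electrode: the potential drop equals the
        # current density times the electrode thickness over its conductivity.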
delta_phi_s = i_boundary_cc * l_n / param.sigma_n(T_n)
delta_phi_s_dim = param.potential_scale * delta_phi_s
variables.update(
{
"Negative electrode potential drop": delta_phi_s,
"Negative electrode potential drop [V]": delta_phi_s_dim,
"X-averaged negative electrode ohmic losses": delta_phi_s / 2,
"X-averaged negative electrode ohmic losses [V]": delta_phi_s_dim / 2,
}
)
return variables
def set_boundary_conditions(self, variables):
pass
| StarcoderdataPython |
3359381 | <gh_stars>100-1000
""" Implementação do algoritmo passeio do cavalo """
def aceitavel(x, y):
    """
    A square is acceptable if it lies inside the board and has not been
    visited yet.
    Returns True or False
    """
    return (
        x >= 0
        and x <= num - 1
        and y >= 0
        and y <= num - 1
        and tabuleiro[x][y] == 0
    )
def tenta_mover(i, x, y):
    """
    Tries the i-th move at (x, y), 1 <= i <= n^2
    """
    done = i > numSqr  # True or False
    k = 0
    while not done and k < 8:
        u = x + dx[k]  # coordinates of the 8 possible knight moves
        v = y + dy[k]
        if aceitavel(u, v):
            tabuleiro[u][v] = i
            done = tenta_mover(i + 1, u, v)  # try the next move
            if not done:
                tabuleiro[u][v] = 0  # no success, undo the move (backtrack)
        k += 1  # advance to the next candidate move
    return done
def mostra_movimento(x, y):
tabuleiro[x][y] = 1
done = tenta_mover(2, x, y)
string = ""
if done:
for x in range(0, num):
for y in range(0, num):
if tabuleiro[x][y] < 10:
string += "0" + str(tabuleiro[x][y]) + " "
else:
string += str(tabuleiro[x][y]) + " "
string += "\n"
print(string)
else:
print("Nao ha passeio possivel\n")
dx = [2, 1, -1, -2, -2, -1, 1, 2]
dy = [1, 2, 2, 1, -1, -2, -2, -1]
print("Digite o num de posicoes do tabuleiro: (ex.: 6) <= 10")
num = int(input()) # Numero de posicoes do tabuleiro
print("Digite a posicao x onde o cavalo deve iniciar: (ex.: 1) >= 0")
x = int(input()) # Numero de posicoes do tabuleiro
print("Digite a posicao y onde o cavalo deve iniciar: (ex.: 2) >= 0")
y = int(input()) # Numero de posicoes do tabuleiro
numSqr = num * num # Numero total de casas
print()
tabuleiro = [[], [], [], [], [], [], [], [], [], []] # Tabuleiro maximo 20x20
for x in range(0, num):
for y in range(0, num):
tabuleiro[x].append(0)
# print tabuleiro
mostra_movimento(x, y)
| StarcoderdataPython |
3234942 | from django.dispatch import receiver
from django.urls import resolve, reverse
from django.utils.translation import ugettext_lazy as _
from pretix.presale.signals import sass_postamble
from pretix.control.signals import nav_event_settings
@receiver(nav_event_settings, dispatch_uid="custom_css_settings")
def custom_css_settings(sender, request, **kwargs):
url = resolve(request.path_info)
return [
{
"label": _("Custom CSS"),
"url": reverse(
"plugins:pretix_custom_css:settings",
kwargs={
"event": request.event.slug,
"organizer": request.organizer.slug,
},
),
"active": url.namespace == "plugins:pretix_custom_css"
and url.url_name == "settings",
}
]
@receiver(sass_postamble, dispatch_uid="custom_css_sass_postamble")
def custom_css_sass_postamble(sender, filename=None, **kwargs):
if sender.settings.custom_css_code:
return sender.settings.custom_css_code
else:
return ""
| StarcoderdataPython |
3381873 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-09 20:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('showings', '0005_auto_20170809_1859'),
]
operations = [
migrations.AddField(
model_name='show',
name='cooldown_period',
field=models.IntegerField(default=0, help_text=b'Will be filled when saving according to type of show. Input the value manually to set override.'),
),
migrations.AddField(
model_name='show',
name='show_type',
field=models.CharField(choices=[(b'ms', b'Main series'), (b'mc', b'Main series candidate'), (b'ex', b'Exec choice'), (b'an', b'Allnighter'), (b'ot', b'Other')], default='ot', max_length=2),
preserve_default=False,
),
]
| StarcoderdataPython |
4822179 | """Day 4 challenge"""
# Built-in
import re
# Personal
from _shared import read_input
# --------------------------------------------------------------------------------
# > Helpers
# --------------------------------------------------------------------------------
class PassportForm:
LINE_REGEX = r"([a-z]{3}):([^ ]+)"
FIELDS = [
("byr", True), # field_name, required
("iyr", True),
("eyr", True),
("hgt", True),
("hcl", True),
("ecl", True),
("pid", True),
("cid", False),
]
def __init__(self, line):
"""
Read the passport info to fill a PassportForm
:param str line: Passport data from the input file
"""
self.line = line
self.fill_form()
self.find_invalid_fields()
def fill_form(self):
"""Parses the input file to set our form fields/values"""
for match in re.finditer(self.LINE_REGEX, self.line):
field = match.group(1)
value = match.group(2)
setattr(self, field, value)
def find_invalid_fields(self):
"""
Checks for missing fields
:return: The required fields that are missing from our form
:rtype: set
"""
invalid_fields = set()
for field_name, required in self.FIELDS:
value = getattr(self, field_name, None)
# Check required
if required and value is None:
invalid_fields.add(field_name)
# Custom validation
if value is not None:
function_name = f"validate_{field_name}"
field_validation_function = getattr(self, function_name)
if not field_validation_function():
invalid_fields.add(field_name)
self.invalid_fields = invalid_fields
@property
def is_valid(self):
"""
:return: Whether the form is valid
:rtype: bool
"""
return len(self.invalid_fields) == 0
def validate_byr(self):
"""
:return: Whether BYR is within the range
:rtype: bool
"""
value = int(self.byr)
return 1920 <= value <= 2002
def validate_iyr(self):
"""
:return: Whether IYR is within the range
:rtype: bool
"""
value = int(self.iyr)
return 2010 <= value <= 2020
def validate_eyr(self):
"""
:return: Whether EYR is within the range
:rtype: bool
"""
value = int(self.eyr)
return 2020 <= value <= 2030
def validate_hgt(self):
"""
Checks the HGT is valid and within the right range, depending on the unit of measure
:return: Whether HGT is within the range
:rtype: bool
"""
regex = r"^(\d+)(cm|in)$"
match = re.match(regex, self.hgt)
if match is not None:
value = int(match.group(1))
units = match.group(2)
if units == "cm":
return 150 <= value <= 193
else:
return 59 <= value <= 76
return False
def validate_hcl(self):
"""
:return: Whether the HCL format is valid
:rtype: bool
"""
regex = r"^#[a-f0-9]{6}$"
        return re.match(regex, self.hcl) is not None
def validate_ecl(self):
"""
:return: Whether the ECL value is in the list of accepted values
:rtype: bool
"""
return self.ecl in {
"amb",
"blu",
"brn",
"gry",
"grn",
"hzl",
"oth",
}
def validate_pid(self):
"""
:return: Whether PID is a chain of 9 digits
:rtype: bool
"""
regex = r"^\d{9}$"
        return re.match(regex, self.pid) is not None
@staticmethod
def validate_cid():
"""
:return: No custom validation. Always valid
:rtype: bool
"""
return True
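# A minimal usage sketch (hypothetical record, not part of the puzzle input):
# the form accepts one passport record as a single line of space-separated
# key:value pairs.
_example_form = PassportForm("byr:1980 iyr:2012 eyr:2020 hgt:170cm hcl:#123abc ecl:brn pid:012345678")
assert _example_form.is_valid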
def get_passport_info_from_input():
"""
Fetches the input file and rebuilds passport info as a one-liner
:return: List string of passport info
:rtype: [str]
"""
passport_list = []
text = ""
for line in read_input("day_04.txt"):
if line != "":
text += f" {line}"
else:
passport_list.append(text[1:])
text = ""
passport_list.append(text[1:]) # Adding the last one
return passport_list
# --------------------------------------------------------------------------------
# > Main
# --------------------------------------------------------------------------------
passport_info = get_passport_info_from_input()
passport_forms = [PassportForm(line) for line in passport_info]
valid_forms = [form for form in passport_forms if form.is_valid]
print(len(valid_forms))
| StarcoderdataPython |
1657015 | from logger import log_info
from Classes.Metadata import Metadata
from Classes.PortablePacket import PortablePacket
from timeit import default_timer as timer
from extension import write, write_debug
from colorama import Fore
from zip_utils import *
import os
import sys
home = os.path.expanduser('~')
def install_portable(packet: PortablePacket, metadata: Metadata):
if find_existing_installation(f'{packet.extract_dir}@{packet.latest_version}'):
log_info(
f'Detected an existing installation of {packet.display_name}', metadata.logfile)
write(
f'Found Existing Installation Of {packet.display_name}', 'bright_yellow', metadata)
continue_installation = confirm(
f'Would you like to reinstall {packet.display_name}?')
if not continue_installation:
sys.exit()
if packet.dependencies:
log_info(
f'Installing dependencies for {packet.display_name}', metadata.logfile)
install_dependencies(packet, metadata)
changes_environment = False
shortcuts = packet.shortcuts
extract_dir = packet.extract_dir
write_debug(
f'Downloading {packet.json_name}{packet.file_type} from {packet.url}', metadata)
log_info(
f'Downloading {packet.json_name}{packet.file_type} from {packet.url}', metadata.logfile)
show_progress_bar = not metadata.silent and not metadata.no_progress
if isinstance(packet.url, str):
download(packet, packet.url, packet.file_type, rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}',
metadata, show_progress_bar=show_progress_bar, is_zip=True)
if packet.checksum:
verify_checksum(
rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}{packet.file_type}', packet.checksum, metadata)
unzip_dir = unzip_file(f'{packet.extract_dir}@{packet.latest_version}' +
packet.file_type, f'{extract_dir}@{packet.latest_version}', packet.file_type, metadata)
elif isinstance(packet.url, list):
for idx, url in enumerate(packet.url):
if idx == 0:
download(packet, url['url'], '.zip', rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}',
metadata, show_progress_bar=show_progress_bar, is_zip=True)
unzip_dir = unzip_file(
f'{packet.extract_dir}@{packet.latest_version}' + '.zip', extract_dir, url['file-type'], metadata)
else:
write(
f'Downloading {url["file-name"]}{url["file-type"]}', 'cyan', metadata)
download(packet, url['url'], url['file-type'],
rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}\\{url["file-name"]}', metadata, show_progress_bar=False, is_zip=False)
if packet.pre_install:
log_info('Executing pre install code', metadata.logfile)
if packet.pre_install['type'] == 'powershell':
packet.pre_install['code'] = [l.replace('<dir>', unzip_dir.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
packet.pre_install['code'] = [l.replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
if not os.path.isdir(rf'{home}\electric\temp\Scripts'):
try:
os.mkdir(rf'{home}\electric\temp')
except:
# temp directory already exists
pass
os.mkdir(rf'{home}\electric\temp\Scripts')
with open(rf'{home}\electric\temp\Scripts\temp.ps1', 'w+') as f:
for line in packet.pre_install['code']:
f.write(f'\n{line}')
os.system(
rf'powershell -executionpolicy bypass -File {home}\electric\temp\Scripts\temp.ps1')
write('Successfully Executed Pre-Install Code',
'bright_green', metadata)
if packet.pre_install['type'] in ['bat', 'cmd']:
packet.pre_install['code'] = [l.replace('<dir>', unzip_dir.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
packet.pre_install['code'] = [l.replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
if not os.path.isdir(rf'{home}\electric\temp\Scripts'):
try:
os.mkdir(rf'{home}\electric\temp')
except:
# temp directory already exists
pass
os.mkdir(rf'{home}\electric\temp\Scripts')
with open(rf'{home}\electric\temp\Scripts\temp.bat', 'w+') as f:
for line in packet.pre_install['code']:
f.write(f'\n{line}')
os.system(
rf'{home}\electric\temp\Scripts\temp.bat')
write('Successfully Executed Pre-Install Code',
'bright_green', metadata)
if packet.pre_install['type'] == 'python':
            code = ''.join(l + '\n' for l in packet.pre_install['code'])
exec(code)
if packet.chdir:
dir = packet.chdir.replace('<version>', packet.latest_version)
unzip_dir += f'\\{dir}\\'
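    # For every binary listed in the manifest, a "shim" (a small launcher placed
    # on the user's PATH) is generated so the executable inside the versioned
    # install directory can be invoked by name.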
if packet.bin and isinstance(packet.bin, list):
for binary in packet.bin:
if isinstance(binary, str):
shim_dir = unzip_dir
shim = ''.join(binary.split('.')[:-1])
shim_ext = binary.split('.')[-1]
if '\\' in binary:
shim = ''.join(binary.split('\\')[-1])
shim = ''.join(shim.split('.')[:-1])
shim_ext = binary.split('.')[-1]
shim_dir += ' '.join(binary.split('\\')
[:-1]).replace(' ', '\\')
shim = shim.replace('<version>', packet.latest_version)
shim_dir = shim_dir.replace('<version>', packet.latest_version)
start = timer()
generate_shim(f'{shim_dir}', shim, shim_ext)
end = timer()
write(
f'{Fore.LIGHTCYAN_EX}Successfully Generated {shim} Shim In {round(end - start, 5)} seconds{Fore.RESET}', 'white', metadata)
else:
val = binary['file-name']
shim_dir = unzip_dir
shim = ''.join(val.split('.')[:-1])
shim_ext = val.split('.')[-1]
if '\\' in val:
shim = ''.join(val.split('\\')[-1])
shim = ''.join(shim.split('.')[:-1])
shim_ext = val.split('.')[-1]
shim_dir += ' '.join(val.split('\\')
[:-1]).replace(' ', '\\')
shim = shim.replace('<version>', packet.latest_version)
shim_dir = shim_dir.replace('<version>', packet.latest_version)
val = val.replace('<version>', packet.latest_version)
start = timer()
generate_shim(f'{shim_dir}', val.split(
'\\')[-1].split('.')[0], shim_ext, overridefilename=binary['shim-name'])
end = timer()
write(
f'{Fore.LIGHTCYAN_EX}Successfully Generated {binary["shim-name"]} Shim In {round(end - start, 5)} seconds{Fore.RESET}', 'white', metadata)
if shortcuts:
for shortcut in shortcuts:
shortcut_name = shortcut['shortcut-name']
file_name = shortcut['file-name']
log_info(
f'Creating shortcuts for {packet.display_name}', metadata.logfile)
create_start_menu_shortcut(unzip_dir, file_name, shortcut_name)
if packet.set_env:
if isinstance(packet.set_env, list):
changes_environment = True
for obj in packet.set_env:
log_info(
f'Setting environment variables for {packet.display_name}', metadata.logfile)
write(
f'Setting Environment Variable {obj["name"]}', 'bright_green', metadata)
set_environment_variable(obj['name'], obj['value'].replace(
'<install-directory>', unzip_dir).replace('\\\\', '\\'))
else:
changes_environment = True
log_info(
f'Setting environment variables for {packet.display_name}', metadata.logfile)
write(
f'Setting Environment Variable {packet.set_env["name"]}', 'bright_green', metadata)
set_environment_variable(packet.set_env['name'], packet.set_env['value'].replace(
'<install-directory>', unzip_dir).replace('\\\\', '\\'))
if changes_environment:
log_info(
'Detected change in PATH variable. Requesting `refreshenv` to be run', metadata.logfile)
write(
f'{Fore.LIGHTGREEN_EX}The PATH environment variable has changed. Run `refreshenv` to refresh your environment variables.{Fore.RESET}', 'white', metadata)
if packet.post_install:
log_info('Executing post installation code', metadata.logfile)
for line in packet.post_install:
exec(line.replace('<install-directory>', unzip_dir).replace('<extras>',
rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'))
if packet.install_notes:
log_info('Found Installation Notes, Writing To Console.',
metadata.logfile)
display_notes(packet, unzip_dir, metadata)
write(
f'Successfully Installed {packet.display_name}', 'bright_magenta', metadata)
| StarcoderdataPython |
3386972 | <reponame>TroyWilliams3687/fuel_tracker
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# -----------
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 <NAME>
# uuid: 81ef08b8-0503-11ec-b7e5-a9913e95621d
# author: <NAME>
# email: <EMAIL>
# date: 2021-08-24
# -----------
"""
Perform bulk operations on the database.
"""
# ------------
# System Modules - Included with Python
from pathlib import Path
from datetime import date
# ------------
# 3rd Party - From PyPI
import click
import pandas as pd
from pandas import ExcelWriter
from sqlalchemy import delete
# ------------
# Custom Modules
from .models import (
Vehicle,
FuelRecord,
select_vehicle_by_id,
select_vehicle_by_name,
)
from .common import is_int
# -------------
@click.group("bulk")
@click.pass_context
def bulk(*args, **kwargs):
"""
Perform bulk operations on the database such as adding new vehicles
and records or deleting existing vehicles and records.
# Usage
$ ft bulk add ./data/vw-passat-2015.ods
"""
pass
@bulk.command("add")
@click.pass_context
@click.argument(
"spreadsheet",
nargs=-1, # accept an unlimited number of arguments. This makes it an iterable
type=click.Path(
exists=True,
dir_okay=False,
readable=True,
path_type=Path,
),
)
def add(*args, **kwargs):
"""
Add a new vehicle and fuel records from a spreadsheet to the
database. You must specify the path to the spreadsheet. This
    method assumes that the vehicle does not exist in the database and
none of the fuel records exist either.
Support for `.ods` Open Office Format and `.xlsx` Excel format. The
spreadsheet should have the following columns:
- name
- make
- model
- year
- tank_capacity
- initial_odometer
- fill_date
- mileage
- fuel
- cost
- partial
- comment
NOTE: The order doesn't matter.
# Usage
\b
$ ft bulk add ./data/vw-passat-2015.ods
$ ft bulk add ./data/vw-passat-2015.ods ./data/dodge-intrepid-1997.ods
$ ft bulk add ./data/*.ods
"""
ctx = args[0]
config = ctx.obj["config"]
for spreadsheet in kwargs["spreadsheet"]:
click.echo(f"Processing {spreadsheet}...")
df = pd.read_excel(spreadsheet, parse_dates=["fill_date"])
# Fill NaN with 0, before casting to bool. Otherwise we end up
# with a lot of True values.
df["partial"] = df["partial"].fillna(0)
df = df.astype({"partial": bool})
vehicles = {}
vehicle_columns = [
"name",
"make",
"model",
"year",
"tank_capacity",
"initial_odometer",
]
for vehicle_values, group in df.groupby(vehicle_columns):
new_vehicle = Vehicle(
**{k: v for k, v in zip(vehicle_columns, vehicle_values)}
)
# remove the vehicle columns from the dataframe
fr = group.drop(vehicle_columns, axis=1)
new_vehicle.fuel_records = [
FuelRecord(**fuel_record) for fuel_record in fr.to_dict("records")
]
with config["db"].begin() as session:
session.add(new_vehicle)
session.flush() # get the new id
click.echo(new_vehicle)
# create a vehicle format function that can handle the
# units (liters and kilometers)
click.echo(f"Fuel Records: {len(new_vehicle.fuel_records)}")
click.echo()
@bulk.command("delete")
@click.pass_context
@click.argument(
"vehicles",
nargs=-1,
type=str,
)
def delete_vehicles(*args, **kwargs):  # renamed so it does not shadow sqlalchemy's delete() used below
"""
Delete the vehicle (by name or id) from the database along with all
of its fuel records.
# Usage
\b
$ ft bulk delete passat 2
"""
ctx = args[0]
config = ctx.obj["config"]
with config["db"].begin() as session:
for vid in kwargs["vehicles"]:
click.echo(f"Deleting {vid}...")
if is_int(vid):
statement = select_vehicle_by_id(vid)
else:
statement = select_vehicle_by_name(vid)
selected_vehicle = session.execute(statement).first()
# NOTE: session.execute returns an iterable. Deal with it
# appropriately
if len(selected_vehicle) == 1:
delete_query = delete(Vehicle).where(Vehicle.vehicle_id == vid)
session.execute(delete_query)
elif len(selected_vehicle) == 0:
click.secho(f"No matches for: {vid}", fg="cyan")
else:
click.secho(
f"More than one vehicle returned ({len(selected_vehicle)})! Doing Nothing!",
fg="red",
)
click.secho("Here are the returned Vehicles:", fg="red")
for v in selected_vehicle:
click.secho(v, fg="red")
click.echo()
@bulk.command("export")
@click.pass_context
@click.argument(
"vehicles",
nargs=-1,
type=str,
)
@click.option(
"--excel",
type=click.Path(
exists=False,
dir_okay=False,
readable=False,
path_type=Path,
),
help="Write the vehicle(s) and fuel records to an excel spreadsheet.",
)
@click.option(
"--ods",
type=click.Path(
exists=False,
dir_okay=False,
readable=False,
path_type=Path,
),
help="Write the vehicle(s) and fuel records to an Open Office spreadsheet (*.ods).",
)
@click.option(
"--csv",
type=click.Path(
exists=False,
dir_okay=False,
readable=False,
path_type=Path,
),
help="Write the vehicle(s) and fuel records to a csv file.",
)
def export(*args, **kwargs):
"""
    Bulk export the specified vehicles (by name or id, separated by spaces
    on the command line) to:
\b
- csv
- excel
- ods - open office format
- stdout
The vehicle and fuel records will be combined into one table and
exported to the file. If an output format isn't selected, it will
be displayed in the terminal.
# Usage
$ ft bulk export passat 2
    $ ft bulk export passat intrepid --excel=file.xlsx
    $ ft bulk export passat intrepid --ods=file.ods
    $ ft bulk export passat intrepid --csv=file.csv
    $ ft bulk export passat intrepid --excel=file.xlsx --ods=file.ods --csv=file.csv
$ ft bulk export passat intrepid 2 --ods=./output/file.ods
$ ft bulk export passat intrepid soul matrix --ods=./output/data.ods --csv=./output/data.csv
"""
ctx = args[0]
config = ctx.obj["config"]
output = []
with config["db"].begin() as session:
for vid in kwargs["vehicles"]:
click.echo(f"Exporting {vid}...")
click.echo()
if is_int(vid):
statement = select_vehicle_by_id(vid, join=True)
else:
statement = select_vehicle_by_name(vid, join=True)
page_name = f"{vid}"
df = pd.read_sql(statement, session.connection())
df = df.drop(["vehicle_id", "vehicle_id_1"], axis=1)
if kwargs.get("csv", False):
csv_file = kwargs.get("csv")
df.to_csv(csv_file.parent / Path(f"{csv_file.stem}_{page_name}.csv"))
output.append((page_name, df))
if kwargs.get("excel", False):
with ExcelWriter(kwargs.get("excel")) as writer:
for page_name, df in output:
df.to_excel(writer, page_name, index=False)
writer.save()
if kwargs.get("ods", False):
with ExcelWriter(
kwargs.get("ods"),
engine="odf",
) as writer:
for page_name, df in output:
                    # for some reason, exporting the fill_date as a datetime to
                    # ods assigns the cell formatting as a number. In the
# spreadsheet we can change the format to date and the
# value is displayed correctly, but the user shouldn't
# have to do that. We'll convert it to a string and leave it at that.
df["fill_date"] = pd.to_datetime(df["fill_date"]).dt.strftime(
"%Y-%m-%d"
)
df.to_excel(writer, page_name, index=False)
writer.save()
if not kwargs.get("excel", False) and kwargs.get("ods", False):
for page_name, df in output:
click.echo(
df.to_markdown(
index=False,
tablefmt="pretty",
)
)
click.echo()
click.secho("Completed!", fg="cyan")
| StarcoderdataPython |
1657875 | <filename>src/MidiConnector.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# JMMidiBassPedalController v3.0
# File: src/MidiConnector.py
# By: <NAME> <<EMAIL>> @ 28.10.2020
# This project is licensed under the MIT License. Please see the LICENSE.md file
# on the main folder of this code. An online version can be found here:
# https://github.com/jmeile/JMMidiBassPedalController/blob/master/LICENSE.md
#
"""
Connects to the MIDI ports
"""
from __future__ import print_function
import traceback
import sys
import fnmatch
from rtmidi import MidiIn, MidiOut
from rtmidi.midiutil import open_midiport
from MidiProcessor import MidiProcessor
from CustomLogger import CustomLogger, PrettyFormat
import logging
from autologging import logged
import xmlschema
import platform
VIRTUAL_PREFFIX = "Virtual:"
#Creates a logger for this module.
logger = logging.getLogger(CustomLogger.get_module_name())
#Setups the logger with default settings
logger.setup()
#Register the logger with this class
@logged(logger)
class MidiConnector:
"""
    Opens the MIDI ports and processes the incoming connections
"""
def __init__(self, args, xsd_schema = 'conf/MidiBassPedalController.xsd'):
"""
Initializes the MidiConnector class
Parameters:
* args: command-line arguments
* xsd_schema: path to the xsd schema
"""
self.__log.debug("Initializing MidiConnector")
self._args = args
self._xsd_schema = xsd_schema
self._midi_in = None
self._midi_out = None
self._in_ports = []
self._in_port = 0
self._use_virtual_in = False
self._out_ports = []
self._out_port = 0
self._use_virtual_out = False
self._xml_dict = {}
self.__log.debug("MidiConnector was initialized:\n%s",
PrettyFormat(self.__dict__))
def start(self):
"""
Starts the processing requests
Remarks:
      * It will either list the available MIDI ports, run in interactive or
silent mode, according to the passed command line options
Returns:
* A status string; either: "Quit", "Reload", "Reboot", or "Shutdown"
"""
self.__log.info("Starting MidiConnector")
status = None
self._get_all_ports()
exit = False
if len(self._in_ports) == 0:
self.__log.info("No MIDI IN ports were found. Please connect your MIDI "
"device and run the script again")
exit = True
if len(self._out_ports) == 0:
self.__log.info("No MIDI OUT ports were found. Please connect your MIDI "
"device and run the script again")
exit = True
if not exit:
if self._args.list:
self.__log.debug("--list switch was passed")
self._list_ports()
else:
self._parse_xml_config()
self._parse_ports()
self._open_ports()
midi_processor = MidiProcessor(
self._xml_dict,
self._midi_in,
self._midi_out,
ignore_sysex = False,
ignore_timing = False,
ignore_active_sense = False,
)
midi_processor.parse_xml()
status = midi_processor.read_midi()
self.__log.info("Exiting")
self._close_ports()
self._free_midi()
self.__log.debug("MidiConnector has been ended")
return status
def _parse_xml_config(self):
"""
Parses the specified xml configuration file
"""
self.__log.info("Parsing XML config: %s", self._xsd_schema)
exit = False
self.__log.debug("Calling XMLSchema11 api")
try:
xsd_schema = xmlschema.XMLSchema11(self._xsd_schema)
except:
exit = True
error = traceback.format_exc()
self.__log.info("Error while parsing xsd file:\n%s\n\n%s",
self._xsd_schema, error)
if not exit:
self.__log.debug("Converting XML schema to dict")
try:
xml_dict = xsd_schema.to_dict(self._args.config)
#A last manual validation must be done here: the InitialBank value must
#be less or equal than the total number of banks
if xml_dict['@InitialBank'] > len(xml_dict['Bank']):
raise Exception("InitialBank is higher than the possible number of "
"banks / maximum: " + str(len(xml_dict['Bank'])) + \
", given value: " + str(xml_dict['@InitialBank']))
self.__log.debug("Got: \n%s", PrettyFormat(xml_dict))
except:
exit = True
error = traceback.format_exc()
message = "Error while parsing xml file:\n%s\n\n%s" % (
self._args.config, error
)
self.__log.info(message)
if exit:
self.__log.debug("Unexpected error occured, aborting...")
self._free_midi()
sys.exit()
self._xml_dict = xml_dict
def _open_port(self, interface_type, midi_port, is_virtual = False):
"""
Opens the specified MIDI port for the entered midi_callback
Parameters:
* interface_type: which interface to open: 'input' or 'output'
* midi_port: MIDI port used to open the MIDI interface
* is_virtual: whether or not the port is virtual
Returns:
* In case of opening a virtual port, it will return a MIDI interface
"""
if not is_virtual:
self.__log.debug("Opening MIDI port: %s", PrettyFormat(midi_port))
port_name = None
client_name = None
else:
self.__log.debug("Opening Virtual MIDI port")
port_name = midi_port
midi_port = None
client_name = VIRTUAL_PREFFIX[:-1]
try:
midi_interface = open_midiport(port = midi_port, type_ = interface_type,
use_virtual = is_virtual,
interactive = False,
client_name = client_name,
port_name = port_name)[0]
except:
error = traceback.format_exc()
self.__log.info(error)
self._free_midi()
sys.exit()
return midi_interface
def _open_ports(self):
"""
Opens the entered MIDI ports
"""
self._midi_in = self._open_port("input", self._in_port,
self._use_virtual_in)
if self._use_virtual_in:
port_name = self._in_port
else:
port_name = self._in_ports[self._in_port]
self.__log.info("MIDI IN Port: '%s' was opened", port_name)
self._midi_out = self._open_port("output", self._out_port,
self._use_virtual_out)
if self._use_virtual_out:
port_name = self._out_port
else:
port_name = self._out_ports[self._out_port]
self.__log.info("MIDI OUT Port: '%s' was opened", port_name)
def _close_port(self, midi_interface):
"""
Closes the specified MIDI interface
Parameters:
* midi_interface: MIDI interface that will be closed
"""
self.__log.debug("Closing MIDI port")
try:
midi_interface.close_port()
except:
error = traceback.format_exc()
self.__log.info(error)
def _close_ports(self):
"""
Closes all opened MIDI ports
"""
self._close_port(self._midi_in)
if self._use_virtual_in:
port_name = self._in_port
else:
port_name = self._in_ports[self._in_port]
self.__log.info("MIDI IN Port: '%s' was closed", port_name)
self._close_port(self._midi_out)
if self._use_virtual_out:
port_name = self._out_port
else:
port_name = self._out_ports[self._out_port]
self.__log.info("MIDI OUT Port: '%s' was closed", port_name)
def _parse_port(self, port_list, arg_name):
"""
Gets the specified port from command line
Parameters:
* port_list: List of available MIDI ports
* arg_name: name of the argument to get. It can be: InPort or OutPort
Returns:
* A tupple containing:
- either a port index or a virtual port string name
- either if using a virtual or a real port
"""
self.__log.debug("Getting: %s from:\n%s", arg_name, PrettyFormat(port_list))
use_virtual = False
num_ports = len(port_list)
port_value = self._xml_dict.get('@'+arg_name, num_ports)
self.__log.debug("Port value: %s", port_value)
if (type(port_value) == str) and port_value.isdigit():
port_value = int(port_value)
elif type(port_value) == str:
is_windows = (platform.system() == "Windows")
if port_value.startswith(VIRTUAL_PREFFIX):
if not is_windows:
                #Virtual ports only work under macOS and Linux; Windows doesn't
                #support them. On that operating system, the "Virtual:" prefix
                #will be removed and the port will be treated as a normal port.
                #You can assure compatibility between Windows and other OSes by
                #creating the ports first with loopMIDI
use_virtual = True
elif port_value[-1] != '*':
port_value += '*'
port_value = port_value[len(VIRTUAL_PREFFIX):]
if not use_virtual:
self.__log.debug("Searching port")
#On this case, a string with part of the name was given, so, it
#will be searched in the available ports
port_index = 0
port_found = False
for port_name in port_list:
filtered = fnmatch.filter([port_name], port_value)
if filtered != []:
port_found = True
break
port_index += 1
if not port_found:
self.__log.info("The %s: %s wasn't found.", arg_name, port_value)
self._free_midi()
self.__log.debug("Port wasn't found, exiting")
sys.exit()
port_value = port_index + 1
self.__log.debug("Port was found, index: %d", port_value)
else:
self.__log.debug("Virutal Port will be used")
if not use_virtual:
#Internally, port numbers start from 0 because they are in an array
port_value -= 1
if port_value >= num_ports:
self.__log.info("Invalid port number was supplied")
self._free_midi()
self.__log.debug("Exiting after getting invalid port")
sys.exit()
return port_value, use_virtual
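    # Example (hypothetical config values): InPort="Virtual:Controller" opens a
    # virtual port named "Controller" on macOS/Linux; InPort="loopMIDI*" matches
    # the first real port whose name fits the wildcard; InPort="2" opens the
    # second listed port.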
def _parse_ports(self):
"""
Gets the passed ports to the command line
"""
self.__log.debug("Parsing ports")
self._in_port, self._use_virtual_in = self._parse_port(self._in_ports,
'InPort')
self._out_port, self._use_virtual_out = self._parse_port(self._out_ports,
'OutPort')
self.__log.debug("Ports were parsed")
def _open_midi(self):
"""Starts MIDI without opening a port"""
self.__log.info("Opening MIDI interfaces")
try:
self._midi_out = MidiOut()
self._midi_in = MidiIn()
#Note: if you need to catch SysEx, MIDI clock, and active sense
#messages, then use the method: ignore_types as follows:
#self._midi_in.ignore_types(sysex = False, timing = False,
# active_sense = False)
#They are ignored by default. I don't need this right now, so the
#standard behaviour is OK for me
except:
error = traceback.format_exc()
self.__log.info(error)
self._free_midi()
return False
self.__log.debug("MIDI interfaces were opened")
return True
def _free_midi(self):
"""Frees MIDI resources"""
self.__log.debug("Releasing MIDI")
if hasattr(self, '_midi_in'):
del self._midi_in
if hasattr(self, '_midi_out'):
del self._midi_out
self.__log.info("MIDI was released")
def _get_midi_ports(self, midi_interface):
"""
Gets the available ports for the specified MIDI interface
Parameters:
* midi_interface: interface used for listing the ports. It can be
either _midi_in or _midi_out.
"""
self.__log.debug("Getting available MIDI ports")
ports = midi_interface.get_ports()
self.__log.debug("Got:\n%s", PrettyFormat(ports))
port_index = 0
for port in ports:
port_index_str = str(port_index)
ports[port_index] = port
port_index += 1
self.__log.debug("Fixed port indexes:\n%s", PrettyFormat(ports))
return ports
def _get_all_ports(self):
"""
Gets all the available MIDI IN and Out ports.
"""
in_ports = []
out_ports = []
if self._open_midi():
self.__log.debug("Getting all MIDI IN ports")
in_ports = self._get_midi_ports(self._midi_in)
self.__log.debug("Got:\n%s", PrettyFormat(in_ports))
self.__log.debug("Getting all MIDI OUT ports")
out_ports = self._get_midi_ports(self._midi_out)
self.__log.debug("Got:\n%s", PrettyFormat(out_ports))
self._in_ports = in_ports
self._out_ports = out_ports
self._free_midi()
def _get_formatted_port_list(self, port_list):
"""
Gets the port list as follows:
<port_index>: <port_name>
"""
self.__log.debug("Getting formatted port list")
port_list_tuples = []
for port_index, port_name in enumerate(port_list):
port_list_tuples.append(str(port_index + 1) + ": " + port_name)
self.__log.debug("Got: %s", PrettyFormat(port_list_tuples))
return '\n\r'.join(port_list_tuples)
def _list_ports(self):
"""
Lists all the available MIDI IN and Out ports.
"""
self.__log.info("\nAvailable MIDI IN ports:")
self.__log.info(self._get_formatted_port_list(self._in_ports))
self.__log.info("\nAvailable MIDI OUT ports:")
self.__log.info(self._get_formatted_port_list(self._out_ports))
| StarcoderdataPython |
1683768 | import os
import sys
import re
import matplotlib as mpl
from jupyter_core.paths import jupyter_config_dir
# path to install (~/.jupyter/custom/)
jupyter_custom = os.path.join(jupyter_config_dir(), 'custom')
# path to local site-packages/jupyterthemes
package_dir = os.path.dirname(os.path.realpath(__file__))
# theme colors, layout, and font directories
styles_dir = os.path.join(package_dir, 'styles')
# text file containing name of currently installed theme
theme_name_file = os.path.join(jupyter_custom, 'current_theme.txt')
# base style params
base_style = {
'axes.axisbelow': True,
'figure.autolayout': True,
'grid.linestyle': u'-',
'lines.solid_capstyle': u'round',
'legend.frameon': False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
'font.family': u'sans-serif',
'font.sans-serif': [u'Helvetica',
u'Arial',
u'Bitstream Vera Sans',
u'sans-serif']}
# base context params
base_context = {
'axes.linewidth': 1.4,
"grid.linewidth": 1.4,
"lines.linewidth": 1.5,
"patch.linewidth": .2,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0}
# base font params
base_font = {
"font.size": 11,
"axes.labelsize": 12,
"axes.titlesize": 12,
"xtick.labelsize": 10.5,
"ytick.labelsize": 10.5,
"legend.fontsize": 10.5}
def remove_non_colors(clist):
checkHex = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
return [clr for clr in clist if re.search(checkHex, clr)]
def infer_theme():
""" checks jupyter_config_dir() for text file containing theme name
(updated whenever user installs a new theme)
"""
if os.path.exists(theme_name_file):
with open(theme_name_file) as f:
theme = f.readlines()[0]
else:
theme = 'default'
return theme
def style(theme=None, context='paper', grid=True, gridlines=u'-', ticks=False, spines=True, fscale=1.2, figsize=(8., 7.)):
"""
main function for styling matplotlib according to theme
::Arguments::
theme (str): 'oceans16', 'grade3', 'chesterish', 'onedork', 'monokai', 'solarizedl', 'solarizedd'. If no theme name supplied the currently installed notebook theme will be used.
context (str): 'paper' (Default), 'notebook', 'talk', or 'poster'
grid (bool): removes axis grid lines if False
gridlines (str): set grid linestyle (e.g., '--' for dashed grid)
ticks (bool): make major x and y ticks visible
spines (bool): removes x (bottom) and y (left) axis spines if False
fscale (float): scale font size for axes labels, legend, etc.
figsize (tuple): default figure size of matplotlib figures
"""
# set context and font rc parameters, return rcdict
rcdict = set_context(context=context, fscale=fscale, figsize=figsize)
# read in theme name from ~/.jupyter/custom/current_theme.txt
if theme is None:
theme = infer_theme()
# combine context & font rcparams with theme style
set_style(rcdict, theme=theme, grid=grid, gridlines=gridlines, ticks=ticks, spines=spines)
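# A hypothetical usage sketch (assumes this module is installed as
# jupyterthemes.jtplot):
#   from jupyterthemes import jtplot
#   jtplot.style(theme='onedork', context='talk', fscale=1.4, spines=False, gridlines='--')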
def set_style(rcdict, theme=None, grid=True, gridlines=u'-', ticks=False, spines=True):
"""
This code has been modified from seaborn.rcmod.set_style()
::Arguments::
rcdict (str): dict of "context" properties (filled by set_context())
theme (str): name of theme to use when setting color properties
grid (bool): turns off axis grid if False (default: True)
ticks (bool): removes x,y axis ticks if True (default: False)
spines (bool): removes axis spines if False (default: True)
"""
# extract style and color info for theme
styleMap, clist = get_theme_style(theme)
# extract style variables
figureFace = styleMap['figureFace']
axisFace = styleMap['axisFace']
textColor = styleMap['textColor']
edgeColor = styleMap['edgeColor']
gridColor = styleMap['gridColor']
if not spines:
edgeColor = 'none'
style_dict = {
'figure.edgecolor': figureFace,
'figure.facecolor': figureFace,
'axes.facecolor': axisFace,
'axes.edgecolor': edgeColor,
'axes.labelcolor': textColor,
'axes.grid': grid,
'grid.linestyle': gridlines,
'grid.color': gridColor,
'text.color': textColor,
'xtick.color': textColor,
'ytick.color': textColor,
'patch.edgecolor': axisFace,
'patch.facecolor': gridColor,
'savefig.facecolor': figureFace,
'savefig.edgecolor': figureFace}
# update rcdict with style params
rcdict.update(style_dict)
# Show or hide the axes ticks
if ticks:
rcdict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3})
base_style.update(rcdict)
# update matplotlib with rcdict (incl. context, font, & style)
mpl.rcParams.update(rcdict)
try:
from cycler import cycler
# set color cycle to jt-style color list
mpl.rcParams['axes.prop_cycle'] = cycler(color=clist)
except Exception:
pass
# replace default blue, green, etc. with jt colors
for code, color in zip("bgrmyck", clist[:7]):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
def set_context(context='paper', fscale=1., figsize=(8., 7.)):
"""
Most of this code has been copied/modified from seaborn.rcmod.plotting_context()
::Arguments::
context (str): 'paper', 'notebook', 'talk', or 'poster'
fscale (float): font-size scalar applied to axes ticks, legend, labels, etc.
"""
# scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# scale default figsize
figX, figY = figsize
context_dict["figure.figsize"] = (figX*scaling, figY*scaling)
# independently scale the fonts
font_dict = {k: v * fscale for k, v in base_font.items()}
context_dict.update(font_dict)
return context_dict
def figsize(x=8, y=7., aspect=1.):
""" manually set the default figure size of plots
::Arguments::
x (float): x-axis size
y (float): y-axis size
aspect (float): aspect ratio scalar
"""
# update rcparams with adjusted figsize params
mpl.rcParams.update({'figure.figsize': (x*aspect, y)})
def get_theme_style(theme):
"""
read-in theme style info and populate styleMap (dict of with mpl.rcParams)
and clist (list of hex codes passed to color cylcler)
::Arguments::
theme (str): theme name
::Returns::
styleMap (dict): dict containing theme-specific colors for figure properties
clist (list): list of colors to replace mpl's default color_cycle
"""
styleMap, clist = get_default_jtstyle()
if theme == 'default':
return styleMap, clist
syntaxVars = ['@yellow:', '@orange:', '@red:', '@magenta:', '@violet:', '@blue:', '@cyan:', '@green:']
get_hex_code = lambda line: line.split(':')[-1].split(';')[0][-7:]
themeFile = os.path.join(styles_dir, theme+'.less')
with open(themeFile) as f:
for line in f:
for k, v in styleMap.items():
if k in line.strip():
styleMap[k] = get_hex_code(line)
for c in syntaxVars:
if c in line.strip():
syntaxVars[syntaxVars.index(c)] = get_hex_code(line)
# remove duplicate hexcolors
syntaxVars = list(set(syntaxVars))
clist.extend(syntaxVars)
clist = remove_non_colors(clist)
return styleMap, clist
def get_default_jtstyle():
styleMap = {'axisFace': 'white',
'figureFace': 'white',
'textColor': '.15',
'edgeColor': '.8',
'gridColor': '.8'}
return styleMap, get_color_list()
def get_color_list():
return ['#3572C6', '#83a83b', '#c44e52', '#8172b2', "#ff914d",
"#77BEDB", "#222222", "#4168B7", "#27ae60", "#e74c3c",'#cc89e0',
"#ff711a", "#3498db", '#6C7A89']
def reset():
""" full reset of matplotlib default style and colors
"""
colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, .75, 0.),
(.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
mpl.rcParams.update(mpl.rcParamsDefault)
mpl.rcParams['figure.facecolor'] = 'white'
mpl.rcParams['axes.facecolor'] = 'white'
| StarcoderdataPython |
3338348 | <reponame>abal2051/Amwal
from amwal.cache import JsonCache, cached
from amwal.extract import RawExtractor
from amwal.log import logger
class Engine:
def __init__(self, downloader):
self.downloader = downloader
@cached([JsonCache()])
def daily_bulletin(self, date, ):
# should validate date here with dateutil.parsing
date = date.replace("_", "/")
res = self.downloader.daily_bulletin(date)
res = RawExtractor.daily_bulletin(res)
return res
@cached([JsonCache()])
def listing(self, ):
res = self.downloader.listing()
res = RawExtractor.listing(res)
return res
@cached([JsonCache()])
def income_statement(self, stock_number, ):
res = self.downloader.income_statement(stock_number)
res = RawExtractor.income_statement(res)
return res
@cached([JsonCache()])
def price_history(self, stock_symbol, ):
        # note: any logger change here happens too late, since the cache decorator runs first
res = self.downloader.price_history(stock_symbol)
res = RawExtractor.price_history(res)
return res
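# Hypothetical usage sketch: each @cached method consults its cache backends
# (here a JSON file cache) before calling the downloader, so repeated calls
# with the same arguments are served from disk.
#   engine = Engine(downloader)
#   bulletin = engine.daily_bulletin("01_06_2020")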
| StarcoderdataPython |
137273 | import os
import tempfile
import mock
import numpy as np
from yt.testing import assert_equal, fake_random_ds
from yt.units.unit_object import Unit
def setup():
from yt.config import ytcfg
ytcfg["yt", "__withintesting"] = "True"
def teardown_func(fns):
for fn in fns:
try:
os.remove(fn)
except OSError:
pass
@mock.patch("yt.visualization._mpl_imports.FigureCanvasAgg.print_figure")
def test_slice(mock_print_figure):
fns = []
grid_eps = np.finfo(np.float64).eps
for nprocs in [8, 1]:
# We want to test both 1 proc and 8 procs, to make sure that
# parallelism isn't broken
ds = fake_random_ds(64, nprocs=nprocs)
dims = ds.domain_dimensions
xn, yn, zn = ds.domain_dimensions
dx = ds.arr(1.0 / (ds.domain_dimensions * 2), "code_length")
xi, yi, zi = ds.domain_left_edge + dx
xf, yf, zf = ds.domain_right_edge - dx
coords = np.mgrid[xi : xf : xn * 1j, yi : yf : yn * 1j, zi : zf : zn * 1j]
uc = [np.unique(c) for c in coords]
slc_pos = 0.5
# Some simple slice tests with single grids
for ax in range(3):
xax = ds.coordinates.x_axis[ax]
yax = ds.coordinates.y_axis[ax]
slc = ds.slice(ax, slc_pos)
shifted_slc = ds.slice(ax, slc_pos + grid_eps)
assert_equal(slc["ones"].sum(), slc["ones"].size)
assert_equal(slc["ones"].min(), 1.0)
assert_equal(slc["ones"].max(), 1.0)
assert_equal(np.unique(slc["px"]), uc[xax])
assert_equal(np.unique(slc["py"]), uc[yax])
assert_equal(np.unique(slc["pdx"]), 0.5 / dims[xax])
assert_equal(np.unique(slc["pdy"]), 0.5 / dims[yax])
pw = slc.to_pw(fields="density")
for p in pw.plots.values():
tmpfd, tmpname = tempfile.mkstemp(suffix=".png")
os.close(tmpfd)
p.save(name=tmpname)
fns.append(tmpname)
for width in [(1.0, "unitary"), 1.0, ds.quan(0.5, "code_length")]:
frb = slc.to_frb(width, 64)
shifted_frb = shifted_slc.to_frb(width, 64)
for slc_field in ["ones", "density"]:
fi = ds._get_field_info(slc_field)
assert_equal(frb[slc_field].info["data_source"], slc.__str__())
assert_equal(frb[slc_field].info["axis"], ax)
assert_equal(frb[slc_field].info["field"], slc_field)
assert_equal(frb[slc_field].units, Unit(fi.units))
assert_equal(frb[slc_field].info["xlim"], frb.bounds[:2])
assert_equal(frb[slc_field].info["ylim"], frb.bounds[2:])
assert_equal(frb[slc_field].info["center"], slc.center)
assert_equal(frb[slc_field].info["coord"], slc_pos)
assert_equal(frb[slc_field], shifted_frb[slc_field])
teardown_func(fns)
def test_slice_over_edges():
ds = fake_random_ds(64, nprocs=8, fields=["density"], negative=[False])
slc = ds.slice(0, 0.0)
slc["density"]
slc = ds.slice(1, 0.5)
slc["density"]
def test_slice_over_outer_boundary():
ds = fake_random_ds(64, nprocs=8, fields=["density"], negative=[False])
slc = ds.slice(2, 1.0)
slc["density"]
assert_equal(slc["density"].size, 0)
| StarcoderdataPython |
3354103 | <reponame>thevirtualbuddy/Python-freecodecamp
class Category:
#Constructor
def __init__(self, name):
self.name= name
self.ledger=list()
#Deposit method
def deposit(self, amount, description=""):
# We append an object to the ledger list
# in the form of
# {"amount": amount, "description": description}
#Initialising a dictionary
self.depositDetail = dict()
#Adding the amount and description to dictionary
self.depositDetail["amount"] = amount
self.depositDetail["description"]=description
#Adding the deposit to the ledger list
self.ledger.append(self.depositDetail)
#Check_fund method
def check_funds(self,amount):
fundAvailable = 0
n = len(self.ledger)
for i in range(n):
fundAvailable += self.ledger[i]["amount"]
if fundAvailable<amount:
return False
else:
return True
#Withdraw method
def withdraw(self,amount,description=""):
# We need to check if the amount to be withdrawn
# is greater than or equal to the total amount
check = self.check_funds(amount)
if check:
self.withdrawDetail = dict()
self.withdrawDetail["amount"]=-(amount)
self.withdrawDetail["description"]=description
self.ledger.append(self.withdrawDetail)
return True
else:
return False
# get balance method that returns the current balance
# of the budget category based on the deposits
# and withdrawals that have occurred.
def get_balance(self):
fundAvailable = 0
n = len(self.ledger)
for i in range(n):
fundAvailable += self.ledger[i]["amount"]
return fundAvailable
#Transfer method
def transfer(self, amount, objName):
objectName = ""
objectName = objName.name
a=self.withdraw(amount,f"Transfer to {objectName}")
if(a==True):
objName.deposit(amount,f"Transfer from {self.name}")
return True
else:
return False
### Withdrawl method for chart
def get_withdrawls(self):
fundAvailable = 0
n = len(self.ledger)
for i in range(n):
amt = self.ledger[i]["amount"]
if amt<0:
fundAvailable += amt
return fundAvailable
def __str__(self):
title = f"{self.name:*^30}\n"
items = ""
total = 0
for i in range(len(self.ledger)):
desc = f"{self.ledger[i]['description'][0:23]:23}"
amt = f"{self.ledger[i]['amount']:>7.2f}"
items += desc + amt + '\n'
total += self.ledger[i]['amount']
output = title + items + "Total: " + str(total)
return output
###
def truncate(n):
multiplier = 10
return int(n * multiplier) / multiplier
def getTotalCat(categories):
total = 0
breakdown = []
for category in categories:
total += category.get_withdrawls()
breakdown.append(category.get_withdrawls())
#Breakdown of spending rounded down to nearest 10th
rounded = list(map(lambda x: truncate(x/total), breakdown))
return rounded
def create_spend_chart(categories):
title=""
title = "Percentage spent by category\n"
i = 100
totals = getTotalCat(categories)
for i in range(100, -1, -10):
cat_spaces = " "
for total in totals:
if total * 100 >= i:
cat_spaces += "o "
else:
cat_spaces += " "
title+= str(i).rjust(3) + "|" + cat_spaces + ("\n")
dashes = "-" + "---"*len(categories)
names = []
cat_names = ""
for category in categories:
names.append(category.name)
maxLen = max(names, key=len)
for x in range(len(maxLen)):
nameStr = ' '
i=0
nameslen = len(names)
for name in names:
if i<nameslen:
if x >= len(name):
nameStr += " "
else:
nameStr += name[x] + " "
else:
if x >= len(name):
nameStr += ""
else:
nameStr += name[x] + ""
i=i+1
cat_names += '\n' +nameStr
title+= dashes.rjust(len(dashes)+4) + cat_names
return title
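# A minimal usage sketch with hypothetical values (mirrors the freeCodeCamp
# project description):
food = Category("Food")
food.deposit(1000, "initial deposit")
food.withdraw(10.15, "groceries")
clothing = Category("Clothing")
food.transfer(50, clothing)
print(food)
print(create_spend_chart([food, clothing]))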
| StarcoderdataPython |
150619 |
########################################################################
# written by : <NAME>, <NAME>, CS, #
# Im<NAME> AlFaisal University #
#----------------------------------------------------------------------#
# #
# This interface is the user main menu where the users can #
# create new delivery task and change their passwords #
# #
########################################################################
import sqlite3
from common import id_generator,send_email
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import login
import changePass
import ScanTape
import ChooseDestination
class userHome():
builder =None
window = None
Username=None
userType=None
#staring function
def __init__(self,username,kind):
#connect to the desired window from glade file
self.builder = Gtk.Builder()
self.builder.add_from_file("HomeUser.glade")
self.window = self.builder.get_object("window1")
#get username+type
self.Username=username
self.userType=kind
#get all objects
createTaskBtn=self.builder.get_object("createTaskBtn")
changePasswordBtn=self.builder.get_object("changePasswordBtn")
createTaskBtn.connect("clicked",self.createTask)
changePasswordBtn.connect("clicked",self.changePassword)
logoutBtn=self.builder.get_object("logoutBtn1")
        logoutBtn.connect("clicked",self.onLogoutButtonPressed)
self.window.show()
#go to Scan Tape interface
def createTask(self,button):
self.window.destroy()
self.window=ScanTape.ScanTape(list(),self.Username,self.userType)
#go to change password interface
def changePassword(self,button):
self.window.destroy()
window2 = changePass.change_password(self.Username,self.userType)
#Logout
    def onLogoutButtonPressed(self, button):
self.window.destroy()
self.window=login.loginClass()
class tapeInfo():
builder =None
window = None
projectName = None
tapeName = None
rackName = None
slotNumber = None
tapesList = None
barcode = None
hint = None
userType=None
Username=None
#starting function
def __init__(self,volser, tl,username, kind): # tl = tape list
#connect to the desired window from glade file
self.builder = Gtk.Builder()
self.builder.add_from_file("HomeUser.glade")
self.window = self.builder.get_object("window2")
#get the username+type
self.userType=kind
self.Username=username
#get all the objects
scanBtn=self.builder.get_object("scanBtn")
logoutBtn=self.builder.get_object("logoutBtn2")
        logoutBtn.connect("clicked",self.onLogoutButtonPressed)
proceedBtn=self.builder.get_object("proceedBtn")
cancelBtn=self.builder.get_object("cancelBtn")
self.projectName=self.builder.get_object("projectName")
self.tapeName=self.builder.get_object("tapeName")
self.rackName=self.builder.get_object("rackName")
self.slotNumber=self.builder.get_object("slotNumber")
self.hint=self.builder.get_object("hint")
scanBtn.connect("clicked",self.scan)
proceedBtn.connect("clicked",self.proceed)
cancelBtn.connect("clicked",self.cancel)
self.tapesList= tl
self.barcode = volser
        #if the tape list already holds 2 tapes (this scan adds the 3rd), disable the Scan button and show the max-3-tapes hint
if self.tapesList!= None and len(self.tapesList) == 2:
scanBtn.set_sensitive(False)
            self.hint.set_text("You reached the maximum number of tapes")
#connect to db+bring the volser info
db = sqlite3.connect('SaedRobot.db')
c = db.cursor()
c.execute('SELECT * from inventory WHERE volser= ?' , (volser,))
data=c.fetchone()
#valid volser
if data !=None and len(data)>0:
self.projectName.set_text(data[1])
self.tapeName.set_text(data[0])
self.rackName.set_text(data[2])
self.slotNumber.set_text(str(data[3]))
self.tapesList.append(self.barcode)
self.window.show()
def scan(self,button):
# this method will append the barcode to the list and send the list back to ScanTape Interface
self.window.destroy()
self.window=ScanTape.ScanTape(self.tapesList,self.Username,self.userType)
def proceed(self,button):
self.window.destroy() # Go ahead to next interface with the tapelist >> Zainab's interface Choose distnation
self.window=ChooseDestination.ChooseDes(self.tapesList,self.Username,self.userType)
def cancel(self,button): #Go to ScanTape interface with the TapeList with no further changes
self.window.destroy()
index = len(self.tapesList)
del self.tapesList[index - 1]
self.window=ScanTape.ScanTape(self.tapesList,self.Username,self.userType)
#Logout
    def onLogoutButtonPressed(self, button):
self.window.destroy()
self.window=login.loginClass()
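
# Minimal, hypothetical launch sketch (the username/type pair normally
# comes from the login module, and Gtk.main() runs the event loop):
# if __name__ == '__main__':
#     userHome("operator1", "user")
#     Gtk.main()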
| StarcoderdataPython |
58746 | # Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from titanium_cloud.swagger.views import SwaggerJsonViewDepreciated
from titanium_cloud.swagger.views import APIv1SwaggerJsonViewDepreciated
from titanium_cloud.swagger.views import SwaggerJsonView
from titanium_cloud.swagger.views import APIv1SwaggerJsonView
URLPATTERNS = [
    # API v0, deprecated
url(r'^api/multicloud-titanium_cloud/v0/swagger.json$', SwaggerJsonViewDepreciated.as_view()),
    # API v1, deprecated
url(r'^api/multicloud-titanium_cloud/v1/swagger.json$', APIv1SwaggerJsonViewDepreciated.as_view()),
# API v0, new namespace: MULTICLOUD-335
url(r'^api/multicloud-titaniumcloud/v0/swagger.json$', SwaggerJsonView.as_view()),
# API v1, new namespace: MULTICLOUD-335
url(r'^api/multicloud-titaniumcloud/v1/swagger.json$', APIv1SwaggerJsonView.as_view()),
]
urlpatterns = format_suffix_patterns(URLPATTERNS)
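
# Illustrative request against a running instance (host and port are
# assumptions, not defined by this module):
# curl http://<host>:<port>/api/multicloud-titaniumcloud/v1/swagger.json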
| StarcoderdataPython |
4834545 | <reponame>wedwardbeck/ibase
from django.urls import path
from itembase.core.views.staffing_views import ProjectManagerListView, TeamLeadListView, TeamListView, \
TeamMemberCreateView, TeamMemberClientCreateView, TeamMemberDetailView, TeamMemberUpdateView
app_name = "staff"
urlpatterns = [
# Client URL Patterns
path('', TeamListView.as_view(), name='team-list'),
path('pm/', ProjectManagerListView.as_view(), name='pm-list'),
path('tl/', TeamLeadListView.as_view(), name='teamlead-list'),
path('newtm/', TeamMemberCreateView.as_view(), name='team-new'),
path('newtm/<slug:slug>/', TeamMemberClientCreateView.as_view(), name='client-team-new'),
path('<int:pk>/', TeamMemberDetailView.as_view(), name='team-view'),
path('edittm/', TeamMemberUpdateView.as_view(), name='team-edit'),
]
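
# Hypothetical reverse() lookups, assuming this URLconf is included under
# the "staff" namespace declared by app_name above:
# from django.urls import reverse
# reverse('staff:team-list')                    # -> '<include-prefix>/'
# reverse('staff:team-view', kwargs={'pk': 1})  # -> '<include-prefix>/1/'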
| StarcoderdataPython |
44904 | <reponame>boomsbloom/dtm-fmri
'''
==============================================
====== DYNAMIC TOPIC MODELING FOR FMRI =======
==============================================
Assumes subject timeseries have been
processed through:
1) binning
2) text creation (corr matrix as docs)
==============================================
==============================================
==============================================
'''
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from subprocess import call
from gensim import models
from bct import community_louvain
from utilities import *
def main():
# set arguments
# OPTO
# args = {'num_topics': 20, # number of topics to estimate
# 'num_samples': 480, # length of timeseries
# 'num_subjects': 3, # number of subjects
# 'parent_dir': 'opto', # data directory
# 'run_model': False, # False assumes model has been previously run
# 'stim_design': 'topics/opto_k20/stim_design.txt', # Location of stimulus design file
# }
#WORKING MEMORY
# args = {'num_topics': 20, # number of topics to estimate
# 'num_samples': 405, # length of timeseries
# 'num_subjects': 120, # number of subjects #121
# 'parent_dir': 'WM_RL', # data directory #WM_LR
# 'run_model': False, # False assumes model has been previously run
# 'stim_design': 'stim_designs/WM_RL_stimdesign.txt', # Location of stimulus design file
# }
#MATH LEARNING
args = {'num_topics': 20, # number of topics to estimate
'num_samples': 4, # length of timeseries
'num_subjects': 398, # number of subjects #388
'parent_dir': 'math2', # data directory #math1
'run_model': False, # False assumes model has been previously run
}
# run model
if args['run_model']:
dtm = runDTM(args)
save_dynamics(dtm,args)
else:
try:
fit_model = 'fit_models/dtm_fit_%s_%sk'%(args['parent_dir'],args['num_topics'])
dtm = models.wrappers.DtmModel.load(fit_model)
        except Exception:
print('No model fit could be found.')
raise
gammas = dtm.gamma_
topic_sums = np.sum(gammas,axis=0)/np.sum(gammas)
#get the topics with meaningful information
gammas_clean, sig_topics = clean_gammas(gammas)
s = 0
e = args['num_samples']
grp_gammas = np.zeros([args['num_subjects'],args['num_samples'],np.shape(gammas_clean)[1]])
#grp_gammas = np.zeros([args['num_subjects'],12,args['num_topics']])
for sub in range(args['num_subjects']):
grp_gammas[sub,:,:] = gammas_clean[s:e,:]
s=e
e+=args['num_samples']
group_gammas = np.transpose(np.mean(grp_gammas,axis=0))
#behavioral_analysis(topic_labels,grp_gammas,'RL')
topic_labels, topic_ids = cluster_group(group_gammas,args['parent_dir'])
if 'math' not in args['parent_dir']:
group_gammas = merge_gammas(gammas_clean,topic_labels,args)
stim_design = np.loadtxt(args['stim_design']) # add stimulus design to final matrix
if args['parent_dir'] == 'opto': #Fixes error in how this data was saved
stim_design = np.roll(stim_design,6)
group_gammas = np.vstack([group_gammas,stim_design])
else: # grab the individual gamma matrix
group_gammas = merge_gammas_nomax(gammas_clean,topic_labels,args)
indiv_gammas = merge_indiv_gammas(gammas_clean,topic_labels,args)
with open('gammas_out/%s_indiv_gammas.pkl'%(args['parent_dir']),'wb') as f:
pickle.dump(indiv_gammas, f, pickle.HIGHEST_PROTOCOL)
# save grouped topic probabilities, cluster labels, and original topic ids
with open('gammas_out/%s_k%s_group_gammas.pkl'%(args['parent_dir'],args['num_topics']), 'wb') as out:
output = Output(sig_topics, topic_labels, group_gammas, topic_sums)
pickle.dump(output, out, pickle.HIGHEST_PROTOCOL)
class Output:
def __init__(self, ids, labels, gammas, topic_sums):
self.ids = ids
self.labels = labels
self.gammas = gammas
self.topic_sums = topic_sums
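
# Hedged downstream sketch: re-loading a saved result (the file name
# follows the pickle.dump pattern above; the exact path depends on args):
# with open('gammas_out/math2_k20_group_gammas.pkl', 'rb') as f:
#     out = pickle.load(f)
# out.labels  # cluster label per retained topic
# out.gammas  # grouped topic-probability matrix (plus stim row, if any)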
if __name__ == '__main__':
main()
| StarcoderdataPython |
3286357 | <filename>goose.py
# SPDX-FileCopyrightText: 2021 <NAME>
#
# SPDX-License-Identifier: MIT
# A simple example of how to set up a keymap and HID keyboard on Keybow 2040.
# You'll need to connect Keybow 2040 to a computer, as you would with a regular
# USB keyboard.
# Drop the keybow2040.py file into your `lib` folder on your `CIRCUITPY` drive.
# NOTE! Requires the adafruit_hid CircuitPython library also!
import board
from keybow2040 import Keybow2040
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
# Set up Keybow
i2c = board.I2C()
keybow = Keybow2040(i2c)
keys = keybow.keys
# Set up the keyboard and layout
keyboard = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(keyboard)
# A map of keycodes that will be mapped sequentially to each of the keys, 0-15
keymap = [Keycode.SHIFT,
Keycode.C,
Keycode.LEFT_ARROW,
Keycode.Z,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_SHIFT,
Keycode.DOWN_ARROW,
Keycode.UP_ARROW,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_ARROW,
Keycode.SPACE,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_SHIFT,
Keycode.RIGHT_SHIFT]
# The colours used for the key LEDs; pink marks a pressed key.
cyan = (0, 255, 255)
purple = (153, 102, 255)
green = (51, 204, 0)
white = (200, 200, 255)
yellow = (255, 255, 0)
orange = (255, 55, 0)
pink = (255, 102, 204)
colourMap = [
green,
cyan,
white,
cyan,
green,
orange,
white,
white,
green,
cyan,
white,
orange,
green,
cyan,
cyan,
cyan
]
holdStatus = [ False for _ in range(16)]
# Attach handler functions to all of the keys
for key in keys:
# A press handler that sends the keycode and turns on the LED
@keybow.on_press(key)
def press_handler(key):
keycode = keymap[key.number]
keyboard.send(keycode)
key.set_led(*pink)
        # Index by key.number rather than the loop variable, which the
        # closure would capture late (every handler would see the final index).
        holdStatus[key.number] = True
# A release handler that turns off the LED
@keybow.on_release(key)
def release_handler(key):
rgb = colourMap[key.number]
key.set_led(*rgb)
#key.led_off()
        holdStatus[key.number] = False
# A hold handler
@keybow.on_hold(key)
def hold_handler(key):
keycode = keymap[key.number]
keyboard.send(keycode)
key.set_led(*purple)
for key in keys:
rgb = colourMap[key.number]
key.set_led(*rgb)
if key.number != 3:
key.hold_time = 0.1
while True:
# Always remember to call keybow.update()!
keybow.update()
for key in keys:
if key.held:
keycode = keymap[key.number]
keyboard.send(keycode)
| StarcoderdataPython |
3220357 | import os
if __name__ == '__main__':
amplxe_cl_path = '/opt/intel/vtune_amplifier/bin64/amplxe-cl'
# dataset and parameters
dataset_dir_path = '/home/yche/GitRepos/ScanOptimizing/dataset/'
dataset_path_lst = map(lambda file_name: dataset_dir_path + file_name,
['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster'])
eps_lst = map(str, [float(i + 1) / 10 for i in range(9)])
# exec path
ppscan0_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp0'
ppscan1_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp1'
exec_path_lst = [ppscan0_path, ppscan1_path]
# profiler tag, and filter list
profiler_tag_lst = ['advanced-hotspots', 'memory-access', 'general-exploration']
advanced_hotspots_tag_lst = ['-column="CPU Time:Self"', '-column="Instructions Retired:Self"']
memory_access_tag_lst = ['-column="CPU Time:Self"', '-column="Loads:Self"']
general_exploration_tag_lst = ['-column="Front-End Bound:Self"', '-column="Bad Speculation:Self"',
'-column="Back-End Bound:Core Bound:Self"',
'-column="Back-End Bound:Memory Bound:Self"',
'-column="Retiring:Self"']
profiler_filter_lst = [' '.join(advanced_hotspots_tag_lst), ' '.join(memory_access_tag_lst),
' '.join(general_exploration_tag_lst)]
profiler_tag_abbr_lst = ['ah', 'macc', 'ge']
# result file root folder
result_dir = '/home/yche/workspace/vtune_data/'
csv_report_root_dir = '/home/yche/workspace/vtune_report/'
for dataset_path in dataset_path_lst:
dataset_name = dataset_path.split(os.sep)[-1].split('_')[-1]
my_folder_prefix = csv_report_root_dir + dataset_name
os.system('mkdir -p ' + my_folder_prefix)
for eps in eps_lst:
for exec_path in exec_path_lst:
exec_name = exec_path.split(os.sep)[-1]
for idx, profiler_tag in enumerate(profiler_tag_lst):
profiler_filter_tag = profiler_filter_lst[idx]
result_path = result_dir + '-'.join([dataset_name, exec_name, eps, profiler_tag])
csv_file_path = my_folder_prefix + os.sep + '-'.join(
[eps, exec_name, profiler_tag_abbr_lst[idx]])
my_cmd = ' '.join([
amplxe_cl_path, '-R top-down', '-result-dir', result_path,
'-group-by function', '-filter function=GraphParallelExp::IntersectNeighborSets',
profiler_filter_tag, '-report-output', csv_file_path, '-format csv -csv-delimiter comma'])
print my_cmd
os.system(my_cmd)
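
# A representative generated command (paths and column tags illustrative),
# as assembled by the join above:
#   /opt/intel/vtune_amplifier/bin64/amplxe-cl -R top-down \
#     -result-dir /home/yche/workspace/vtune_data/orkut-pSCANParallelExp0-0.1-advanced-hotspots \
#     -group-by function -filter function=GraphParallelExp::IntersectNeighborSets \
#     -column="CPU Time:Self" -column="Instructions Retired:Self" \
#     -report-output <csv_file_path> -format csv -csv-delimiter comma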
| StarcoderdataPython |
3205030 | <filename>ae_service/main.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START app]
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
app = Flask(__name__)
api = Api(app)
ALGORITHM_EXECUTIONS = {
'execution1': {'algorithm': 'build an API'},
'execution2': {'algorithm': '?????'},
'execution3': {'algorithm': 'profit!'},
}
def abort_if_execution_doesnt_exist(execution_id):
if execution_id not in ALGORITHM_EXECUTIONS:
abort(404, message="Execution {} doesn't exist".format(execution_id))
parser = reqparse.RequestParser()
parser.add_argument('algorithm')
# Execution
# shows a single execution and lets you fetch, replace or delete it
class Execution(Resource):
def get(self, execution_id):
abort_if_execution_doesnt_exist(execution_id)
return ALGORITHM_EXECUTIONS[execution_id]
def delete(self, execution_id):
abort_if_execution_doesnt_exist(execution_id)
del ALGORITHM_EXECUTIONS[execution_id]
return '', 204
def put(self, execution_id):
args = parser.parse_args()
        # The parser only declares 'algorithm', so build the record from it
        execution = {'algorithm': args['algorithm']}
ALGORITHM_EXECUTIONS[execution_id] = execution
return execution, 201
# ExecutionList
# shows a list of all executions, and lets you POST to add new ones
class ExecutionList(Resource):
def get(self):
return ALGORITHM_EXECUTIONS
def post(self):
args = parser.parse_args()
execution_id = int(max(ALGORITHM_EXECUTIONS.keys()).lstrip('execution')) + 1
execution_id = 'execution%i' % execution_id
ALGORITHM_EXECUTIONS[execution_id] = {'algorithm': args['algorithm']}
return ALGORITHM_EXECUTIONS[execution_id], 201
##
## Actually setup the Api resource routing here
##
api.add_resource(ExecutionList, '/executions')
api.add_resource(Execution, '/executions/<execution_id>')
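
# Hypothetical local usage once the dev server is running (Flask defaults
# to 127.0.0.1:5000; App Engine serves on its own host):
# curl http://127.0.0.1:5000/executions
# curl -X POST -d 'algorithm=profit!' http://127.0.0.1:5000/executions
# curl -X DELETE http://127.0.0.1:5000/executions/execution3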
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
3312618 | import gym
import time
import os
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gym import wrappers
from gym.wrappers.monitoring import stats_recorder, video_recorder
from datetime import datetime
import tensorflow as tf
import random
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Embedding
from tensorflow.keras.optimizers import SGD, RMSprop, Adam, Adamax
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from pprint import pprint
import cv2
import datetime
from collections import deque
import glob
from shutil import copyfile
import pickle as pkl
import json
import ringbuffer
import argparse
def plot_running_avg(totalrewards, filename):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.savefig(filename)
plt.close()
def reset_video_recorder_filename(filename,env):
if env.video_recorder:
env._close_video_recorder()
print("FILENAME IN VR:{}/{} ".format(env.directory, filename))
env.video_recorder = video_recorder.VideoRecorder(
env=env,
path=os.path.join(env.directory,filename),
metadata={'episode_id': env.episode_id},
enabled=env._video_enabled(),
)
env.video_recorder.capture_frame()
def transform(s):
bottom_black_bar = s[84:, 12:]
img = cv2.cvtColor(bottom_black_bar, cv2.COLOR_RGB2GRAY)
bottom_black_bar_bw = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY)[1]
bottom_black_bar_bw = cv2.resize(bottom_black_bar_bw, (84, 12), interpolation = cv2.INTER_NEAREST)
    upper_field = s[:84, 6:90] # we crop the sides of the screen as they carry little information
img = cv2.cvtColor(upper_field, cv2.COLOR_RGB2GRAY)
upper_field_bw = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)[1]
    upper_field_bw = cv2.resize(upper_field_bw, (10, 10), interpolation = cv2.INTER_NEAREST) # rescaled to 10x10 pixels
upper_field_bw = upper_field_bw.astype('float')/255
car_field = s[66:78, 43:53]
img = cv2.cvtColor(car_field, cv2.COLOR_RGB2GRAY)
car_field_bw = cv2.threshold(img, 80, 255, cv2.THRESH_BINARY)[1]
car_field_t = [car_field_bw[:, 3].mean()/255, car_field_bw[:, 4].mean()/255, car_field_bw[:, 5].mean()/255, car_field_bw[:, 6].mean()/255]
return bottom_black_bar_bw, upper_field_bw, car_field_t
# this function uses the bottom black bar of the screen and extracts steering setting, speed and gyro data
def compute_steering_speed_gyro_abs(a):
right_steering = a[6, 36:46].mean()/255
left_steering = a[6, 26:36].mean()/255
steering = (right_steering - left_steering + 1.0)/2
left_gyro = a[6, 46:60].mean()/255
right_gyro = a[6, 60:76].mean()/255
gyro = (right_gyro - left_gyro + 1.0)/2
speed = a[:, 0][:-2].mean()/255
# if speed>0:
# print("speed element: ", speed)
abs1 = a[:, 6][:-2].mean()/255
abs2 = a[:, 8][:-2].mean()/255
abs3 = a[:, 10][:-2].mean()/255
abs4 = a[:, 12][:-2].mean()/255
return [steering, speed, gyro, abs1, abs2, abs3, abs4]
vector_size = 10*10 + 7 + 4
def create_nn(model_to_load, stack_len, freeze_hidden=False, lr = 0.01):
try:
m = load_model(model_to_load)
print("Loaded pretrained model " + model_to_load)
init_weights = m.get_weights()
# only do this if loading a saved model. why would we freeze weights on a trained model?
if freeze_hidden == True:
m.layers[1].trainable = False # not sure if this is the right way to do this
m.compile(loss='mse', optimizer=Adamax(lr=0.01))
return m, init_weights
except Exception as e:
print("No model loaded, generating new")
model = Sequential()
        model.add(Dense(512, input_shape=(stack_len*111,), kernel_initializer="lecun_uniform")) # stack_len frames, 111 values (10*10 + 7 + 4) each
model.add(Activation('relu'))
model.add(Dense(11, kernel_initializer="lecun_uniform"))
adamax = Adamax(lr=lr)
model.compile(loss='mse', optimizer=adamax)
if model_to_load:
model.save(model_to_load)
return model, model.get_weights()
class DQNAgent():
'''
This class is modified from https://gist.github.com/lmclupr/b35c89b2f8f81b443166e88b787b03ab#file-race-car-cv2-nn-network-td0-15-possible-actions-ipynb
'''
def __init__(self, num_episodes, model_name=None, carConfig=None, replay_freq=20, freeze_hidden=False, lr=0.01, video_callable=None, train_dir="train_logs_testing"):
K.clear_session()
env = gym.make('CarRacing-v0')
env = wrappers.Monitor(env, 'flaskapp/static', force=False, resume = True, video_callable= video_callable, mode='evaluation', write_upon_reset=False)
self.carConfig = carConfig
self.curr_pointer = 0
self.env = env
self.gamma = 0.99
self.K = 10
self.stack_len = 4 # number of continuous frames to stack
self.model_name = model_name
self.train_dir = train_dir
self.model, self.init_weights = create_nn(model_name, self.stack_len, freeze_hidden, lr) # consecutive steps, 111-element vector for each state
self.target_models = []
for _ in range(self.K):
target_model, _ = create_nn(model_name, self.stack_len)
target_model.set_weights(self.init_weights)
self.target_models.append(target_model)
self.model.summary()
self.replay_freq = replay_freq
if not model_name:
MEMORY_SIZE = 10000
self.memory = ringbuffer.RingBuffer(MEMORY_SIZE)
else:
MEMORY_SIZE = 5000 # smaller memory for retraining
self.memory = ringbuffer.RingBuffer(MEMORY_SIZE)
self.num_episodes = num_episodes
def predict(self, s):
return self.model.predict(np.reshape(s, (1, self.stack_len*111)), verbose=0)[0]
def target_predict(self, s):
total_pred = self.target_models[0].predict(np.reshape(s, (1, self.stack_len*111)), verbose=0)[0]
for i in range(1, self.K):
pred = self.target_models[i].predict(np.reshape(s, (1, self.stack_len*111)), verbose=0)[0]
total_pred += pred
next_pred = total_pred/self.K
return next_pred
def update_targets(self):
model_weights = self.model.get_weights()
self.target_models[self.curr_pointer%self.K].set_weights(model_weights)
def update(self, s, G, B):
self.model.fit(s, np.array(G).reshape(-1, 11), batch_size=B, epochs=1, use_multiprocessing=True, verbose=0)
def sample_action(self, s, eps):
qval = self.predict(s)
if np.random.random() < eps:
return random.randint(0, 10), qval
else:
return np.argmax(qval), qval
def convert_argmax_qval_to_env_action(self, output_value):
        # to reduce the action space, gas and brake cannot be applied at the same time.
        # as well, steering input and gas/brake cannot be applied at the same time.
        # similarly to real-life driving, you brake/accelerate in a straight line and coast while steering.
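        # Resulting discrete action table (11 actions), as implied by the
        # branches below:
        #   0..8 -> steering in {-1.0, -0.75, ..., +0.75, +1.0}, no gas/brake
        #   9    -> gas   = 1/3
        #   10   -> brake = 1/2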
gas = 0.0
brake = 0.0
steering = 0.0
# output value ranges from 0 to 10
if output_value <= 8:
# steering. brake and gas are zero.
output_value -= 4
steering = float(output_value)/4
        elif output_value == 9:
output_value -= 8
gas = float(output_value)/3 # 33%
        elif output_value == 10:
output_value -= 9
brake = float(output_value)/2 # 50% brakes
else:
print("error")
white = np.ones((round(brake * 100), 10))
black = np.zeros((round(100 - brake * 100), 10))
brake_display = np.concatenate((black, white))*255
white = np.ones((round(gas * 100), 10))
black = np.zeros((round(100 - gas * 100), 10))
gas_display = np.concatenate((black, white))*255
control_display = np.concatenate((brake_display, gas_display), axis=1)
return [steering, gas, brake]
def replay(self, batch_size):
batch = self.memory.sample(batch_size)
old_states = []
old_state_preds = []
for (old_state, argmax_qval, reward, next_state) in batch:
next_state_pred = self.target_predict(next_state)
max_next_pred = np.max(next_state_pred)
old_state_pred = self.predict(old_state)
target_q_value = reward + self.gamma * max_next_pred
y = old_state_pred[:]
y[argmax_qval] = target_q_value
old_states.append(old_state)
old_state_preds.append(y.reshape(1, 11))
old_states = np.reshape(old_states, (batch_size, 111*self.stack_len))
old_state_preds = np.array(old_state_preds).reshape(batch_size, 11)
self.model.fit(old_states, old_state_preds, batch_size=batch_size, epochs=1, verbose=0, workers=10, use_multiprocessing=True)
def play_one(self, eps,train=True,video_path=None):
if self.carConfig:
observation = self.env.reset(config=self.carConfig)
else:
observation = self.env.reset()
if video_path is not None:
print("Setting video path to: {}".format(video_path))
reset_video_recorder_filename(video_path,self.env)
done = False
full_reward_received = False
totalreward = 0
iters = 0
a, b, c = transform(observation)
        state = np.concatenate((np.array([compute_steering_speed_gyro_abs(a)]).reshape(1,-1).flatten(), b.reshape(1,-1).flatten(), c), axis=0) # a 111-element vector (7 + 10*10 + 4), all scaled in range 0..1
stacked_state = np.array([state]*self.stack_len, dtype='float32')
while not done:
argmax_qval, qval = self.sample_action(stacked_state, eps)
prev_state = stacked_state
action = self.convert_argmax_qval_to_env_action(argmax_qval)
observation, reward, done, info = self.env.step(action)
a, b, c = transform(observation)
            curr_state = np.concatenate((np.array([compute_steering_speed_gyro_abs(a)]).reshape(1,-1).flatten(), b.reshape(1,-1).flatten(), c), axis=0) # a 111-element vector (7 + 10*10 + 4), all scaled in range 0..1
curr_state.astype('float32')
stacked_state = np.append(stacked_state[1:], [curr_state], axis=0) # appending the lastest frame, pop the oldest
if train == True:
self.memory.append((prev_state, argmax_qval, reward, stacked_state))
if iters%250==0:
self.curr_pointer += 1
self.update_targets()
# replay batch from memory every n steps
if self.replay_freq!=0 and train==True:
if iters % self.replay_freq==0 and iters>10:
try:
self.replay(32)
except Exception as e: # will error if the memory size not enough for minibatch yet
print("error when replaying: ", e)
raise e
totalreward += reward
iters += 1
self.env.render()
if iters > 1500:
print("This episode is stuck")
break
return totalreward, iters
def train(self, retrain=False, eps_mode='dynamic'):
self.timestamp = time.strftime("%m%d_%H%M")
if retrain:
self.timestamp += "_retrain"
#create directory for this training run
dir_name = os.path.join(os.getcwd(), "{}/{}".format(self.train_dir,self.timestamp))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
totalrewards = np.empty(self.num_episodes)
for n in range(self.num_episodes):
print("training ", str(n))
if eps_mode == 'dynamic':
if not self.model_name:
eps = 1/np.sqrt(n + 100)
else: # want to use a very small eps during retraining
eps = 0.01
else:
eps = eps_mode
totalreward, iters = self.play_one(eps)
totalrewards[n] = totalreward
print("episode:", n, "iters", iters, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean())
if n>=0 and n%50==0 and not self.model_name:
# save model (assuming this is NOT the flask app, which WILL pass a model name)
trained_model = os.path.join(os.getcwd(),"{}/{}/avg_dqn_ep_{}.h5".format(self.train_dir,self.timestamp, str(n)))
with open(os.path.join(os.getcwd(), "{}/{}/avg_dqn_ep_{}.pkl".format(self.train_dir,self.timestamp, str(n))),'wb+') as outfile:
pkl.dump(totalrewards, outfile)
self.model.save(trained_model)
if self.model_name:
# we assume that this IS the flask app; if you are trying to retrain FROM an h5, put it in the flask_model directory for now
model_name_no_extension = os.path.basename(self.model_name)
new_model_name = os.path.join(os.getcwd(), "{}/{}/{}".format(self.train_dir,self.timestamp, model_name_no_extension))
print('saving: ', new_model_name)
self.model.save(new_model_name)
rp_name = os.path.join(os.getcwd(), "{}/{}/rewards_plot_{}.png".format(self.train_dir,self.timestamp, model_name_no_extension))
plt.plot(totalrewards, label='retraining reward')
plt.title("Rewards")
plt.savefig(rp_name)
plt.close()
rap_name = os.path.join(os.getcwd(), "{}/{}/ra_plot_{}.png".format(self.train_dir,self.timestamp, model_name_no_extension))
plot_running_avg(totalrewards, rap_name)
with open(os.path.join(os.getcwd(), "{}/{}/{}_rewards_flask.pkl".format(self.train_dir,self.timestamp, model_name_no_extension)),'wb+') as outfile:
pkl.dump(totalrewards, outfile)
with open(os.path.join(os.getcwd(), "{}/{}/{}_car_config.json".format(self.train_dir,self.timestamp, model_name_no_extension)),'w+') as outfile:
json.dump(self.carConfig, outfile)
if not self.model_name:
plt.plot(totalrewards)
# rp_name = os.path.join(os.getcwd(), "train_logs/avg_dqn_lr001_replay20_cpweights250.png")
rp_name = os.path.join(os.getcwd(), "{}/{}/rewards_plot.png".format(self.train_dir,self.timestamp))
plt.title("Rewards")
plt.savefig(rp_name)
plt.close()
rap_name = os.path.join(os.getcwd(), "{}/{}/ra_plot.png".format(self.train_dir,self.timestamp))
plot_running_avg(totalrewards, rap_name)
# with open(os.path.join(os.getcwd(), "train_logs/avg_dqn_total_rewards_final_lr001_replay20_cpweights250.pkl"),'wb+') as outfile:
with open(os.path.join(os.getcwd(), "{}/{}/total_rewards.pkl".format(self.train_dir,self.timestamp)),'wb+') as outfile:
pkl.dump(totalrewards, outfile)
with open(os.path.join(os.getcwd(), "{}/{}/car_config.json".format(self.train_dir,self.timestamp)),'w+') as outfile:
json.dump(self.carConfig, outfile)
self.model.save(os.path.join(os.getcwd(), "{}/{}.h5".format(self.train_dir,self.timestamp)))
copyfile(rap_name, "{}/reward_plot.png".format(self.train_dir))
self.env.close()
return totalrewards
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CarRacing DQN Agent')
parser.add_argument('--train', help='number of episodes to train for')
parser.add_argument('--videos', help='number of videos to generate')
args = parser.parse_args()
if args.train:
n_episodes = int(args.train)
trainer = DQNAgent(n_episodes, None, replay_freq=50, lr=0.001, train_dir="flaskapp/static/default")#, carConfig=car_config)
trainer.train()
elif args.videos:
agents = glob.glob('flaskapp/static/default/*.h5')
# get the most recent agent by timestamp
agents.sort(reverse=True)
agent = agents[0]
tester = DQNAgent(0, agent, video_callable=lambda x: True)
n_videos = int(args.videos)
for i in range(n_videos):
video_path = f"default/test_drive_{i}.mp4"
result = tester.play_one(train=False,video_path=video_path,eps=0.01)
| StarcoderdataPython |
1716406 | import cloudinary
import cloudinary.uploader
import cloudinary.api
cloudinary.config(
cloud_name="grupo-dasa",
api_key="677559119568421",
api_secret="<KEY>"
)
def create_file(file):
return cloudinary.uploader.upload(file)
| StarcoderdataPython |
1647121 | <filename>gs/profile/status/change/interfaces.py
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, print_function, unicode_literals
from zope.interface import Interface
from zope.schema import Bool
from . import GSMessageFactory as _
class ToggleStatusCommand(Interface):
skip = Bool(
title=_('skip-label', 'Skip the monthly profile-status'),
description=_('skip-help',
'Skip receiving the profile-status notification that '
'is sent out once a month'),
default=False,
required=True)
| StarcoderdataPython |
116971 | <filename>tests/test_annotation.py
# OBSS SAHI Tool
# Code written by <NAME>, 2020.
import unittest
class TestAnnotation(unittest.TestCase):
def test_bounding_box(self):
from sahi.annotation import BoundingBox
bbox_minmax = [30, 30, 100, 150]
shift_amount = [50, 40]
bbox = BoundingBox(bbox_minmax, shift_amount=[0, 0])
expanded_bbox = bbox.get_expanded_box(ratio=0.1)
bbox = BoundingBox(bbox_minmax, shift_amount=shift_amount)
shifted_bbox = bbox.get_shifted_box()
# compare
self.assertEqual(expanded_bbox.to_coco_bbox(), [18, 23, 94, 134])
self.assertEqual(expanded_bbox.to_voc_bbox(), [18, 23, 112, 157])
self.assertEqual(shifted_bbox.to_voc_bbox(), [80, 70, 150, 190])
def test_category(self):
from sahi.annotation import Category
category_id = 1
category_name = "car"
category = Category(id=category_id, name=category_name)
self.assertEqual(category.id, category_id)
self.assertEqual(category.name, category_name)
def test_mask(self):
from sahi.annotation import Mask
coco_segmentation = [[1, 1, 325, 125, 250, 200, 5, 200]]
full_shape_height, full_shape_width = 500, 600
full_shape = [full_shape_height, full_shape_width]
mask = Mask.from_coco_segmentation(segmentation=coco_segmentation, full_shape=full_shape)
self.assertEqual(mask.full_shape_height, full_shape_height)
self.assertEqual(mask.full_shape_width, full_shape_width)
self.assertEqual(mask.bool_mask[11, 2], True)
def test_object_annotation(self):
from sahi.annotation import ObjectAnnotation
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3344349 | from threading import RLock
from typing import Optional
from rx.core import typing
from rx.core.typing import Disposable
class SerialDisposable(Disposable):
"""Represents a disposable resource whose underlying disposable
resource can be replaced by another disposable resource, causing
automatic disposal of the previous underlying disposable resource.
"""
def __init__(self) -> None:
self.current: Optional[Disposable] = None
self.is_disposed = False
self.lock = RLock()
super().__init__()
def get_disposable(self) -> Optional[Disposable]:
return self.current
def set_disposable(self, value) -> None:
"""If the SerialDisposable has already been disposed, assignment
to this property causes immediate disposal of the given
disposable object. Assigning this property disposes the previous
disposable object."""
old: Optional[Disposable] = None
with self.lock:
should_dispose = self.is_disposed
if not should_dispose:
old = self.current
self.current = value
if old is not None:
old.dispose()
if should_dispose and value is not None:
value.dispose()
disposable = property(get_disposable, set_disposable)
def dispose(self) -> None:
"""Disposes the underlying disposable as well as all future
replacements."""
old: Optional[Disposable] = None
with self.lock:
if not self.is_disposed:
self.is_disposed = True
old = self.current
self.current = None
if old is not None:
old.dispose()
| StarcoderdataPython |
1784772 | from __future__ import annotations
from ...distributed.options import Options
from ...distributed.unit import Unit
from ...domain.sensor_type import SensorType
from ...domain.sensor_type_repository import SensorTypeRepository
from ...domain.options import Options as DomainOptions
from ..base_command import BaseCommand
from ..container import Container
from ...domain.unit_factory import UnitFactory
class RegisterSensorType(BaseCommand):
name: str
options: Options
factory: callable
unit: Unit
def __init__(self, name: str, options: Options, factory: callable, unit: Unit):
self.name = name
self.options = options
self.factory = factory
self.unit = unit
def get_handler(self, container: Container):
sensor_types = container.get_service('sensor_types')
return self.Handler(sensor_types)
class Handler:
_sensor_types: SensorTypeRepository
_unit_factory: UnitFactory
def __init__(self, sensor_types: SensorTypeRepository):
self._sensor_types = sensor_types
self._unit_factory = UnitFactory()
async def handle(self, command: RegisterSensorType):
unit = self._unit_factory.create_unit(command.unit.get_unit_type())
options = DomainOptions.from_dict(command.options.to_dict())
sensor_type = SensorType(
command.name,
options,
command.factory,
unit
)
return await self._sensor_types.register_sensor_type(command.name, sensor_type)
| StarcoderdataPython |
3287856 | """
https://github.com/FrederikSchorr/sign-language
Train a pre-trained I3D convolutional network to classify videos
"""
import os
import glob
import time
import sys
import numpy as np
import pandas as pd
import keras
from keras import backend as K
from datagenerator import VideoClasses, FramesGenerator
from model_i3d import Inception_Inflated3d, add_i3d_top
def layers_freeze(keModel:keras.Model) -> keras.Model:
print("Freeze all %d layers in Model %s" % (len(keModel.layers), keModel.name))
for layer in keModel.layers:
layer.trainable = False
return keModel
def layers_unfreeze(keModel:keras.Model) -> keras.Model:
print("Unfreeze all %d layers in Model %s" % (len(keModel.layers), keModel.name))
for layer in keModel.layers:
layer.trainable = True
return keModel
def count_params(keModel:keras.Model):
trainable_count = int(
np.sum([K.count_params(p) for p in set(keModel.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(keModel.non_trainable_weights)]))
print('Total params: {:,}'.format(trainable_count + non_trainable_count))
print('Trainable params: {:,}'.format(trainable_count))
print('Non-trainable params: {:,}'.format(non_trainable_count))
return
def train_I3D_oflow_end2end(diVideoSet):
"""
* Loads pretrained I3D model,
* reads optical flow data generated from training videos,
* adjusts top-layers adequately for video data,
    * trains only the new top layers,
* then fine-tunes entire neural network,
* saves logs and models to disc.
"""
# directories
sFolder = "%03d-%d"%(diVideoSet["nClasses"], diVideoSet["nFramesNorm"])
sClassFile = "data-set/%s/%03d/class.csv"%(diVideoSet["sName"], diVideoSet["nClasses"])
#sVideoDir = "data-set/%s/%03d"%(diVideoSet["sName"], diVideoSet["nClasses"])
#sImageDir = "data-temp/%s/%s/image"%(diVideoSet["sName"], sFolder)
#sImageFeatureDir = "data-temp/%s/%s/image-i3d"%(diVideoSet["sName"], sFolder)
sOflowDir = "data-temp/%s/%s/oflow"%(diVideoSet["sName"], sFolder)
#sOflowFeatureDir = "data-temp/%s/%s/oflow-i3d"%(diVideoSet["sName"], sFolder)
sModelDir = "model"
diTrainTop = {
"fLearn" : 1e-3,
"nEpochs" : 3}
diTrainAll = {
"fLearn" : 1e-4,
"nEpochs" : 17}
nBatchSize = 4
print("\nStarting I3D end2end training ...")
print(os.getcwd())
# read the ChaLearn classes
oClasses = VideoClasses(sClassFile)
# Load training data
genFramesTrain = FramesGenerator(sOflowDir + "/train", nBatchSize,
diVideoSet["nFramesNorm"], 224, 224, 2, oClasses.liClasses)
genFramesVal = FramesGenerator(sOflowDir + "/val", nBatchSize,
diVideoSet["nFramesNorm"], 224, 224, 2, oClasses.liClasses)
# Load pretrained i3d model and adjust top layer
print("Load pretrained I3D flow model ...")
keI3DOflow = Inception_Inflated3d(
include_top=False,
weights='flow_imagenet_and_kinetics',
input_shape=(diVideoSet["nFramesNorm"], 224, 224, 2))
print("Add top layers with %d output classes ..." % oClasses.nClasses)
keI3DOflow = layers_freeze(keI3DOflow)
keI3DOflow = add_i3d_top(keI3DOflow, oClasses.nClasses, dropout_prob=0.5)
# Prep logging
sLog = time.strftime("%Y%m%d-%H%M", time.gmtime()) + \
"-%s%03d-oflow-i3d"%(diVideoSet["sName"], diVideoSet["nClasses"])
# Helper: Save results
csv_logger = keras.callbacks.CSVLogger("log/" + sLog + "-acc.csv", append = True)
# Helper: Save the model
os.makedirs(sModelDir, exist_ok=True)
cpTopLast = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-above-last.h5", verbose = 0)
cpTopBest = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-above-best.h5",
verbose = 1, save_best_only = True)
cpAllLast = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-entire-last.h5", verbose = 0)
cpAllBest = keras.callbacks.ModelCheckpoint(filepath = sModelDir + "/" + sLog + "-entire-best.h5",
verbose = 1, save_best_only = True)
# Fit top layers
print("Fit I3D top layers with generator: %s" % (diTrainTop))
optimizer = keras.optimizers.Adam(lr = diTrainTop["fLearn"])
keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
count_params(keI3DOflow)
keI3DOflow.fit_generator(
generator = genFramesTrain,
validation_data = genFramesVal,
epochs = diTrainTop["nEpochs"],
workers = 4,
use_multiprocessing = True,
max_queue_size = 8,
verbose = 1,
callbacks=[csv_logger, cpTopLast, cpTopBest])
# Fit entire I3D model
print("Finetune all I3D layers with generator: %s" % (diTrainAll))
keI3DOflow = layers_unfreeze(keI3DOflow)
optimizer = keras.optimizers.Adam(lr = diTrainAll["fLearn"])
keI3DOflow.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
count_params(keI3DOflow)
keI3DOflow.fit_generator(
generator = genFramesTrain,
validation_data = genFramesVal,
epochs = diTrainAll["nEpochs"],
workers = 4,
use_multiprocessing = True,
max_queue_size = 8,
verbose = 1,
callbacks=[csv_logger, cpAllLast, cpAllBest])
return
if __name__ == '__main__':
"""diVideoSet = {"sName" : "ledasila",
"nClasses" : 21, # number of classes
"nFramesNorm" : 40, # number of frames per video
"nMinDim" : 240, # smaller dimension of saved video-frames
"tuShape" : (288, 352), # height, width
"nFpsAvg" : 25,
"nFramesAvg" : 75,
"fDurationAvg" : 3.0} # seconds
"""
diVideoSet = {"sName" : "chalearn",
"nClasses" : 20, # number of classes
"nFramesNorm" : 40, # number of frames per video
"nMinDim" : 240, # smaller dimension of saved video-frames
"tuShape" : (240, 320), # height, width
"nFpsAvg" : 10,
"nFramesAvg" : 50,
"fDurationAvG" : 5.0} # seconds
    train_I3D_oflow_end2end(diVideoSet)
| StarcoderdataPython |
1700707 | <gh_stars>1-10
from polecat.project import Project as BaseProject
from .models import * # noqa
class Project(BaseProject):
pass
| StarcoderdataPython |
156376 | from __future__ import print_function, absolute_import
from .video_datasets import *
| StarcoderdataPython |
3304322 | #!/usr/bin/python3
from ast import parse
from types import new_class
from typing import DefaultDict
import config #copy config-example.py to config.py and set values
from datetime import datetime
import paho.mqtt.client as mqtt
from smip import graphql
import requests
import uuid
import argparse
import json
import sys
#print(sys.argv)
mqtt_broker = config.mqtt["broker"]
uuid = str(uuid.uuid4())[:8]
mqtt_clientid = config.mqtt["clientprefix"] + "GW" + uuid
mqtt_topic = "" #This should be enumerated, not hard coded
tanks_dic = {} # {tank_name: tank_id}
attributes_dic = {} # {tank_name: {attribute:attritube_id}}
type_id = config.type_id
parent_id = config.parent_id
# Connection information for your SMIP Instance GraphQL endpoint
graphql = graphql(config.smip["authenticator"], config.smip["password"], config.smip["name"], config.smip["role"], config.smip["url"])
mqtt_topic = graphql.args.modeltype
print (f"Listening for MQTT messages on topic: {mqtt_topic} ...")
def delete_all():
for item in tanks_dic:
print(item)
delete_equipment(tanks_dic[item])
def delete_equipment(equipment_id):
smp_mutation = f'''
mutation MyNewEquipmentMutation {{
deleteEquipment(
input: {{
id: "{equipment_id}"
}}
){{
equipment {{
id
displayName
}}
}}
}} '''
smp_response = ""
try:
print("start deletion")
smp_response = graphql.post(smp_mutation)
print(smp_response)
except requests.exceptions.HTTPError as e:
print("An error occured accessing the SM Platform!")
print(e)
def get_tank_info(equipment_type_id):
smp_query = f'''query get_id {{
equipments(
filter: {{typeId: {{equalTo: "{equipment_type_id}"}}}}
) {{
displayName
id
attributes {{
displayName
id
}}
}}
}}'''
smp_response = ""
try:
smp_response = graphql.post(smp_query)
equipments = smp_response['data']['equipments']
#print(equipments)
for ele in equipments:
tank_id = ele['id']
tank_name = ele['displayName']
tanks_dic[tank_name] = tank_id
attributes_dic[tank_name] = {'id': tank_id}
for attribute in ele['attributes']:
attributes_dic[tank_name][attribute['displayName']] = attribute['id']
print(attributes_dic)
print(tanks_dic)
except requests.exceptions.HTTPError as e:
print("An error occured accessing the SM Platform!")
print(e)
def create_new_equipment(equipment_name, equipment_id, parent_id):
print("im in")
smp_mutation = f'''
mutation MyNewEquipmentMutation {{
createEquipment(
input: {{
equipment: {{
displayName: "{equipment_name}"
typeId: "{equipment_id}"
partOfId: "{parent_id}"
}}
}}
) {{
equipment {{
id
displayName
attributes {{
displayName
id
}}
}}
}}
}}
'''
smp_response = ""
try:
smp_response = graphql.post(smp_mutation)
print(smp_response)
equipment = smp_response['data']['createEquipment']['equipment']
equipment_id = equipment['id']
tanks_dic[equipment_name] = equipment_id
attributes = equipment['attributes']
attributes_dic[equipment_name] = {}
for each in attributes:
attr_name = each['displayName']
attr_id = each['id']
attributes_dic[equipment_name][attr_name] = attr_id
print(tanks_dic)
print(attributes_dic)
return int(equipment_id)
except requests.exceptions.HTTPError as e:
print("An error occured accessing the SM Platform!")
print(e)
#create_new_equipment("test2")
def make_datetime_utc():
utc_time = str(datetime.utcnow())
time_parts = utc_time.split(" ")
utc_time = "T".join(time_parts)
time_parts = utc_time.split(".")
utc_time = time_parts[0] + "Z"
return utc_time
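
# e.g. make_datetime_utc() -> '2021-06-01T14:03:22Z' (illustrative value;
# the helper just reshapes str(datetime.utcnow()) into ISO-8601 with a Z).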
def update_smip(sample_value):
print("Posting Data to CESMII Smart Manufacturing Platform...")
print("in update")
sample_value = json.loads(sample_value)
tank_id = 0
tank_name = sample_value["tank_name"]
if tank_name in tanks_dic:
tank_id = tanks_dic[tank_name]
else:
tank_id = create_new_equipment(tank_name, type_id, parent_id)
tanks_dic[tank_name] = tank_id
for attribute in sample_value:
if attribute == 'tank_name': continue
value_send = str(sample_value[attribute]) # Value to be sent to the attribute ID
write_attribute_id = attributes_dic[tank_name][attribute] #The Equipment Attribute ID to be updated in your SMIP model
smp_query = f"""
mutation updateTimeSeries {{
replaceTimeSeriesRange(
input: {{attributeOrTagId: "{write_attribute_id}", entries: [ {{timestamp: "{make_datetime_utc()}", value: "{value_send}", status: "1"}} ] }}
) {{
clientMutationId,
json
}}
}}
"""
smp_response = ""
try:
smp_response = graphql.post(smp_query)
except requests.exceptions.HTTPError as e:
print("An error occured accessing the SM Platform!")
print(e)
print("Response from SM Platform was...")
#print(json.dumps(smp_response, indent=2))
print()
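
# Expected MQTT payload shape (attribute names other than the required
# "tank_name" key are illustrative -- they must match the SMIP model):
# {"tank_name": "Tank01", "Level": 42.5, "Temperature": 18.2}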
def on_message(client, userdata, message):
msg = str(message.payload.decode("utf-8"))
print("Received MQTT message: " + msg)
update_smip(msg)
# python3 ga
if mqtt_topic=="clean":
get_tank_info(type_id)
delete_all()
quit()
mqtt_client = mqtt.Client(mqtt_clientid)
mqtt_client.connect(mqtt_broker)
print ("MQTT Broker: " + mqtt_broker)
print ("MQTT Client ID: " + mqtt_clientid)
print ("Publish Topic: " + mqtt_topic)
get_tank_info(type_id)
mqtt_client.subscribe('#')
mqtt_client.on_message=on_message
mqtt_client.loop_forever()
| StarcoderdataPython |
3262596 | #! /bin/python3
import json
import pandas
import numpy
class mark_mandatory:
def __init__(self):
pass
    # This method adds the 'identifier' prefix to the names of the mandatory fields.
def mark_as_mandatory(self, names_list, mandatory_list, mand_ident, identifier):
print('#-1')
print(numpy.nonzero(mandatory_list))
print('#0')
print(mand_ident)
ind = numpy.where(mandatory_list == mand_ident)
print('#1')
print(ind)
new_list = names_list
print('#2')
print(new_list)
new_list[ind]=[str(identifier)+ item for item in new_list[ind]]
print('#3')
print(new_list)
return(new_list)
| StarcoderdataPython |
1709687 | # Copyright 2021 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from humanfriendly import parse_timespan
from playhouse import db_url
from . import tokens
from . import tools
from ..database.database import database_proxy
from webapp.common import Reactor
from webapp.settings import get_secret
from webapp.settings import settings_pool as settings
class CodeCLI(Reactor):
def __init__(self, parent):
self.name = 'code'
self.parser = parent.add_parser(self.name,
help='Actions with access codes')
group = self.parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'-i',
'--info',
            help='Show information about an access code.',
metavar='<code>',
type=str,
)
group.add_argument(
'-c',
'--create',
help='Create a new access code',
nargs=2,
metavar=('<code>', '<desc>'),
type=str,
)
group.add_argument(
'-r',
'--revoke',
            help='Revoke an access code.',
metavar='<code>',
type=str,
)
@staticmethod
def init_db():
database_url = get_secret('AUTH_DATABASE_URL')
database_proxy.initialize(db_url.connect(database_url))
@staticmethod
def info(code):
code = tools.validate_code(code)
if code is None:
print('Error: code not found.')
return
print('UUID: %s' % code.uuid)
print('CODE: %s' % code.code)
print('DESC: %s' % code.desc)
print('EXPIRE DATE: %s' % code.expire)
print('REVOKED: %s' % code.revoke)
print('ISSUE DATE: %s' % code.date)
def process(self, args):
self.init_db()
if args.create:
tools.register_code(args.create[0], args.create[1])
elif args.info:
self.info(args.info)
else:
self.parser.print_help()
class TokenCLI(Reactor):
def __init__(self, parent):
self.name = 'token'
self.parser = parent.add_parser(self.name, help='Actions with tokens')
group = self.parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'-d',
'--decode',
help='Check if a token is valid, decode it and show its content.',
metavar='<token>',
type=str,
)
group.add_argument(
'-c',
'--create',
help='Generate a new access token.',
metavar='<resource>',
type=str,
)
group.add_argument(
'-r',
'--revoke',
help='Add a token to the blacklist.',
metavar='<token>',
type=str,
)
@staticmethod
def create_token(resource, **kwargs):
print(tokens.create_access_token(resource=resource, **kwargs))
@staticmethod
def decode_token(token):
print(tokens.decode_jwt_token(token))
def process(self, args):
tokens.idp_e.update_secret_key(get_secret('JWT_ENCODE_KEY'))
tokens.idp_e.update_algm(settings.auth.cipher_algorithm)
tokens.idp_e.update_ttl(
'access', parse_timespan(settings.auth.access_token_ttl)
)
tokens.idp_d.update_secret_key(get_secret('JWT_DECODE_KEY'))
tokens.idp_d.update_algm(settings.auth.cipher_algorithm)
if args.create:
self.create_token(args.create)
elif args.decode:
self.decode_token(args.decode)
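
# Hypothetical invocations, assuming these Reactor subclasses are wired
# into the project's argparse entry point:
#   <cli> code --create ABC123 "beta testers"
#   <cli> code --info ABC123
#   <cli> token --create reports
#   <cli> token --decode <token>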
| StarcoderdataPython |
4827598 | # windows.py
import raylibpy as rl
from ctypes import byref
def main():
rl.init_window(rl.get_screen_width(), rl.get_screen_height(), "raylib [core] example - basic window")
rl.toggle_fullscreen()
camera = rl.Camera(
rl.Vector3(4.0, 2.0, 4.0),
rl.Vector3(0.0, 1.0, 0.0),
rl.Vector3(0.0, 1.0, 0.0),
60.0,
rl.CAMERA_PERSPECTIVE)
rl.set_camera_mode(camera, rl.CAMERA_FREE)
rl.set_target_fps(60)
while not rl.window_should_close():
rl.update_camera(byref(camera))
rl.begin_drawing()
rl.clear_background(rl.DARKGRAY)
rl.begin_mode3d(camera)
rl.draw_cube(rl.Vector3(-16.0, 2.5, 0.0), 1.0, 5.0, 32.0, rl.BLUE)
rl.draw_cube(rl.Vector3(16.0, 2.5, 0.0), 1.0, 5.0, 32.0, rl.LIME)
rl.draw_cube(rl.Vector3(0.0, 2.5, 16.0), 32.0, 5.0, 1.0, rl.GOLD)
rl.draw_sphere(rl.Vector3(0.0, 0.0, 0.0), 5.0, rl.LIME)
rl.draw_text("Congrats! You created your first window!", 190, 200, 20, rl.WHITE)
rl.draw_grid(40, 1)
rl.end_mode3d()
rl.end_drawing()
rl.close_window()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1662626 | <gh_stars>0
import math
k, b = raw_input().split(' ')
k = int(k)
b = int(b)
numbits = 0
cap = (math.pow(2, b) - 1) % 1000000009
i = 1
multiple = k * i
while multiple <= cap:
for x in range(0, 32):
numbits += (multiple >> x) & 1
i += 1
multiple = k * i
print numbits
| StarcoderdataPython |
3252638 | import random
from typing import Tuple, List
import numpy.random
from Base.bp2DState import State
from Base.bp2DBox import Box
from Base.bpReadWrite import ReadWrite
from Base.bp2DPnt import Point
def state_generator(bin_size: Tuple[int, int], box_list: List[Tuple[int, Tuple[int, int]]], path: str = None, seed: int = 0):
random.seed(seed)
state = State(0, bin_size, [])
state.open_new_bin()
counter = 0
for number, box_dims in box_list:
for _ in range(number):
state.boxes_open.append(Box(box_dims[0], box_dims[1], n=counter))
counter += 1
random.shuffle(state.boxes_open)
if path is not None:
ReadWrite.write_state(path, state)
return state
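
# e.g. state_generator((10, 10), [(5, (2, 3)), (2, (4, 4))], seed=42)
# -> a State with one open bin and 7 shuffled open boxes (five 2x3, two 4x4)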
def random_state_generator(bin_size: Tuple[int, int], box_num: int = 100, box_width_min: int = 1,
box_width_max: int = 4,
box_height_min: int = 1, box_height_max: int = 4, path: str = None, seed: int = 0):
state = State(0, bin_size, [])
state.open_new_bin()
random.seed(seed)
for i in range(box_num):
width = random.randint(box_width_min, box_width_max)
height = random.randint(box_height_min, box_height_max)
state.boxes_open.append(Box(width, height, n=i))
if path is not None:
ReadWrite.write_state(path, state)
return state
'''
Generates a random dataset by recursively dividing boxes. The returned state contains already packed boxes.
A peeling process removes margins of randomly selected boxes to leave a little wiggle room.
Example call >>> sliced_box_state_generator((10,10), bin_num=8, box_num=100, peel_area=100)
'''
def sliced_box_state_generator(bin_size: Tuple[int, int], bin_num: int=1, box_num: int = 100,
peel_area: int = 0, peel_margin: int = 1,
box_width_min: int = 1, box_width_max: int = 4,
box_height_min: int = 1, box_height_max: int = 4,
path: str = None, seed: int = 0):
state = State(0, bin_size, [])
random.seed(seed)
boxes = []
for i in range(bin_num):
box = Box(bin_size[0], bin_size[1])
emb = (i,(0,0))
sdir = random.randint(0,1) # slice direction
boxes.append((box, emb, sdir))
state.open_new_bin()
while len(boxes) < box_num:
box, emb, sdir = boxes.pop(0)
# cut direction = width
if sdir == 0:
if box.w < box_width_min*2: boxes.append((box, emb, sdir))
else:
cut_pos = random.randint(box_width_min, box.w-box_width_min)
boxes.append((Box(cut_pos, box.h), (emb[0], (emb[1][0], emb[1][1])), (sdir+1)%2))
boxes.append((Box(box.w-cut_pos, box.h), (emb[0], (emb[1][0]+cut_pos, emb[1][1])), (sdir+1)%2))
# cut direction = height
else:
if box.h < box_height_min*2: boxes.append((box, emb, sdir))
else:
cut_pos = random.randint(box_height_min, box.h-box_height_min)
boxes.append((Box(box.w, cut_pos), (emb[0], (emb[1][0], emb[1][1])), (sdir+1)%2))
boxes.append((Box(box.w, box.h-cut_pos), (emb[0], (emb[1][0], emb[1][1]+cut_pos)), (sdir+1)%2))
# peel margins of boxes
peeled = 0
while peeled < peel_area:
box, emb, sdir = random.choice(boxes)
if random.randint(0, 1) == 0:
if box.w >= peel_margin+1:
box.w -= peel_margin
peeled += box.h
else:
if box.h >= peel_margin+1:
box.h -= peel_margin
peeled += box.w
# enumerate and assign boxes
for i,(box,emb,sdir) in enumerate(boxes):
box.n = i
bin = emb[0]
pos = Point(emb[1][0], emb[1][1])
state.place_box_in_bin_at_pnt(box, bin, pos)
if path is not None:
ReadWrite.write_state(path, state)
return state
| StarcoderdataPython |
65684 | <reponame>cldf-datasets/rantanenurageo<filename>test.py
import sys
import csv
csv.field_size_limit(sys.maxsize)
def test_valid(cldf_dataset, cldf_logger):
assert cldf_dataset.validate(log=cldf_logger)
| StarcoderdataPython |
3236498 | <gh_stars>1-10
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
import requests
from collections import OrderedDict
import os
import sys
from io import BufferedReader
# sys.path.append("..")
from utils.yamlparser import ordered_yaml_dump
import subprocess
import io
import logging
import json
from datetime import datetime
import yaml
from django.conf import settings
from httprunner.task import HttpRunner
from httprunner.exceptions import ParamsError
from rest_framework import status
from rest_framework.response import Response
from testcases.models import Testcases
from envs.models import Envs
from reports.models import Reports
from debugtalks.models import DebugTalks
from projects.models import Projects
from configures.models import Configures
from modules.models import Modules
logger = logging.getLogger('test')
os.environ[
"PATH"] += os.pathsep + r'C:\Users\dell\PycharmProjects\DeployDjango\django_app_docker\DjangoDev03\venv\Scripts'
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return str(obj, encoding='utf-8')
return json.JSONEncoder.default(self, obj)
def timestamp_to_datetime(summary, type=True):
if not type:
time_stamp = int(summary["time"]["start_at"])
summary['time']['start_datetime'] = datetime. \
fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')
for detail in summary['details']:
try:
time_stamp = int(detail['time']['start_at'])
detail['time']['start_at'] = datetime.fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')
except Exception:
pass
for record in detail['records']:
try:
time_stamp = int(record['meta_data']['request']['start_timestamp'])
record['meta_data']['request']['start_timestamp'] = \
datetime.fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')
except Exception:
pass
return summary
def generate_testcase_files(instance, env, testcase_dir_path):
testcases_list = []
config = {
'config': {
'name': instance.name,
"variables": [],
'request': {
'base_url': env.base_url if env else ''
}
}
}
testcases_list.append(config)
# include = eval(instance.include)
# request = eval(instance.request)
    # Fetch the preset config and preset test cases of the current case
    include = json.loads(instance.include, encoding='utf-8')
    # Fetch the request info of the current case
    request = json.loads(instance.request, encoding='utf-8')
    module_name = instance.module.name  # interface name
    project_name = instance.module.project.name  # project name
testcase_dir_path = os.path.join(testcase_dir_path, project_name)
    # Create the folder named after the project
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
debugtalk_obj = Projects.objects.get(name=project_name).debugtalk
if debugtalk_obj:
debugtalk = debugtalk_obj.debugtalk
else:
debugtalk = ""
    # Create the debugtalk.py file
with open(os.path.join(testcase_dir_path, 'debugtalk.py'),
mode='w',
encoding='utf-8') as one_file:
one_file.write(debugtalk)
testcase_dir_path = os.path.join(testcase_dir_path, module_name)
    # Create the interface-name folder under the project directory
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
# {'config': 2, 'testcases': [2,5]}
    # If the include preset contains a config, add it to testcases_list
# if 'config' in include:
# config_id = include.get('config')
# config_obj = Configures.objects.filter(id=config_id).first()
# if config_obj:
    # #     # The request headers (currently a list of nested dicts) must be converted to a dict
# # config_request = eval(config_obj.request)
# config_request = json.loads(config_obj.request, encoding='utf-8')
#
# # config_request = eval(config_obj.request)
# # config_request.get('config').get('request').setdefault('base_url', env.base_url)
# # config_dict = config_request.get('config')
# # config_dict['request']['base_url'] = env.base_url
# # config_request['config']['name'] = instance.name
# config_request['config']['request']['base_url'] = env.base_url
# # testcases_list.append(config_request)
# testcases_list[0] = config_request
    # If the include preset contains testcases, add them to testcases_list
if 'testcases' in include:
for t_id in include.get('testcases'):
testcase_obj = Testcases.objects.filter(id=t_id).first()
if testcase_obj:
try:
# testcase_request = eval(testcase_obj.request)
testcase_request = json.loads(testcase_obj.request, encoding='utf-8')
except Exception as e:
logger.error(e)
continue
else:
                    # Pre-declare, as global variables, the data extracted by the preset cases
extract = testcase_request["test"].get("extract")
if extract:
for e in extract:
testcases_list[0]["config"]["variables"].append({[i for i in e.keys()][0]: ''})
testcase_request["test"] = OrderedDict(testcase_request["test"])
testcases_list.append(OrderedDict(testcase_request))
    # Append the current case's request to testcases_list
request["test"] = OrderedDict(request["test"])
testcases_list.append(request)
with open(os.path.join(testcase_dir_path, instance.name + '.yml'),
mode="w", encoding="utf-8") as one_file:
ordered_yaml_dump(testcases_list, one_file, default_flow_style=False, allow_unicode=True)
def generate_debug_files(data, env, testcase_dir_path):
testcases_list = []
config = {
'config': {
'name': data["name"],
'request': {
'base_url': env.base_url if env else ''
}
}
}
testcases_list.append(config)
# include = eval(instance.include)
# request = eval(instance.request)
    # Fetch the preset config and preset test cases of the current case
    include = json.loads(data["include"], encoding='utf-8')
    # Fetch the request info of the current case
    request = json.loads(data["request"], encoding='utf-8')
    module = Modules.objects.get(id=data["module"]["iid"])  # module id
    project = Projects.objects.get(id=data["module"]["pid"])  # project id
testcase_dir_path = os.path.join(testcase_dir_path, project.name)
    # Create the folder named after the project
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
debugtalk_obj = project.debugtalk
if debugtalk_obj:
debugtalk = debugtalk_obj.debugtalk
else:
debugtalk = ""
    # Create the debugtalk.py file
with open(os.path.join(testcase_dir_path, 'debugtalk.py'),
mode='w',
encoding='utf-8') as one_file:
one_file.write(debugtalk)
testcase_dir_path = os.path.join(testcase_dir_path, module.name)
    # Create the interface-name folder under the project directory
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
# {'config': 2, 'testcases': [2,5]}
    # If the include preset contains a config, add it to testcases_list
# if 'config' in include:
# config_id = include.get('config')
# config_obj = Configures.objects.filter(id=config_id).first()
# if config_obj:
    # #     # The request headers (currently a list of nested dicts) must be converted to a dict
# # config_request = eval(config_obj.request)
# config_request = json.loads(config_obj.request, encoding='utf-8')
#
# # config_request = eval(config_obj.request)
# # config_request.get('config').get('request').setdefault('base_url', env.base_url)
# # config_dict = config_request.get('config')
# # config_dict['request']['base_url'] = env.base_url
# # config_request['config']['name'] = instance.name
# config_request['config']['request']['base_url'] = env.base_url
# # testcases_list.append(config_request)
# testcases_list[0] = config_request
    # If the include preset contains testcases, add them to testcases_list
if 'testcases' in include:
for t_id in include.get('testcases'):
testcase_obj = Testcases.objects.filter(id=t_id).first()
if testcase_obj:
try:
# testcase_request = eval(testcase_obj.request)
testcase_request = json.loads(testcase_obj.request, encoding='utf-8')
testcase_request["test"].pop('skip', 0)
testcase_request["test"].pop('skipIf', 0)
testcase_request["test"].pop('skipUnless', 0)
except Exception as e:
logger.error(e)
continue
else:
testcases_list.append(testcase_request)
    # Append the current case's request to testcases_list
testcases_list.append(request)
with open(os.path.join(testcase_dir_path, data["name"] + '.yml'),
mode="w", encoding="utf-8") as one_file:
yaml.dump(testcases_list, one_file, allow_unicode=True)
def generate_locust_files(instance, env, testcase_dir_path, repeat_list):
testcases_list = []
if 0 not in repeat_list:
config = {
'config': {
'name': instance.name,
'request': {
'base_url': env.base_url if env else ''
}
}
}
testcases_list.append(config)
repeat_list.append(0)
# include = eval(instance.include)
# request = eval(instance.request)
    # Fetch the preset config and preset test cases of the current case
    include = json.loads(instance.include, encoding='utf-8')
    # Fetch the request info of the current case
    request = json.loads(instance.request, encoding='utf-8')
    project_name = instance.module.project.name  # project name
debugtalk_obj = Projects.objects.get(name=project_name).debugtalk
if debugtalk_obj:
debugtalk = debugtalk_obj.debugtalk
else:
debugtalk = ""
    # Create the debugtalk.py file
with open(os.path.join(testcase_dir_path, 'debugtalk.py'),
mode='w',
encoding='utf-8') as one_file:
one_file.write(debugtalk)
if 'testcases' in include:
for t_id in include.get('testcases'):
if t_id not in repeat_list:
testcase_obj = Testcases.objects.filter(id=t_id).first()
if testcase_obj:
try:
testcase_request = json.loads(testcase_obj.request, encoding='utf-8')
except Exception as e:
logger.error(e)
continue
else:
testcases_list.append(testcase_request)
repeat_list.append(t_id)
    # Append the current case's request to testcases_list
if instance.id not in repeat_list:
testcases_list.append(request)
repeat_list.append(instance.id)
with open(os.path.join(testcase_dir_path, "locust" + '.yml'),
mode="a", encoding="utf-8") as one_file:
yaml.dump(testcases_list, one_file, allow_unicode=True)
def create_report(runner, report_name=None):
"""
创建测试报告
:param runner:
:param report_name:
:return:
"""
time_stamp = int(runner.summary["time"]["start_at"])
start_datetime = datetime.fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')
runner.summary['time']['start_datetime'] = start_datetime
    # keep duration to 3 decimal places
runner.summary['time']['duration'] = round(runner.summary['time']['duration'], 3)
report_name = report_name if report_name else start_datetime
runner.summary['html_report_name'] = report_name
for item in runner.summary['details']:
try:
for record in item['records']:
record['meta_data']['response']['content'] = record['meta_data']['response']['content']. \
decode('utf-8')
record['meta_data']['response']['cookies'] = dict(record['meta_data']['response']['cookies'])
request_body = record['meta_data']['request']['body']
if 'files' in record['meta_data']['request'].keys():
files_request = record['meta_data']['request']['files']
record['meta_data']['request']['files'] = [files_request[file][0] for file in files_request]
record['meta_data']['request']['body'] = record['meta_data']['request']['files']
if isinstance(request_body, bytes):
record['meta_data']['request']['body'] = request_body.decode('utf-8')
except Exception as e:
print(e)
summary = json.dumps(runner.summary, cls=MyEncoder, ensure_ascii=False)
report_name = report_name + '_' + datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
report_path = runner.gen_html_report(html_report_name=report_name)
# with open(report_path, encoding='utf-8') as stream:
# reports = stream.read()
test_report = {
'name': report_name,
'result': runner.summary.get('success'),
'success': runner.summary.get('stat').get('successes'),
'count': runner.summary.get('stat').get('testsRun'),
'html': report_path,
}
report_obj = Reports.objects.create(**test_report)
return report_obj
def run_testcase(instance, testcase_dir_path, is_email=False, email='', debug=False):
"""
运行用例
:return:
:param instance: 实例
:param testcase_dir_path: 用例根目录路径
:return dict
"""
runner = HttpRunner()
# runner.run(testcase_dir_path)
try:
runner.run(testcase_dir_path)
except ParamsError:
logger.error("用例参数有误")
data = {
"msg": "用例参数有误"
}
return Response(data, status=400)
runner.summary = timestamp_to_datetime(runner.summary, type=False)
if debug:
return runner.summary
try:
report_name = instance.name
except Exception as e:
        report_name = 'abandoned report' + '-' + datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')
report = create_report(runner, report_name=report_name)
data_dict = {
"id": report.id
}
if is_email:
report_full_path = report.html
content = "执行结果:{},用例总数:{},成功总数:{},详情请查看测试报告。" \
.format("Pass" if report.result == 1 else "Fail", report.count, report.success)
send_email_text(report.name, content, report_full_path, email)
return Response(data_dict, status=status.HTTP_200_OK)
def is_locust(data, env, testcase_objs):
url = "http://192.168.0.147:8089"
if "win" in sys.platform:
url = "http://192.168.0.147:8089"
taskinfo = os.popen('netstat -ano | findstr 8089')
line = taskinfo.readline()
aList = line.split()
taskinfo.close()
if aList:
if aList[4] != '0':
data_dict = {
"code": 3,
"msg": "压测端口已被占用,是否解除占用",
}
return Response(data_dict, status=status.HTTP_200_OK)
elif "linux" in sys.platform:
url = "http://172.16.58.3:8089"
taskinfo = os.popen('lsof -i:8089')
line = taskinfo.readline()
aList = line.split()
taskinfo.close()
if aList:
if aList[0] != '0':
data_dict = {
"code": 3,
"msg": "压测端口已被占用,是否解除占用",
}
return Response(data_dict, status=status.HTTP_200_OK)
testcase_dir_path = os.path.join(settings.SUITES_DIR,
datetime.strftime(datetime.now(), "%Y%m%d%H%M%S%f" + "_locust"))
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
repeat_list = []
for one_obj in testcase_objs:
generate_locust_files(one_obj, env, testcase_dir_path, repeat_list)
run_locust(data, testcase_dir_path)
while True:
try:
r = requests.get(url)
r.raise_for_status()
if r.status_code == 200:
break
except Exception:
print("连接无效")
data_dict = {
"code": 2,
"url": url
}
return Response(data_dict, status=status.HTTP_200_OK)
def is_locust_noweb(data, env, testcase_objs):
testcase_dir_path = os.path.join(settings.SUITES_DIR,
datetime.strftime(datetime.now(), "%Y%m%d%H%M%S%f" + "_locust"))
if not os.path.exists(testcase_dir_path):
os.makedirs(testcase_dir_path)
repeat_list = []
for one_obj in testcase_objs:
generate_locust_files(one_obj, env, testcase_dir_path, repeat_list)
run_locust_noweb(testcase_dir_path, data)
csv_paths = list()
csv_paths.append(os.path.join(testcase_dir_path, data["name"] + "_failures.csv"))
csv_paths.append(os.path.join(testcase_dir_path, data["name"] + "_stats.csv"))
csv_paths.append(os.path.join(testcase_dir_path, data["name"] + "_stats_history.csv"))
send_email_text(data["name"], "压测详情请查看附件", csv_paths, data["email"])
data_dict = {
"msg": "压测完成",
}
return Response(data_dict, status=status.HTTP_200_OK)
def run_locust(data, testcase_dir_path):
cmd1 = "cd " + testcase_dir_path
cmd2 = "locusts -f locust.yml {} --web-host=0.0.0.0".format("--step-load" if data.get("step_load") else "")
cmd = cmd1 + " && " + cmd2
subprocess.Popen(cmd, shell=True)
def run_locust_noweb(testcase_dir_path, data):
cmd1 = "cd " + testcase_dir_path
if data.get("step_load"):
cmd2 = "locusts -f locust.yml --no-web -c {} -r {} -t {} --csv={}" \
" --step-load --step-clients {} --step-time {} ".format(data["clients"], data["hatch_rate"],
data["run_time"], data["name"],
data["step_clients"], data["step_time"]
)
else:
cmd2 = "locusts -f locust.yml --no-web -c {} -r {} -t {} --csv={}".format(data["clients"], data["hatch_rate"],
data["run_time"],
data["name"])
cmd = cmd1 + " && " + cmd2
locust = subprocess.Popen(cmd, shell=True)
locust.wait()
def to_json_file(json_file, data):
"""
写入json文件
:param json_file:
:param data:
:return:
"""
with io.open(json_file, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=4, separators=(',', ': '), ensure_ascii=False)
def to_file(file_name, data):
"""
写入文件
:param python_file:
:param data:
:return:
"""
with io.open(file_name, 'w', encoding='utf-8') as f:
if isinstance(data, (list, tuple)):
"""可以按行写入"""
f.writelines(data)
return None
f.write(data)
def send_email_text(subject, content, filepath, receive_email):
sender = ""
passwd = ""
    receivers = receive_email  # recipient email address(es)
msgRoot = MIMEMultipart()
msgRoot['Subject'] = subject
msgRoot['From'] = sender
    msgRoot['To'] = receivers if isinstance(receivers, str) else ','.join(receivers)  # supports multiple recipients
part = MIMEText(content)
msgRoot.attach(part)
    # Attach the files
if isinstance(filepath, str):
file_name = filepath.split("\\")[-1]
part = MIMEApplication(open(filepath, 'rb').read())
part.add_header('Content-Disposition', 'attachment', filename=file_name)
msgRoot.attach(part)
else:
for path in filepath:
file_name = path.split("\\")[-1]
part = MIMEApplication(open(path, 'rb').read())
part.add_header('Content-Disposition', 'attachment', filename=file_name)
msgRoot.attach(part)
    s = None
    try:
        if "win" in sys.platform:
            s = smtplib.SMTP()
            s.connect("smtp.qq.com")
        elif "linux" in sys.platform:
            s = smtplib.SMTP_SSL("smtp.qq.com", 465)
        s.login(sender, passwd)
        s.sendmail(sender, receivers, msgRoot.as_string())
        print("Email sent successfully")
    except smtplib.SMTPException as e:
        print("Send failed", e)
    finally:
        if s is not None:  # s stays unbound if neither platform branch ran
            s.quit()
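

# A minimal usage sketch (hypothetical report path and recipient; the empty
# `sender`/`passwd` placeholders above must be filled with real SMTP credentials
# before this will actually send):
# send_email_text('smoke-test report', 'Run finished, see the attachment.',
#                 r'C:\reports\report.html', 'qa-team@example.com')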
| StarcoderdataPython |
4832718 | <gh_stars>0
"""
Selection Sort
Select an element and find the smallest number in the array and swap. continue until we reach the end of the array
Worst Case O(N2)
Average Case O(N2)
Best Case O(N2)
"""
def selection_sort(array):
n = len(array)
for i in range(n):
min_index = i
for j in range(i + 1, n):
if array[j] <= array[min_index]:
min_index = j
array[i], array[min_index] = array[min_index], array[i]
return array
def selection_sort_descending(array):
n = len(array)
for i in range(n):
max_index = i
for j in range(i + 1, n):
if array[j] >= array[max_index]:
max_index = j
array[i], array[max_index] = array[max_index], array[i]
return array
print(selection_sort([6, 5, 4, 3, 2, 1]))
print(selection_sort_descending([1, 2, 3, 4, 5, 6]))
| StarcoderdataPython |
163778 | import matplotlib as mpl
import uproot3 as uproot
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import scipy
import numpy as np
import math
import pandas as pd
import seaborn as sns
import mplhep as hep
#import zfit
import inspect
import sys
import argparse
from concurrent.futures import ThreadPoolExecutor
plt.style.use(hep.style.ATLAS)
plt.rcParams.update({'font.sans-serif': "Arial",
'font.family': "sans-serif",
'font.size': 30,
'mathtext.fontset': 'custom',
'mathtext.rm': 'Arial',
})
import EICAnalysisTools as eat
# Computational Functions
def IPSignificance(row):
JetPt = row["Jet.PT"]
JetEta = row["Jet.Eta"]
JetPhi = row["Jet.Phi"]
JetConstituents = row["Jet.Particles"]
TrackPt = row["Track.PT"]
TrackEta = row["Track.Eta"]
TrackPhi = row["Track.Phi"]
TrackD0 = row["Track.D0"]
TrackErrD0 = row["Track.ErrorD0"]
TrackDZ = row["Track.DZ"]
TrackErrDZ = row["Track.ErrorDZ"]
TrackUID = row["Track.fUniqueID"]
TrackXd = row["Track.Xd"]
TrackYd = row["Track.Yd"]
TrackZd = row["Track.Zd"]
#TrackParentFlavor = np.zeros(len(TrackPt))
JetTrackIPs = []
for jet in JetPt:
JetTrackIPs.append([])
for jet in range(len(JetPt)):
jet_eta = JetEta[jet]
if np.abs(jet_eta) > 3.5:
continue
jet_phi = JetPhi[jet]
jet_pt = JetPt[jet]
track_ips = []
for constituent in JetConstituents[jet]:
            # TrackUID arrives as an array, which has no .index(); locate the
            # matching track index with np.where
            matches = np.where(np.asarray(TrackUID) == constituent)[0]
            if len(matches) == 0:
                continue
            track = int(matches[0])
track_pt = TrackPt[track]
if track_pt < 1.0:
continue
deltaR = np.sqrt( (TrackEta[track] - JetEta[jet])**2 + (TrackPhi[track] - JetPhi[jet])**2 )
if deltaR > 0.5:
continue
jpx = jet_pt*math.cos(jet_phi)
jpy = jet_pt*math.sin(jet_phi)
jpz = jet_pt*math.sinh(jet_eta)
tx = TrackXd[track]
ty = TrackYd[track]
tz = TrackZd[track]
sign = -1
if (jpx * tx + jpy * ty + jpz * tz) > 0.0:
sign = 1
d0 = TrackD0[track]
d0_error = TrackErrD0[track]
dz = TrackDZ[track]
dz_error = TrackErrDZ[track]
track_ips.append(sign * math.fabs( (d0/d0_error)**2 + (dz/dz_error)**2 ))
JetTrackIPs[jet] = track_ips
return JetTrackIPs
# Computational Functions
def IPSignificanceOld(row):
JetPt = row["Jet.PT"]
JetEta = row["Jet.Eta"]
JetPhi = row["Jet.Phi"]
TrackPt = row["Track.PT"]
TrackEta = row["Track.Eta"]
TrackPhi = row["Track.Phi"]
TrackD0 = row["Track.D0"]
TrackErrD0 = row["Track.ErrorD0"]
TrackDZ = row["Track.DZ"]
TrackErrDZ = row["Track.ErrorDZ"]
TrackUID = row["Track.fUniqueID"]
TrackXd = row["Track.Xd"]
TrackYd = row["Track.Yd"]
TrackZd = row["Track.Zd"]
#TrackParentFlavor = np.zeros(len(TrackPt))
TrackIP = np.ones(len(TrackPt))*-999.0
for jet in range(len(JetPt)):
jet_eta = JetEta[jet]
if np.abs(jet_eta) > 3.5:
continue
jet_phi = JetPhi[jet]
jet_pt = JetPt[jet]
for track in np.arange(len(TrackPt)):
if TrackIP[track] != -999.0:
continue
track_pt = TrackPt[track]
if track_pt < 1.0:
continue
deltaR = np.sqrt( (TrackEta[track] - JetEta[jet])**2 + (TrackPhi[track] - JetPhi[jet])**2 )
if deltaR > 0.5:
continue
jpx = jet_pt*math.cos(jet_phi)
jpy = jet_pt*math.sin(jet_phi)
jpz = jet_pt*math.sinh(jet_eta)
tx = TrackXd[track]
ty = TrackYd[track]
tz = TrackZd[track]
sign = -1
if (jpx * tx + jpy * ty + jpz * tz) > 0.0:
sign = 1
d0 = TrackD0[track]
d0_error = TrackErrD0[track]
dz = TrackDZ[track]
dz_error = TrackErrDZ[track]
TrackIP[track] = sign * math.fabs( (d0/d0_error)**2 + (dz/dz_error)**2 )
return TrackIP
def TrackSource(row):
JetPt = row["Jet.PT"]
JetEta = row["Jet.Eta"]
JetPhi = row["Jet.Phi"]
JetFlavor = row["Jet.Flavor"]
TrackPt = row["Track.PT"]
TrackEta = row["Track.Eta"]
TrackPhi = row["Track.Phi"]
TrackParentFlavor = np.zeros(len(TrackPt))
for jet in range(len(JetPt)):
jet_eta = JetEta[jet]
if np.abs(jet_eta) > 3.5:
continue
jet_pt = JetPt[jet]
for track in np.arange(len(TrackPt)):
parent_flavor = TrackParentFlavor[track]
if parent_flavor != -999.0 and (parent_flavor == 4 or parent_flavor == 5):
continue
track_pt = TrackPt[track]
if track_pt < 1.0:
continue
deltaR = np.sqrt( (TrackEta[track] - JetEta[jet])**2 + (TrackPhi[track] - JetPhi[jet])**2 )
if deltaR > 0.5:
continue
TrackParentFlavor[track] = JetFlavor[jet]
return TrackParentFlavor
def histplot(x, xrange, xbins, density=False):
    """Histogram helper: return bin centers, bin widths, (optionally density-
    normalized) counts, and the corresponding Poisson errors for plt.errorbar."""
(counts, bins) = np.histogram(x, range=xrange,
bins=xbins)
bin_widths = np.diff(bins)
bin_centers = bins[:-1] + bin_widths/2
errors = np.sqrt(counts)
rel_errors = errors/counts
# convert counts to dsigma/dpT * 100/fb
y = counts #/ bin_widths
if density:
y = y/len(x)/bin_widths
y_errors = rel_errors * y
return (bin_centers, bin_widths, y, y_errors)
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dir", type=str,
help="Directory containing input files")
parser.add_argument("-i", "--input", type=str,
help="Main input subfolder")
args = parser.parse_args()
df = eat.UprootLoad([f"{args.dir}/{args.input}/[0-4]/out.root"], "Delphes",
branches=["Jet.Flavor", "Jet.PT", "Jet.Eta", "Jet.Phi", "Jet.Particles",
"Track.fUniqueID", "Track.PT", "Track.Eta", "Track.Phi", "Track.D0", "Track.DZ",
"Track.ErrorDZ", "Track.ErrorD0", "Track.Xd", "Track.Yd", "Track.Zd"])
#df = df[:100]
n_gen = len(df)
print(f"n_gen = {n_gen}")
df["Track.IPSignificance"] = df.apply( IPSignificanceOld , axis=1)
df["Track.Source"] = df.apply( TrackSource , axis=1)
print(df.head())
track_ips = np.concatenate(df['Track.IPSignificance'].to_numpy()).ravel()
track_flavor = np.concatenate(df['Track.Source'].to_numpy()).ravel()
matched_ips = track_ips[ track_flavor >= 0 ]
matched_flavor = track_flavor[ track_flavor >= 0 ]
charm_ips = track_ips[ matched_flavor == 4 ]
light_ips = track_ips[ (matched_flavor < 4) | (matched_flavor == 21) ]
print(matched_ips)
# Draw the IP significance plot
fig, ax = plt.subplots(figsize=(12,8))
plt.axis('off')
gridspec = fig.add_gridspec(ncols=1, nrows=1, width_ratios=[1], height_ratios=[1])
ax1 = fig.add_subplot(gridspec[0, 0])
ax1.grid(which='both', axis='both')
ax1.xaxis.set_major_locator(MultipleLocator(10))
ax1.xaxis.set_major_formatter('{x:.0f}')
# For the minor ticks, use no labels; default NullFormatter.
ax1.xaxis.set_minor_locator(MultipleLocator(2))
xrange = [-30, 30]
#xbins = np.concatenate( ( np.arange(-30,-5,5),np.arange(-5,5,1),np.arange(5, 30, 5) ) )
#xbins = np.arange(-300,300,1)
xbins = np.concatenate( ( np.arange(-300,-30,10),np.arange(-30,30,1),np.arange(30, 300, 10) ) )
(bins, bin_widths, y, y_error) = histplot(light_ips, xrange=xrange, xbins=xbins, density=True)
ax1.errorbar(bins, y, xerr = bin_widths/2, yerr=y_error, label='light jets', marker='o', ms=10, ls='none', linewidth=2, color='red')
(bins, bin_widths, y, y_error) = histplot(charm_ips, xrange=xrange, xbins=xbins, density=True)
ax1.errorbar(bins, y, xerr = bin_widths/2, yerr=y_error, label='charm jets', marker='D', ms=10, ls='none', linewidth=2, color='blue')
plt.ylabel(r'$\mathrm{P(sIP_{3D} \, | \, Jet \; Flavor)}$')
plt.xlabel(r'$\mathrm{sIP_{3D}}$')
plt.title("CC-DIS, 10x275GeV, $Q^2>100\\mathrm{GeV^2}$", fontsize=20)
ax1.set_ylim([1e-6,2e0])
ax1.set_xlim(xrange)
ax1.legend(fontsize=18)
plt.yscale('log')
y_minor = mpl.ticker.LogLocator(base = 10.0, subs = np.arange(2.0, 10.0) * 0.1, numticks = 100)
ax1.yaxis.set_minor_locator(y_minor)
ax1.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax1.yaxis.set_major_locator(mpl.ticker.LogLocator(base = 10.0, subs = np.arange(1.0, 2.0), numticks = 100))
plt.tight_layout()
plt.savefig(f"track_ip_significance_{args.input}.png")
plt.savefig(f"track_ip_significance_{args.input}.pdf")
| StarcoderdataPython |
3214225 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Mapping fastq to reference genome
1. rRNA, spikein, optional
2. genome
"""
import os
import sys
import re
import io
import glob
import json
import fnmatch
import tempfile
import shlex
import subprocess
import logging
import pandas as pd
import pysam
import pybedtools
from operator import is_not
from functools import partial
from goldclip.helper import *
from goldclip.goldcliplib.log_parser import *
class Alignment(object):
"""Run alignment for SE reads using bowtie/bowtie2/STAR"""
def __init__(self, fqs, path_out, smp_name, genome, **kwargs):
"""parse arguments
required arguments: fqs, path_out, smp_name, genome, genome, spikein,
index_ext, threads, unique_only, n_map, aligner, align_to_rRNA,
genome_path, overwrite
"""
self.fqs = fqs
self.path_out = path_out
self.smp_name = smp_name
self.genome = genome
self.kwargs = kwargs
self.args = self._args_init()
def _args_init(self):
"""Inititate the arguments, assign the default values to arg
"""
args = self.kwargs
args['fqs'] = self.fqs
args['path_out'] = self.path_out
args['smp_name'] = self.smp_name
args['genome'] = self.genome
args['spikein'] = args.get('spikein', None)
args['index_ext'] = args.get('index_ext', None)
args['threads'] = args.get('threads', 1)
args['unique_only'] = args.get('unique_only', False)
args['n_map'] = args.get('n_map', 0)
args['aligner'] = args.get('aligner', 'bowtie')
args['align_to_rRNA'] = args.get('align_to_rRNA', True)
args['repeat_masked_genome'] = args.get('repeat_masked_genome', False)
args['merge_rep'] = args.get('merge_rep', True)
args['genome_path'] = args.get('genome_path', None)
args['overwrite'] = args.get('overwrite', False)
# check
if args['spikein'] == self.genome:
args['spikein'] = None #
return args
def _path_init(self, fq, index, reference=None, align_path=None):
"""Create folders for the alignment,
Alignment, genome versions
1.genome_rRNA
2.genome
3.spikein_rRNA
4.spikein
return files:
prefix, bam, bed, log, unmap
"""
args = self.args
if not reference:
reference = args['genome'] # default is reference genome
if not align_path:
align_path = args['path_out']
fq_prefix = file_prefix(fq)[0]
        fq_prefix = re.sub(r'\.clean|\.nodup|\.cut', '', fq_prefix)
fq_type = seq_type(fq)
map_prefix = os.path.join(align_path, '%s.map_%s' % (fq_prefix, reference))
unmap_prefix = os.path.join(align_path, '%s.not_%s' % (fq_prefix, reference))
map_bam = map_prefix + '.bam'
# map_bed = map_prefix + '.bed'
map_log = map_prefix + '.%s.log' % args['aligner']
unmap_fq = unmap_prefix + '.%s' % fq_type
return [fq_prefix, map_bam, map_log, unmap_fq, reference]
def _index_builder(self, rRNA=False, genome=None):
"""Return the genome index
"""
args = self.args
aligner = args['aligner']
if not genome:
genome = self.genome
# check aligner
if aligner == 'bowtie':
index = Genome(genome, repeat_masked_genome=args['repeat_masked_genome']).bowtie_index(rRNA=rRNA)
elif aligner == 'bowtie2':
index = Genome(genome, repeat_masked_genome=args['repeat_masked_genome']).bowtie2_index(rRNA=rRNA)
elif aligner == 'STAR':
index = Genome(genome, repeat_masked_genome=args['repeat_masked_genome']).star_index(rRNA=rRNA)
else:
logging.error('unknown aligner: %s' % aligner)
            index = None  # unknown aligner
return index
def _index_list(self):
"""List the align index (es) for the job
rRNA, reference, genome
"""
args = self.args
# aligner index
idx = {
'genome_rRNA': self._index_builder(rRNA=True),
'genome': self._index_builder(rRNA=False),
'sp_rRNA': self._index_builder(rRNA=True, genome=args['spikein']),
'sp': self._index_builder(rRNA=False, genome=args['spikein'])}
# determine
if not args['align_to_rRNA']:
idx['genome_rRNA'] = None
# save in dict
return idx # dictionary
def wrap_log(self, log):
"""Wrapper alignment log file, save as json"""
args = self.args
j_file = Alignment_log(log, args['unique_only']).saveas() # save as json
def bowtie_se(self, fq, index, reference=None, unique_map=False, align_path=None):
"""Run bowtie
arguments:
reference: genome, genome_rRNA, spikein, spikein_rRNA
"""
args = self.args
bowtie_exe = which('bowtie')
if not align_path:
align_path = args['path_out']
# output directory
prefix, map_bam, map_log, unmap, reference = self._path_init(fq, index,
reference, align_path)
# determine parameters
n_map = args['n_map']
if n_map < 1:
n_map = 1 # default
if unique_map:
para_unique = '-m 1'
else:
para_unique = '-v 2 -k %s' % n_map # default: 1
if seq_type(fq) == 'fasta':
para_fq = '-f'
else:
para_fq = '-q'
# file exists
if os.path.exists(map_bam) and args['overwrite'] is False:
logging.info('bam file exists: %s' % map_bam)
else:
c1 = '%s %s %s -p %s --mm --best --sam --no-unal --un %s %s \
%s' % (bowtie_exe, para_fq, para_unique, args['threads'],
unmap, index, fq)
c2 = 'samtools view -bhS -F 0x4 -@ %s -' % args['threads']
c3 = 'samtools sort -@ %s -o %s -' % (args['threads'], map_bam)
with open(map_log, 'wt') as ff:
p1 = subprocess.Popen(shlex.split(c1), stdout=subprocess.PIPE,
stderr=ff)
p2 = subprocess.Popen(shlex.split(c2), stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split(c3), stdin=p2.stdout)
px = p3.communicate()
# process log file
self.wrap_log(map_log)
return [map_bam, unmap]
def bowtie2_se(self, fq, index, reference=None, unique_map=False, align_path=None):
"""Run bowtie2
arguments:
reference: genome, genome_rRNA, spikein, spikein_rRNA
"""
args = self.args
bowtie2_exe = which('bowtie2')
if not align_path:
align_path = args['path_out']
# output directory
prefix, map_bam, map_log, unmap, reference = self._path_init(fq, index,
reference, align_path)
# determine parameters
if unique_map:
para_unique = '-q 10'
else:
para_unique = '-q 0'
# multi map
n_map = args['n_map']
if n_map == 0:
# n_map = 1 # default 1, report 1 hit for each read
# default: #look for multiple alignments, report best, with MAPQ
para_fq = ''
else:
para_fq = '-k %s' % n_map
# fq type
if seq_type(fq) == 'fasta':
para_fq = para_fq + ' -f'
else:
para_fq = para_fq + ' -q'
# file exists
if os.path.exists(map_bam) and args['overwrite'] is False:
logging.info('bam file exists: %s' % map_bam)
else:
c1 = '%s %s -p %s --very-sensitive-local --mm --no-unal --un %s -x %s -U %s' % (bowtie2_exe,
para_fq, args['threads'], unmap, index, fq)
# c1 = '%s --very-sensitive-local --mm --no-unal -p %s --un %s -x %s -U %s' % (bowtie2_exe,
# args['threads'], unmap, index, fq)
c2 = 'samtools view -bhS -F 0x4 -@ %s %s -' % (args['threads'], para_unique)
c3 = 'samtools sort -@ %s -o %s -' % (args['threads'], map_bam)
with open(map_log, 'wt') as ff:
p1 = subprocess.Popen(shlex.split(c1), stdout=subprocess.PIPE,
stderr=ff)
p2 = subprocess.Popen(shlex.split(c2), stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split(c3), stdin=p2.stdout)
px = p3.communicate()
# process log file
self.wrap_log(map_log)
return [map_bam, unmap]
def star_se(self, fq, index, reference, unique_map=False, align_path=None):
"""Run STAR, default kwargs
args['unique_only'] is TRUE, unique_map=True:
"""
args = self.args
star_exe = which('STAR')
if not align_path:
align_path = args['path_out']
# output directory
prefix, map_bam, map_log, unmap, reference = self._path_init(fq, index,
reference, align_path)
# determine parameters
n_map = args['n_map']
if n_map > 1:
n_map = n_map # n_map default: 0
else:
n_map = 10 # STAR default: 10
para_unique = '--outFilterMultimapNmax %s' % n_map
fr = 'zcat' if is_gz(fq) else '-'
# file exists
map_prefix = os.path.join(align_path, prefix)
if os.path.exists(map_bam) and args['overwrite'] is False:
logging.info('bam file exists: %s' % map_bam)
else:
c1 = 'STAR --runMode alignReads \
--genomeDir %s \
--readFilesIn %s \
--readFilesCommand %s \
--outFileNamePrefix %s \
--runThreadN %s \
--limitOutSAMoneReadBytes 1000000 \
--genomeLoad NoSharedMemory \
--limitBAMsortRAM 10000000000 \
--outSAMtype BAM SortedByCoordinate \
--outFilterMismatchNoverLmax 0.07 \
--seedSearchStartLmax 20 \
--outReadsUnmapped Fastx %s %s' % (index, fq, fr, map_prefix,
args['threads'], unmap, para_unique)
p1 = subprocess.run(shlex.split(c1))
# filter unique mapped reads
if unique_map: # only unique mapped reads, -q 10
pysam.view('-bhS', '-q', '10', '-@', str(args['threads']),
'-o', map_bam, map_prefix + 'Aligned.sortedByCoord.out.bam',
catch_stdout=False)
else:
os.rename(map_prefix + 'Aligned.sortedByCoord.out.bam', map_bam)
os.rename(map_prefix + 'Unmapped.out.mate1', unmap)
os.rename(map_prefix + 'Log.final.out', map_log)
# process log file
self.wrap_log(map_log)
return [map_bam, unmap]
def align_se_batch(self, fq, align_path=None):
"""Align reads to multiple indexes in specific order,
return align.stat, map_bam, unmap_reads
determine the index order
return bam_files
"""
args = self.args
if not align_path:
align_path = args['path_out']
# define aligner
aligner_dict = {
'bowtie': self.bowtie_se,
'bowtie2': self.bowtie2_se,
'STAR': self.star_se}
aligner_exe = aligner_dict.get(args['aligner'], None) # determine aligner
if not aligner_exe:
raise ValueError('unknown aligner: %s' % args['aligner'])
# get all index in order
index_dict = self._index_list() # genome_rRNA, genome, sp_rRNA, sp
bam_files = []
fq_input = fq
# 1. genome_rRNA (rRNA: both unique, multiple)
idx1 = index_dict['genome_rRNA']
if idx1 is None:
raise ValueError('genome_rRNA index not found: %s' % args['genome'])
reference = self.genome + '_rRNA'
bam_idx1, unmap_idx1 = aligner_exe(fq=fq_input, index=idx1,
reference=reference, unique_map=False,
align_path=align_path)
fq_input = unmap_idx1
# 2. genome
idx2 = index_dict['genome']
if idx2 is None:
raise ValueError('genome index not found: %s' % args['genome'])
reference = self.genome
bam_idx2, unmap_idx2 = aligner_exe(fq=fq_input, index=idx2,
reference=reference, unique_map=args['unique_only'],
align_path=align_path)
fq_input = unmap_idx2
if args['spikein']: # add spikein
# 3. sp_rRNA (rRNA: both unique, multiple)
idx3 = index_dict['sp_rRNA']
reference = args['spikein'] + '_rRNA'
bam_idx3, unmap_idx3 = aligner_exe(fq=fq_input, index=idx3,
reference=reference, unique_map=False,
align_path=align_path)
fq_input = unmap_idx3
# 4. sp (optional)
idx4 = index_dict['sp']
reference = args['spikein']
bam_idx4, unmap_idx4 = aligner_exe(fq=fq_input, index=idx4,
reference=reference, unique_map=args['unique_only'],
align_path=align_path)
fq_input = unmap_idx4
bam_files = [bam_idx1, bam_idx2, bam_idx3, bam_idx4]
else: # no spikein
bam_idx3 = bam_idx4 = None
bam_files = [bam_idx1, bam_idx2, bam_idx3, bam_idx4]
bam_files = list(filter(partial(is_not, None), bam_files)) # remove None
return bam_files
def align_extra(self, fq, align_path=None):
"""Align reads to extra index
such as GFP, white, firefly, transposon
return bam_files
"""
args = self.args
if not align_path:
align_path = args['path_out']
# define aligner
aligner_dict = {
'bowtie': self.bowtie_se,
'bowtie2': self.bowtie2_se,
'STAR': self.star_se}
aligner_exe = aligner_dict.get(args['aligner'], None) # determine aligner
if not aligner_exe:
raise ValueError('unknown aligner: %s' % args['aligner'])
# get all index in order
# index_ext = args['index_ext']
bam_ext_list = []
for ext in args['index_ext']:
if index_validator(ext, args['aligner']):
reference = os.path.basename(ext)
bam_ext, unmap_ext = aligner_exe(fq=fq,
index=ext, reference=reference, unique_map=True,
align_path=align_path)
else:
bam_ext = None
bam_ext_list.append(bam_ext)
return bam_ext_list
def run_extra(self):
"""Run the alignment for specific fastq file onto extra index
1. run alignment for each replicate
2. merge replicates
3. run log parser, in json format
4. organize the log files, saved in one report, including the following groups:
"""
args = self.args
bam_out = []
# run alignment for replicates
for fq in args['fqs']:
logging.info('alignment: %s' % fq)
fq_prefix = file_prefix(fq)[0]
            fq_prefix = re.sub(r'\.clean|\.nodup|\.cut', '', fq_prefix)
fq_path = os.path.join(args['path_out'], 'extra_mapping', fq_prefix)
assert is_path(fq_path)
logging.info('align to index_ext: %s' % fq_prefix)
bam_files = self.align_extra(fq, fq_path)
bam_out.append(bam_files) #
# stat alignment
Alignment_stat(fq_path).saveas()
# merge bam files
if args['merge_rep']:
merged_path = os.path.join(args['path_out'], 'extra_mapping', args['smp_name'])
merged_files = []
if len(bam_out) > 1: # for multiple bam files
assert is_path(merged_path)
for i in range(len(bam_out[0])):
rep_bam_files = [b[i] for b in bam_out]
merged_suffix = str_common(rep_bam_files, suffix=True)
merged_suffix = re.sub('^_[12]|_R[12]', '', merged_suffix)
merged_bam_name = args['smp_name'] + merged_suffix
merged_bam_file = os.path.join(merged_path, merged_bam_name)
if os.path.exists(merged_bam_file) and args['overwrite'] is False:
logging.info('file exists: %s' % merged_bam_file)
else:
tmp = bam_merge(rep_bam_files, merged_bam_file)
merged_files.append(merged_bam_file)
Alignment_stat(merged_path).saveas()
bam_out.append(merged_files)
return bam_out
def run(self):
"""Run the alignment for specific fastq file
1. run alignment for each replicate
2. merge replicates
3. run log parser, in json format
4. organize the log files, saved in one report, including the following groups:
genome_rRNA, genome_unique, genome_multi, sp_rRNA, sp_unique, sp_multi, unmap
"""
args = self.args
bam_out = []
# run alignment for replicates
for fq in args['fqs']:
logging.info('alignment: %s' % fq)
fq_prefix = file_prefix(fq)[0]
            fq_prefix = re.sub(r'\.clean|\.nodup|\.cut', '', fq_prefix)
fq_path = os.path.join(args['path_out'], fq_prefix)
assert is_path(fq_path)
bam_files = self.align_se_batch(fq, fq_path)
bam_out.append(bam_files) #
# stat alignment
Alignment_stat(fq_path).saveas()
# merge bam files
if args['merge_rep']:
merged_path = os.path.join(args['path_out'], args['smp_name'])
merged_files = []
if len(bam_out) > 1: # for multiple bam files
assert is_path(merged_path)
for i in range(len(bam_out[0])):
rep_bam_files = [b[i] for b in bam_out]
merged_suffix = str_common(rep_bam_files, suffix=True)
merged_suffix = re.sub('^_[12]|_R[12]', '', merged_suffix)
merged_bam_name = args['smp_name'] + merged_suffix
merged_bam_file = os.path.join(merged_path, merged_bam_name)
if os.path.exists(merged_bam_file) and args['overwrite'] is False:
logging.info('file exists: %s' % merged_bam_file)
else:
tmp = bam_merge(rep_bam_files, merged_bam_file)
merged_files.append(merged_bam_file)
Alignment_stat(merged_path).saveas()
bam_out.append(merged_files)
# make short names for genome bam files
genome_bam_files = []
for b in bam_out: # nested array
bam_from = b[1]
bam_to = os.path.join(os.path.dirname(bam_from),
filename_shorter(bam_from))
if not os.path.exists(bam_to):
os.symlink(os.path.basename(bam_from), bam_to)
if not os.path.exists(bam_to + '.bai'):
if not os.path.exists(bam_from + '.bai'):
if os.path.getsize(bam_from) < 1000: # !!!! empty bam files
continue
else:
pysam.index(bam_from) # empty bam
os.symlink(os.path.basename(bam_from) + '.bai', bam_to + '.bai')
genome_bam_files.append(bam_to)
# run extra index mapping
ext_bam_files = None
if not args['index_ext'] is None:
ext_bam_files = self.run_extra()
return [genome_bam_files, ext_bam_files]
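

# A minimal usage sketch (hypothetical fastq paths, output directory, and sample
# name; `genome` must be a build known to the Genome helper imported above):
# align = Alignment(fqs=['rep1.fq.gz', 'rep2.fq.gz'], path_out='results/align',
#                   smp_name='demo', genome='dm3', aligner='bowtie2', threads=4)
# genome_bams, ext_bams = align.run()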
| StarcoderdataPython |
1620244 | #-*- coding:utf-8 -*-
#
# This file is part of CoTeTo - code templating tool
#
name = 'libSimModel'
description = 'SimXML file reader, return objects from SimXML files'
version = '0.1'
author = 'EnEff-BIM team'
helptxt = """
Help yourself"""
def fetchData(uriList, systemCfg, generatorCfg, logger):
from mapapi.MapClasses import MapProject
if not uriList:
logger.critical('libSimModel - no files specified!')
raise Exception('No files specified!')
return {'MapProject': MapProject(uriList[0])}
| StarcoderdataPython |
4830109 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-17 09:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rideshare', '0063_auto_20170917_0930'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='safety_score',
field=models.IntegerField(default=5),
),
]
| StarcoderdataPython |
1699466 | <reponame>DNL-inc/bit
from aiogram import types
from keyboards.inline import blank_callback, back_callback
from middlewares import _
from models import Subgroup, User
async def get_keyboard(group_id, editable=True, for_events=False, user=None):
keyboard = types.InlineKeyboardMarkup(row_width=1)
subgroups = await Subgroup().select_subgroups_in_group(group_id)
if user:
user_subgroups = await User().select_user_subgroups(user)
if user_subgroups:
subgroups_copy = list()
for subgroup in subgroups:
if subgroup in user_subgroups:
subgroups_copy.append(subgroup)
subgroups = subgroups_copy
else:
subgroups = None
if subgroups:
for subgroup in subgroups:
keyboard.add(types.InlineKeyboardButton(subgroup.title, callback_data='subgroup-' + str(subgroup.id)))
else:
keyboard.add(
types.InlineKeyboardButton(_("У тебя нет подгрупп"), callback_data=blank_callback.new(category='subgroup')))
if editable:
keyboard.add(types.InlineKeyboardButton(_('Добавить'), callback_data='add-subgroup'))
if for_events:
keyboard.add(types.InlineKeyboardButton(_('Все события'), callback_data='all-events'))
keyboard.add(types.InlineKeyboardButton(_('Назад'), callback_data=back_callback.new(category='lang')))
return keyboard
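

# A minimal usage sketch (inside an aiogram message handler; `group_id` and the
# `user` record are assumptions taken from the surrounding bot context):
# markup = await get_keyboard(group_id, editable=False, for_events=True, user=user)
# await message.answer(text, reply_markup=markup)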
| StarcoderdataPython |
1776781 | import matplotlib.pyplot as plt
import decimal
from datetime import datetime
ctx = decimal.Context()
ctx.prec = 20
token_store = './output/tokens.json'
plt.style.use('./assets/presentation.mplstyle')
class Chart:
@staticmethod
def generate_line_chart(coin_id, y):
x = [x for x in range(len(y))]
fig, ax = plt.subplots()
ax.plot(x, y, color='black')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.margins(x=0)
fig.set_size_inches(mm_to_inch(50.55), mm_to_inch(25.71))
frame1 = plt.gca()
frame1.axes.get_xaxis().set_ticks([])
chart_path = './output/{coin_id}.png'.format(coin_id=coin_id)
plt.savefig(chart_path, transparent=True, bbox_inches='tight', dpi=130)
price = y[-1]
percentage = (price - y[0]) / y[0] * 100
formatted_price = (float_to_str(price) if price < 0.01 else "{0:,.2f}".format(price))
formatted_percentage = "{:.1f}".format(percentage) + '%'
last_updated = datetime.now().strftime("%d %b %Y, %H:%M")
return chart_path, formatted_price, formatted_percentage, last_updated
def mm_to_inch(mm):
return mm * 0.0393701
def float_to_str(f):
"""
Convert the given float to a string,
without resorting to scientific notation
"""
d1 = ctx.create_decimal(repr(f))
return format(d1, 'f')[:14]
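

# Example: repr-based Decimal formatting keeps small floats out of scientific
# notation, e.g. float_to_str(1.5e-07) returns '0.00000015' rather than '1.5e-07'
# (the [:14] slice caps very long expansions).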
| StarcoderdataPython |
2025 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks
class TestDeleteVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.delete_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestDeleteVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(
ctx, **{'provider_id': fake.PROVIDER_ID})
self.volume_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: self.volume.id,
'instances/Volume::{}/action/removeMappedSdc'.format(
self.volume.provider_id): self.volume.provider_id,
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): self.volume.provider_id,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
},
}
def test_bad_login_and_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.volume)
def test_delete_volume(self):
"""Setting the unmap volume before delete flag for tests """
self.driver.configuration.set_override(
'sio_unmap_volume_before_deletion',
override=True)
self.driver.delete_volume(self.volume)
| StarcoderdataPython |
1708148 | <reponame>FowlerLab/hgvs-patterns
import unittest
import re
from mavehgvs.patterns.dna import (
dna_equal_c,
dna_equal_n,
dna_equal_gmo,
dna_sub_c,
dna_sub_n,
dna_sub_gmo,
dna_del_c,
dna_del_n,
dna_del_gmo,
dna_dup_c,
dna_dup_n,
dna_dup_gmo,
dna_ins_c,
dna_ins_n,
dna_ins_gmo,
dna_delins_c,
dna_delins_n,
dna_delins_gmo,
dna_variant_c,
dna_variant_n,
dna_variant_gmo,
dna_single_variant,
dna_multi_variant,
)
class TestDnaEqualC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_equal_c, flags=re.ASCII)
cls.valid_strings = [
"=",
"18=",
"10_14=",
"122-6=",
"*24=",
"19+22=",
"19+22_88=",
"-27+3=",
]
cls.invalid_strings = ["=22", "(=)", "18(=)"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaEqualN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_equal_n, flags=re.ASCII)
cls.valid_strings = ["="]
cls.invalid_strings = [
"=22",
"(=)",
"18(=)",
"-27+3=",
"*24=",
"18=",
"10_14=",
"122-6=",
"19+22=",
"19+22_88=",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaEqualGMO(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_equal_gmo, flags=re.ASCII)
cls.valid_strings = ["=", "18=", "10_14="]
cls.invalid_strings = [
"=22",
"(=)",
"18(=)",
"122-6=",
"*24=",
"19+22=",
"19+22_88=",
"-27+3=",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaSubC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_sub_c, flags=re.ASCII)
cls.valid_strings = ["48C>A", "122-6T>A", "*24G>C", "19+22A>G", "-27+3T>C"]
cls.invalid_strings = ["22g>u", "48C>W", "122=/T>A"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaSubN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_sub_n, flags=re.ASCII)
cls.valid_strings = ["48C>A", "122-6T>A", "19+22A>G"]
cls.invalid_strings = ["22g>u", "48C>W", "122=/T>A", "*24G>C", "-27+3T>C"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaSubGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_sub_gmo, flags=re.ASCII)
cls.valid_strings = ["48C>A"]
cls.invalid_strings = ["122-6T>A", "22g>u", "48C>W", "22=", "122=/T>A", "0C>T"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_del_c, flags=re.ASCII)
cls.valid_strings = [
"44del",
"1_95del",
"78+5_78+10del",
"-25+1_-25+3del",
"*17del",
]
cls.invalid_strings = [
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_del_n, flags=re.ASCII)
cls.valid_strings = ["44del", "1_95del", "78+5_78+10del"]
cls.invalid_strings = [
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
"-25+1_-25+3del",
"*17del",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_del_gmo, flags=re.ASCII)
cls.valid_strings = ["44del", "1_95del"]
cls.invalid_strings = [
"78+5_78+10del",
"-25+1_-25+3del",
"*17del",
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDupC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_dup_c, flags=re.ASCII)
cls.valid_strings = [
"22_24dup",
"77dup",
"101+1_101+7dup",
"-25+1_-25+3dup",
"*17dup",
]
cls.invalid_strings = [
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDupN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_dup_n, flags=re.ASCII)
cls.valid_strings = ["22_24dup", "77dup", "101+1_101+7dup"]
cls.invalid_strings = [
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"-25+1_-25+3dup",
"*17dup",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDupGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_dup_gmo, flags=re.ASCII)
cls.valid_strings = ["22_24dup", "77dup"]
cls.invalid_strings = [
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"101+1_101+7dup",
"-25+1_-25+3dup",
"*17dup",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaInsC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_ins_c, flags=re.ASCII)
cls.valid_strings = [
"234_235insT",
"84_85insCTG",
"*84_*85insCTG",
"99+6_99+7insA",
"124+100_124-100insTTG",
"124+101_124-100insTTG",
]
cls.invalid_strings = ["84_85ins100_125", "234_235ins(10)", "234_235ins(?)"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaInsN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_ins_n, flags=re.ASCII)
cls.valid_strings = [
"234_235insT",
"84_85insCTG",
"99+6_99+7insA",
"124+100_124-100insTTG",
"124+101_124-100insTTG",
]
cls.invalid_strings = [
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
"*84_*85insCTG",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaInsGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_ins_gmo, flags=re.ASCII)
cls.valid_strings = ["234_235insT", "84_85insCTG"]
cls.invalid_strings = [
"99+6_99+7insA",
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelinsC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_delins_c, flags=re.ASCII)
cls.valid_strings = [
"22delinsAACG",
"83_85delinsT",
"43-6_595+12delinsCTT",
"*788delinsA",
]
cls.invalid_strings = ["84_85delinsAAN", "234delinsW"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelinsN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_delins_n, flags=re.ASCII)
cls.valid_strings = ["22delinsAACG", "83_85delinsT", "43-6_595+12delinsCTT"]
cls.invalid_strings = ["84_85delinsAAN", "234delinsW" "*788delinsA"]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaDelinsGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_delins_gmo, flags=re.ASCII)
cls.valid_strings = ["22delinsAACG", "83_85delinsT"]
cls.invalid_strings = [
"43-6_595+12delinsCTT",
"*788delinsA",
"84_85delinsAAN",
"234delinsW",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaVariantC(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_variant_c, flags=re.ASCII)
cls.valid_strings = [
"48C>A",
"=",
"22=",
"4_6=",
"122-6T>A",
"*24G>C",
"19+22A>G",
"-27+3T>C",
"44del",
"1_95del",
"78+5_78+10del",
"-25+1_-25+3del",
"*17del",
"22_24dup",
"77dup",
"101+1_101+7dup",
"-25+1_-25+3dup",
"*17dup",
"234_235insT",
"84_85insCTG",
"99+6_99+7insA",
"22delinsAACG",
"83_85delinsT",
"43-6_595+12delinsCTT",
"*788delinsA",
]
cls.invalid_strings = [
"22g>u",
"48C>W",
"122=/T>A",
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
"84_85delinsAAN",
"234delinsW",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaVariantN(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_variant_n, flags=re.ASCII)
cls.valid_strings = [
"48C>A",
"=",
"122-6T>A",
"19+22A>G",
"44del",
"1_95del",
"78+5_78+10del",
"22_24dup",
"77dup",
"101+1_101+7dup",
"234_235insT",
"84_85insCTG",
"99+6_99+7insA",
"22delinsAACG",
"83_85delinsT",
"43-6_595+12delinsCTT",
]
cls.invalid_strings = [
"22=",
"1_3=",
"22g>u",
"48C>W",
"122=/T>A",
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
"84_85delinsAAN",
"234delinsW",
"*24G>C",
"-27+3T>C",
"-25+1_-25+3del",
"*17del",
"-25+1_-25+3dup",
"*17dup",
"*788delinsA",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaVariantGmo(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_variant_gmo, flags=re.ASCII)
cls.valid_strings = [
"48C>A",
"=",
"22=",
"1_3=",
"44del",
"1_95del",
"22_24dup",
"77dup",
"234_235insT",
"84_85insCTG",
"22delinsAACG",
"83_85delinsT",
]
cls.invalid_strings = [
"43-6_595+12delinsCTT",
"*788delinsA",
"99+6_99+7insA",
"101+1_101+7dup",
"-25+1_-25+3dup",
"*17dup",
"78+5_78+10del",
"-25+1_-25+3del",
"*17del",
"*24G>C",
"19+22A>G",
"122-6T>A",
"-27+3T>C",
"22g>u",
"48C>W",
"122=/T>A",
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
"84_85delinsAAN",
"234delinsW",
]
def test_valid_strings(self):
for s in self.valid_strings:
with self.subTest(s=s):
self.assertIsNotNone(
self.pattern.fullmatch(s), msg=f'failed to match "{s}"'
)
def test_invalid_strings(self):
for s in self.invalid_strings:
with self.subTest(s=s):
self.assertIsNone(
self.pattern.fullmatch(s), msg=f'incorrectly matched "{s}"'
)
class TestDnaSingleVariant(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pattern = re.compile(dna_single_variant, flags=re.ASCII)
cls.valid_strings = [
"48C>A",
"=",
"44del",
"1_95del",
"22_24dup",
"77dup",
"234_235insT",
"84_85insCTG",
"22delinsAACG",
"83_85delinsT",
]
cls.valid_strings_c_only = [
"*788delinsA",
"-25+1_-25+3dup",
"*17dup",
"-25+1_-25+3del",
"*17del",
"*24G>C",
"-27+3T>C",
]
cls.valid_strings_cn_only = [
"43-6_595+12delinsCTT",
"99+6_99+7insA",
"101+1_101+7dup",
"78+5_78+10del",
"19+22A>G",
"122-6T>A",
]
cls.valid_strings_cgmo_only = ["22=", "4_6="]
cls.invalid_strings = [
"22g>u",
"48C>W",
"122=/T>A",
"(78+1_79-1)_(124+1_125-1)del",
"(?_85)_(124_?)del",
"122=/del",
"(78+1_79-1)_(124+1_125-1)dup",
"(?_85)_(124_?)dup",
"122_125=//dup",
"84_85ins100_125",
"234_235ins(10)",
"234_235ins(?)",
"84_85delinsAAN",
"234delinsW",
]
def test_valid_strings(self):
for p in "cngmo":
for s in self.valid_strings:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNotNone(
self.pattern.fullmatch(v), msg=f'failed to match "{v}"'
)
for p in "cgmo":
for s in self.valid_strings_cgmo_only:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNotNone(
self.pattern.fullmatch(v), msg=f'failed to match "{v}"'
)
for p in "cn":
for s in self.valid_strings_cn_only:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNotNone(
self.pattern.fullmatch(v), msg=f'failed to match "{v}"'
)
for p in "c":
for s in self.valid_strings_c_only:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNotNone(
self.pattern.fullmatch(v), msg=f'failed to match "{v}"'
)
def test_invalid_strings(self):
for p in "cngmo":
for s in self.invalid_strings:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNone(
self.pattern.fullmatch(v), msg=f'incorrectly matched "{v}"'
)
for p in "gmo":
for s in self.valid_strings_cn_only:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNone(
self.pattern.fullmatch(v), msg=f'incorrectly matched "{v}"'
)
for p in "n":
for s in self.valid_strings_c_only:
with self.subTest(s=s, p=p):
v = f"{p}.{s}"
self.assertIsNone(
self.pattern.fullmatch(v), msg=f'incorrectly matched "{v}"'
)
class TestDnaMultiVariant(unittest.TestCase):
@unittest.expectedFailure
def test_something(self):
self.assertEqual(True, False)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
4836866 | <filename>pytorch/libs/nnet/loss.py
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: Snowdar 2019-05-29)
import numpy as np
import torch
import torch.nn.functional as F
from libs.support.utils import to_device
from .components import *
## TopVirtualLoss ✿
class TopVirtualLoss(torch.nn.Module):
""" This is a virtual loss class to be suitable for pipline scripts, such as train.py. And it requires
to implement the function get_posterior to compute accuracy. But just using self.posterior to record the outputs
before computing loss in forward is more convenient.
For example,
def forward(self, inputs, targets):
outputs = softmax(inputs)
self.posterior = outputs
loss = CrossEntropy(outputs, targets)
return loss
It means that get_posterior should be called after forward.
"""
def __init__(self, *args, **kwargs):
super(TopVirtualLoss, self).__init__()
self.posterior = None
self.init(*args, **kwargs)
def init(self, *args, **kwargs):
raise NotImplementedError
def forward(self, *inputs):
raise NotImplementedError
def get_posterior(self):
assert self.posterior is not None
return self.posterior
@utils.for_device_free
def get_accuracy(self, targets):
"""
@return: return accuracy
"""
return self.compute_accuracy(self.get_posterior(), targets)
@utils.for_device_free
def predict(self, outputs):
"""
@outputs: the outputs tensor with [batch-size,n,1] shape comes from affine before computing softmax or
just softmax for n classes
@return: an 1-dimensional vector including class-id (0-based) for prediction
"""
with torch.no_grad():
prediction = torch.squeeze(torch.argmax(outputs, dim=1))
return prediction
@utils.for_device_free
def compute_accuracy(self, outputs, targets):
"""
@outputs: the outputs tensor with [batch-size,n,1] shape comes from affine before computing softmax or
just softmax for n classes
@return: the float accuracy
"""
assert outputs.shape[0] == len(targets)
with torch.no_grad():
prediction = self.predict(outputs)
num_correct = (targets==prediction).sum()
return num_correct.item()/len(targets)
#############################################
## Loss ✿
"""
Note, there are some principles about loss implements:
In process: torch.nn.CrossEntropyLoss = softmax + log + torch.nn.NLLLoss()
In function: torch.nn.NLLLoss() <-> - (sum(torch.tensor.gather())
so, in order to keep codes simple and efficient, do not using 'for' or any other complex grammar to implement what could be replaced by above.
"""
class SoftmaxLoss(TopVirtualLoss):
""" An usual log-softmax loss with affine component.
"""
def init(self, input_dim, num_targets, t=1, reduction='mean', special_init=False):
self.affine = TdnnAffine(input_dim, num_targets)
self.t = t # temperature
# CrossEntropyLoss() has included the LogSoftmax, so do not add this function extra.
self.loss_function = torch.nn.CrossEntropyLoss(reduction=reduction)
# The special_init is not recommended in this loss component
if special_init :
torch.nn.init.xavier_uniform_(self.affine.weight, gain=torch.nn.init.calculate_gain('sigmoid'))
def forward(self, inputs, targets):
"""Final outputs should be a (N, C) matrix and targets is a (1,N) matrix where there are
N targets-indexes (index value belongs to 0~9 when target-class C = 10) for N examples rather than
using one-hot format directly.
One example, one target.
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[2] == 1
posterior = self.affine(inputs)
self.posterior = posterior.detach()
# The frames-index is 1 now.
outputs = torch.squeeze(posterior, dim=2)
return self.loss_function(outputs/self.t, targets)
class FocalLoss(TopVirtualLoss):
"""Implement focal loss according to [<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>.
"Focal loss for dense object detection", IEEE international conference on computer vision, 2017.]
"""
def init(self, input_dim, num_targets, gamma=2, reduction='sum', eps=1.0e-10):
self.softmax_affine = SoftmaxAffineLayer(input_dim, num_targets, dim=1, log=False, bias=True)
self.loss_function = torch.nn.NLLLoss(reduction=reduction)
self.gamma = gamma
# self.alpha = alpha
self.eps = eps
def forward(self, inputs, targets):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[2] == 1
posterior = self.softmax_affine(inputs)
self.posterior = posterior.detach()
focal_posterior = (1 - posterior)**self.gamma * torch.log(posterior.clamp(min=self.eps))
outputs = torch.squeeze(focal_posterior, dim=2)
return self.loss_function(outputs, targets)
class MarginSoftmaxLoss(TopVirtualLoss):
"""Margin softmax loss.
There are AM, AAM, Double-AM, SM1 (Snowdar Margin softmax loss), SM2 and SM3.
Do not provide A-softmax loss again for its complex implementation and margin limitation.
Reference:
[1] <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Large-margin softmax loss for convolutional neural networks.
Paper presented at the ICML.
[2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Sphereface: Deep hypersphere embedding for
face recognition. Paper presented at the Proceedings of the IEEE conference on computer vision and pattern
recognition.
[3] <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Normface: l2 hypersphere embedding for face
verification. Paper presented at the Proceedings of the 25th ACM international conference on Multimedia.
[4] <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Additive margin softmax for face verification. IEEE Signal
Processing Letters, 25(7), 926-930.
[5] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., . . . <NAME>. (2018). Cosface: Large margin cosine
loss for deep face recognition. Paper presented at the Proceedings of the IEEE Conference on Computer Vision
and Pattern Recognition.
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2019). Arcface: Additive angular margin loss for deep face
recognition. Paper presented at the Proceedings of the IEEE Conference on Computer Vision and Pattern
Recognition.
[7] <NAME>., <NAME>., <NAME>., & <NAME>. (2020). Double Additive Margin Softmax Loss for Face Recognition.
Applied Sciences, 10(1), 60.
"""
def init(self, input_dim, num_targets,
m=0.2, s=30., t=1.,
feature_normalize=True,
method="am",
double=False,
mhe_loss=False, mhe_w=0.01,
inter_loss=0.,
ring_loss=0.,
curricular=False,
reduction='mean', eps=1.0e-10, init=True):
self.input_dim = input_dim
self.num_targets = num_targets
self.weight = torch.nn.Parameter(torch.randn(num_targets, input_dim, 1))
self.s = s # scale factor with feature normalization
self.m = m # margin
self.t = t # temperature
self.feature_normalize = feature_normalize
self.method = method # am | aam | sm1 | sm2 | sm3
self.double = double
self.mhe_loss = mhe_loss
self.mhe_w = mhe_w
self.inter_loss = inter_loss
self.ring_loss = ring_loss
self.lambda_factor = 0
self.curricular = CurricularMarginComponent() if curricular else None
if self.ring_loss > 0:
self.r = torch.nn.Parameter(torch.tensor(20.))
self.feature_normalize = False
self.eps = eps
if feature_normalize :
p_target = [0.9, 0.95, 0.99]
suggested_s = [ (num_targets-1)/num_targets*np.log((num_targets-1)*x/(1-x)) for x in p_target ]
if self.s < suggested_s[0]:
print("Warning : using feature noamlization with small scalar s={s} could result in bad convergence. \
There are some suggested s : {suggested_s} w.r.t p_target {p_target}.".format(
s=self.s, suggested_s=suggested_s, p_target=p_target))
self.loss_function = torch.nn.CrossEntropyLoss(reduction=reduction)
# Init weight.
if init:
# torch.nn.init.xavier_normal_(self.weight, gain=1.0)
torch.nn.init.normal_(self.weight, 0., 0.01) # It seems better.
def forward(self, inputs, targets):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[2] == 1
## Normalize
normalized_x = F.normalize(inputs.squeeze(dim=2), dim=1)
normalized_weight = F.normalize(self.weight.squeeze(dim=2), dim=1)
cosine_theta = F.linear(normalized_x, normalized_weight) # Y = W*X
if not self.feature_normalize :
self.s = inputs.norm(2, dim=1) # [batch-size, l2-norm]
            # The accuracy must be reported before the margin penalty is added
self.posterior = (self.s.detach() * cosine_theta.detach()).unsqueeze(2)
else:
self.posterior = (self.s * cosine_theta.detach()).unsqueeze(2)
if not self.training:
# For valid set.
outputs = self.s * cosine_theta
return self.loss_function(outputs, targets)
## Margin Penalty
# cosine_theta [batch_size, num_class]
# targets.unsqueeze(1) [batch_size, 1]
cosine_theta_target = cosine_theta.gather(1, targets.unsqueeze(1))
if self.inter_loss > 0:
inter_cosine_theta = torch.softmax(self.s * cosine_theta, dim=1)
inter_cosine_theta_target = inter_cosine_theta.gather(1, targets.unsqueeze(1))
inter_loss = torch.log((inter_cosine_theta.sum(dim=1) - inter_cosine_theta_target)/(self.num_targets - 1) + self.eps).mean()
if self.method == "am":
penalty_cosine_theta = cosine_theta_target - self.m
if self.double:
double_cosine_theta = cosine_theta + self.m
elif self.method == "aam":
# Another implementation w.r.t cosine(theta+m) = cosine_theta * cos_m - sin_theta * sin_m
# penalty_cosine_theta = self.cos_m * cosine_theta_target - self.sin_m * torch.sqrt((1-cosine_theta_target**2).clamp(min=0.))
penalty_cosine_theta = torch.cos(torch.acos(cosine_theta_target) + self.m)
if self.double:
double_cosine_theta = torch.cos(torch.acos(cosine_theta).add(-self.m))
elif self.method == "sm1":
# penalty_cosine_theta = cosine_theta_target - (1 - cosine_theta_target) * self.m
penalty_cosine_theta = (1 + self.m) * cosine_theta_target - self.m
elif self.method == "sm2":
penalty_cosine_theta = cosine_theta_target - (1 - cosine_theta_target**2) * self.m
elif self.method == "sm3":
penalty_cosine_theta = cosine_theta_target - (1 - cosine_theta_target)**2 * self.m
else:
raise ValueError("Do not support this {0} margin w.r.t [ am | aam | sm1 | sm2 | sm3 ]".format(self.method))
penalty_cosine_theta = 1 / (1 + self.lambda_factor) * penalty_cosine_theta + \
self.lambda_factor / (1 + self.lambda_factor) * cosine_theta_target
if self.double:
cosine_theta = 1/(1+self.lambda_factor) * double_cosine_theta + self.lambda_factor/(1+self.lambda_factor) * cosine_theta
if self.curricular is not None:
cosine_theta = self.curricular(cosine_theta, cosine_theta_target, penalty_cosine_theta)
outputs = self.s * cosine_theta.scatter(1, targets.unsqueeze(1), penalty_cosine_theta)
## Other extra loss
        # The reported loss will always be higher than a plain softmax loss because of the
        # absolute margin penalty, so it gives a misleading picture of how low the loss can
        # go. Ideally we would report the loss before the margin penalty is applied, but we
        # report this inflated value anyway to avoid computing the training loss twice.
if self.ring_loss > 0:
ring_loss = torch.mean((self.s - self.r)**2)/2
else:
ring_loss = 0.
if self.mhe_loss:
sub_weight = normalized_weight - torch.index_select(normalized_weight, 0, targets).unsqueeze(dim=1)
# [N, C]
normed_sub_weight = sub_weight.norm(2, dim=2)
mask = torch.full_like(normed_sub_weight, True, dtype=torch.bool).scatter_(1, targets.unsqueeze(dim=1), False)
# [N, C-1]
normed_sub_weight_clean = torch.masked_select(normed_sub_weight, mask).reshape(targets.size()[0], -1)
# torch.mean means 1/(N*(C-1))
the_mhe_loss = self.mhe_w * torch.mean((normed_sub_weight_clean**2).clamp(min=self.eps)**-1)
return self.loss_function(outputs/self.t, targets) + the_mhe_loss + self.ring_loss * ring_loss
elif self.inter_loss > 0:
return self.loss_function(outputs/self.t, targets) + self.inter_loss * inter_loss + self.ring_loss * ring_loss
else:
return self.loss_function(outputs/self.t, targets) + self.ring_loss * ring_loss
def step(self, lambda_factor):
self.lambda_factor = lambda_factor
def extra_repr(self):
return '(~affine): (input_dim={input_dim}, num_targets={num_targets}, method={method}, double={double}, ' \
'margin={m}, s={s}, t={t}, feature_normalize={feature_normalize}, mhe_loss={mhe_loss}, mhe_w={mhe_w}, ' \
'eps={eps})'.format(**self.__dict__)
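# Minimal usage sketch for MarginSoftmaxLoss (illustrative names; shapes follow the
# [batch, dim, 1] convention documented in forward above):
#   loss_fn = MarginSoftmaxLoss(input_dim=256, num_targets=1000, m=0.2, s=30., method="am")
#   loss = loss_fn(embeddings, targets)  # embeddings: [N, 256, 1], targets: [N]
#   acc = loss_fn.get_accuracy(targets)  # only valid after forward() has run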
class CurricularMarginComponent(torch.nn.Module):
"""CurricularFace is implemented as a called component for MarginSoftmaxLoss.
Reference: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. 2020. “CurricularFace: Adaptive Curriculum Learning Loss for Deep Face
Recognition.” ArXiv E-Prints arXiv:2004.00288.
    Github: https://github.com/HuangYG123/CurricularFace. Note: the momentum used in that
    repo disagrees with the paper above; the statistic 't' should not grow so fast, so it
    is corrected as follows.
By the way, it does not work in my experiments.
"""
def __init__(self, momentum=0.01):
super(CurricularMarginComponent, self).__init__()
self.momentum = momentum
self.register_buffer('t', torch.zeros(1))
def forward(self, cosine_theta, cosine_theta_target, penalty_cosine_theta):
with torch.no_grad():
self.t = (1 - self.momentum) * cosine_theta_target.mean() + self.momentum * self.t
mask = cosine_theta > penalty_cosine_theta
hard_example = cosine_theta[mask]
# Use clone to avoid problem "RuntimeError: one of the variables needed for gradient computation
# has been modified by an inplace operation"
cosine_theta_clone = cosine_theta.clone()
cosine_theta_clone[mask] = hard_example * (self.t + hard_example)
return cosine_theta_clone
class LogisticAffinityLoss(TopVirtualLoss):
"""LogisticAffinityLoss.
Reference: <NAME>., <NAME>., & <NAME>. (2019).
LOGISTIC SIMILARITY METRIC LEARNING VIA AFFINITY MATRIX FOR TEXT-INDEPENDENT SPEAKER VERIFICATION.
"""
def init(self, init_w=5., init_b=-1., reduction='mean'):
self.reduction = reduction
self.w = torch.nn.Parameter(torch.tensor(init_w))
self.b = torch.nn.Parameter(torch.tensor(init_b))
def forward(self, inputs, targets):
# This loss has no way to compute accuracy
S = F.normalize(inputs.squeeze(dim=2), dim=1)
        A = torch.sigmoid(self.w * torch.mm(S, S.t()) + self.b) # Note: this does not keep the diagonal values at 1, which may be questionable.
targets_matrix = targets + torch.zeros_like(A)
condition = targets_matrix - targets_matrix.t()
outputs = -torch.log(torch.where(condition==0, A, 1-A))
if self.reduction == 'sum':
return outputs.sum()
elif self.reduction == 'mean':
return outputs.sum() / targets.shape[0]
else:
raise ValueError("Do not support this reduction {0}".format(self.reduction))
class MixupLoss(TopVirtualLoss):
"""Implement a mixup component to augment data and increase the generalization of model training.
Reference:
[1] <NAME>, <NAME>, <NAME>, and <NAME>. n.d. Mixup: BEYOND EMPIRICAL RISK MINIMIZATION.
[2] Zhu, Yingke, <NAME>, and <NAME>. 2019. “Mixup Learning Strategies for Text-Independent Speaker Verification.”
Github: https://github.com/hongyi-zhang/mixup/blob/master/cifar/utils.py
"""
def init(self, base_loss, mixup_component):
self.base_loss = base_loss
self.mixup_component = mixup_component
def forward(self, inputs, targets):
if self.training:
lam = self.mixup_component.lam
index = self.mixup_component.index
loss = lam * self.base_loss(inputs, targets) + \
(1 - lam) * self.base_loss(inputs, targets[index])
else:
loss = self.base_loss(inputs, targets)
return loss
def get_accuracy(self, targets):
if self.training:
# It is not very clear to compute accuracy for mixed data.
lam = self.mixup_component.lam
index = self.mixup_component.index
return lam * self.compute_accuracy(self.base_loss.get_posterior(), targets) + \
(1 - lam) * self.compute_accuracy(self.base_loss.get_posterior(), targets[index])
else:
return self.compute_accuracy(self.base_loss.get_posterior(), targets)
| StarcoderdataPython |
123141 | def load_key():
with open("key.key","rb") as key:
key = key.read()
return key | StarcoderdataPython |
3304132 | <gh_stars>1-10
import cProfile
import timeit
import profile
import textwrap
import functools
import time
print('Troubleshooting Python Application Development: Chapter 1')
print('-' * 79)
# --------------------------------------------------------------------------------
# 1.1
print('Measuring time between two lines of code with timeit')
print('-' * 79)
t = timeit.Timer(
"print('this line is timed')",
"print('put setup code here')")
print('TIMEIT:')
print(t.timeit(3))
print('REPEAT:')
print(t.repeat(5, 2))
range_size = 2000
count = 2000
vars_for_testing = ';'.join([
"xs = [(str(x), x) for x in range(2000)]",
"d = {}",
])
code_for_testing = textwrap.dedent(
"""
for str_x, x in xs:
d[str_x] = x
""")
def show_results(result):
global count, range_size
print('{:6.2f} usec/pass'.format(
1000000 * (result / count)), end=' ')
print('{:6.2f} usec/item'.format(
(1000000 * (result / count)) / range_size))
print("list len = {}, trying {} iterations".format(
range_size, count))
print('experiment:', end=' ')
t = timeit.Timer(code_for_testing, vars_for_testing)
show_results(t.timeit(number=count))
# --------------------------------------------------------------------------------
# 1.2
print('Figuring out where time is spent with the profile module')
print('-' * 79)
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2)
def fib_seq(n):
seq = []
if n > 0:
seq.extend(fib_seq(n - 1))
seq.append(fib(n))
return seq
profile.run('print(fib_seq(20)); print()')
@functools.lru_cache(maxsize=None)
def fib_memoized(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib_memoized(n - 1) + fib_memoized(n - 2)
def fib_seq_memoized(n):
seq = []
if n > 0:
seq.extend(fib_seq_memoized(n - 1))
seq.append(fib_memoized(n))
return seq
profile.run('print(fib_seq_memoized(20)); print()')
# Running with context
profile.runctx(
'print(fib_seq(n)); print()',
globals(),
{'n': 20},
)
# --------------------------------------------------------------------------------
# 1.3
print('More precise time tracking with cProfile')
print('-' * 79)
print('Profiling 2 + 2 with cProfile:')
cProfile.run("2 + 2")
print('Profiling 3 functions with cProfile:')
def fast_function():
print('fast')
def medium_func():
print('medium')
time.sleep(1)
def slow_func():
print('slow')
time.sleep(2)
def test_func():
fast_function()
medium_func()
slow_func()
cProfile.run('test_func()')
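# Tip: cProfile.run also accepts a sort key, e.g.
# cProfile.run('test_func()', sort='cumulative') orders rows by cumulative time.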
# --------------------------------------------------------------------------------
# 1.4
print('Looking at memory consumption with memory_profiler')
print('-' * 79)
import memory_profiler
@memory_profiler.profile
def test_func():
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
addition = 0
for num in numbers:
addition += num
return addition
test_func()
@memory_profiler.profile
def memory_clearing_func():
x = [1] * (10 ** 5)
y = [2] * (10 ** 7)
del y
return x
memory_clearing_func()
| StarcoderdataPython |
151558 | <reponame>asantos2000/master-data-structures-algorithms<filename>lista02/majoritario_2.py
'''
majoritario(V):
    E = merge_sort(V)
    count = 0
    half = V.length div 2
    previous_item = E[0]
    for item in E
        if item == previous_item
            increment count
            if count > half
                return item
        else
            set count to 1  (the new item itself counts once)
            previous_item = item
    return -1
'''
import logging
from merge_sort import merge_sort
LOGGER = logging.getLogger(__name__)
def majoritario(V):
E = merge_sort(V)
conte = 0
m = len(V) // 2
ea = E[0]
for e in E:
if e == ea:
conte += 1
if conte > m:
return e
else:
            conte = 1  # the first item of a new run counts as one occurrence
ea = e
return -1
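# For comparison, the Boyer-Moore majority vote finds the same answer in O(n) time
# and O(1) extra space (sketch; the verification pass is needed because a majority
# element may not exist):
def majoritario_boyer_moore(V):
    candidate, count = None, 0
    for e in V:
        if count == 0:
            candidate = e
        count += 1 if e == candidate else -1
    # second pass: confirm the candidate really occurs more than n/2 times
    if candidate is not None and V.count(candidate) > len(V) // 2:
        return candidate
    return -1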
# python majoritario_2.py
if __name__ == "__main__":
# Case 1
V=[2,2,3,2,9,2]
print(majoritario(V))
print(max(set(V), key=V.count))
    # # python:
    # # set(V) = {9, 2, 3}
    # # V.count is the function that counts occurrences of an item in the list
    # # max - picks the element with the largest count
    #print(V.count(2)) # python count occurrences
# Case 2
V=[0, 1]
print(majoritario(V))
print(max(set(V), key=V.count))
#print(V.count(-1))
# Case 3
V=[3, 3, 3, 3, 3, 3]
print(majoritario(V))
print(max(set(V), key=V.count))
#print(V.count(3))
# Case 4
V=[1, 1, 4, 1, 1, 4, 1, 1, 4, 1, 1, 4, 1, 1, 4]
print(majoritario(V))
print(max(set(V), key=V.count))
#print(V.count(1))
# Case 5
V=[3, 3, 0, 3, 3, 0, 3, 3, 0, 3, 3, 0, 3, 3, 0]
print(majoritario(V))
print(max(set(V), key=V.count))
# python -m pytest majoritario_2.py
import random
import time
def test_majoritario():
for i in range(0,10):
V=5*[random.randrange(0,5),random.randrange(0,5),random.randrange(0,5)]
        # function under test
        inicio = time.time()
        e = majoritario(V)
        duracao = time.time() - inicio
        # end of timed section
ne = V.count(e)
        m = (len(V) // 2) + 1 # half + 1
te = max(set(V), key=V.count)
LOGGER.warning(f'{V}, {e}, {te}, {ne}, {m}, {duracao}')
if e >= 0:
            assert(e == te) # check that it really is the majority element
# @given(st.lists(st.permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0]), min_size=3))
# def test_elections_are_transitive(X):
# flat_list = [item for sublist in X for item in sublist]
# event(f'{flat_list}') | StarcoderdataPython |
116928 | class Solution:
# Count Consecutive Groups (Top Voted), O(n) time and space
def countBinarySubstrings(self, s: str) -> int:
s = list(map(len, s.replace('01', '0 1').replace('10', '1 0').split()))
return sum(min(a, b) for a, b in zip(s, s[1:]))
# Linear Scan (Solution), O(n) time, O(1) space
def countBinarySubstrings(self, s: str) -> int:
ans, prev, cur = 0, 0, 1
for i in range(1, len(s)):
if s[i-1] != s[i]:
ans += min(prev, cur)
prev, cur = cur, 1
else:
cur += 1
return ans + min(prev, cur)
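    # Example (illustrative): Solution().countBinarySubstrings("00110011") == 6,
    # since the group lengths are [2, 2, 2, 2] and the pairwise minima sum to 2+2+2.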
| StarcoderdataPython |
66068 | from twisted.internet.protocol import Protocol
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet import reactor
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from threading import Thread
from tpm import *
import json
import base64
# Used to access the index in the exchanged messages
MESSAGE_TYPE = 0
MESSAGE = 1
# The types of the messages exchanged
TYPE_INPUTS = 0
TYPE_OUTPUT_SERVER = 1
TYPE_OUTPUT_CLIENT = 2
TYPE_ACK = 3
TYPE_TEST = 4
TYPE_TEST_OK = 5
TYPE_MESSAGE = 6
SYNC_THRESHOLD = 20
TEST_MESSAGE = 'SYNCED'
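# Protocol summary (derived from the dispatch in syncronizer below): one peer sends
# TYPE_INPUTS with fresh TPM inputs; the other answers TYPE_OUTPUT_SERVER and gets
# TYPE_OUTPUT_CLIENT back; matching outputs train both TPMs and advance the counter,
# then TYPE_ACK starts the next round. Once SYNC_THRESHOLD rounds pass, TYPE_TEST
# carries TEST_MESSAGE AES-encrypted with a key derived from the TPM weights
# (SHA-256), and TYPE_TEST_OK confirms both machines hold identical weights.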
class NeuralCryptography(Protocol):
def __init__(self):
        self.tpm = TreeParityMachine(4, 3)
self.count = 0
self.syncronized = False
self.key = None
self.cipher = None
def syncronizer(self, data):
data = json.loads(data)
if self.count == SYNC_THRESHOLD:
self.test_sync()
elif data[MESSAGE_TYPE] == TYPE_INPUTS:
self.receive_inputs(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_OUTPUT_SERVER:
self.receive_output_from_server(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_OUTPUT_CLIENT:
self.receive_output_from_client(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_ACK:
self.receive_ack()
elif data[MESSAGE_TYPE] == TYPE_TEST:
self.receive_test(data[MESSAGE])
elif data[MESSAGE_TYPE] == TYPE_TEST_OK:
self.receive_test_ok()
def receive_inputs(self, inputs):
self.tpm(inputs)
self.transport.write(json.dumps([TYPE_OUTPUT_SERVER, self.tpm.y]))
def receive_output_from_server(self, output):
self.transport.write(json.dumps([TYPE_OUTPUT_CLIENT, self.tpm.y]))
if self.tpm.y == output:
self.count += 1
self.tpm.train()
else:
self.count = 0
def receive_output_from_client(self, output):
if self.tpm.y == output:
self.count += 1
self.tpm.train()
else:
self.count = 0
self.transport.write(json.dumps([TYPE_ACK, 0]))
def receive_ack(self):
self.tpm.generate_inputs()
self.tpm(self.tpm.x)
self.transport.write(json.dumps([TYPE_INPUTS, self.tpm.x]))
def synced(self):
return self.syncronized
def test_sync(self):
self.count = 0
self.generate_key()
self.cipher = AES.new(self.key, AES.MODE_CBC)
ciphertext = self.cipher.encrypt(self.pad(TEST_MESSAGE.encode('utf-8')))
ciphertext = base64.b64encode(ciphertext)
self.transport.write(json.dumps([TYPE_TEST, ciphertext]))
def receive_test(self, ciphertext):
self.generate_key()
self.cipher = AES.new(self.key, AES.MODE_CBC)
ciphertext = base64.b64decode(ciphertext)
plaintext = self.cipher.decrypt(ciphertext)
plaintext = self.unpad(plaintext)
if plaintext == TEST_MESSAGE:
self.transport.write(json.dumps([TYPE_TEST_OK, TEST_MESSAGE]))
self.syncronized = True
            print(self.tpm.weights())
self.start_service()
else:
self.transport.write(json.dumps([TYPE_ACK, 0]))
def receive_test_ok(self):
self.syncronized = True
self.start_service()
        print(self.tpm.weights())
def generate_key(self):
seed = str(self.tpm.weights())
sha = SHA256.new()
sha.update(seed)
self.key = sha.digest()
return self.key
def pad(self, s):
BS = 16
return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
def unpad(self, s):
return s[0:-ord(s[-1])]
def call(self, target, args):
self.thread = Thread(target=target, args=(args))
def receive(self, target):
self.data_received = target
def start_service(self):
self.thread.start()
def received(self, data):
data = json.loads(data)
ciphertext = data[MESSAGE]
ciphertext = base64.b64decode(ciphertext)
plaintext = self.cipher.decrypt(ciphertext)
plaintext = self.unpad(plaintext)
self.data_received(plaintext)
def send_message(self, data):
ciphertext = self.cipher.encrypt(self.pad(data))
ciphertext = base64.b64encode(ciphertext)
self.transport.write(json.dumps([TYPE_MESSAGE, ciphertext]))
| StarcoderdataPython |
3327036 | from brainiak.eventseg.event import EventSegment
from scipy.special import comb
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
def test_create_event_segmentation():
es = EventSegment(5)
assert es, "Invalid EventSegment instance"
def test_fit_shapes():
K = 5
V = 3
T = 10
es = EventSegment(K, n_iter=2)
sample_data = np.random.rand(V, T)
es.fit(sample_data.T)
assert es.segments_[0].shape == (T, K), "Segmentation from fit " \
"has incorrect shape"
assert np.isclose(np.sum(es.segments_[0], axis=1), np.ones(T)).all(), \
"Segmentation from learn_events not correctly normalized"
T2 = 15
sample_data2 = np.random.rand(V, T2)
test_segments, test_ll = es.find_events(sample_data2.T)
assert test_segments.shape == (T2, K), "Segmentation from find_events " \
"has incorrect shape"
assert np.isclose(np.sum(test_segments, axis=1), np.ones(T2)).all(), \
"Segmentation from find_events not correctly normalized"
es_invalid = EventSegment(K)
with pytest.raises(ValueError):
es_invalid.model_prior(K-1)
# ``with`` block is about to end with no error.
pytest.fail("T < K should cause error")
with pytest.raises(ValueError):
es_invalid.set_event_patterns(np.zeros((V, K-1)))
pytest.fail("#Events < K should cause error")
def test_simple_boundary():
es = EventSegment(2)
random_state = np.random.RandomState(0)
sample_data = np.array([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]]) + \
random_state.rand(2, 7) * 10
es.fit(sample_data.T)
events = np.argmax(es.segments_[0], axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly segment two events"
events_predict = es.predict(sample_data.T)
assert np.array_equal(events_predict, [0, 0, 0, 1, 1, 1, 1]), \
"Error in predict interface"
def test_event_transfer():
es = EventSegment(2)
sample_data = np.asarray([[1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T)[0]
pytest.fail("Should need to set variance")
with pytest.raises(NotFittedError):
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
pytest.fail("Should need to set patterns")
es.set_event_patterns(np.asarray([[1, 0], [0, 1]]))
seg = es.find_events(sample_data.T, np.asarray([1, 1]))[0]
events = np.argmax(seg, axis=1)
assert np.array_equal(events, [0, 0, 0, 1, 1, 1, 1]),\
"Failed to correctly transfer two events to new data"
def test_weighted_var():
es = EventSegment(2)
D = np.zeros((8, 4))
for t in range(4):
D[t, :] = (1/np.sqrt(4/3)) * np.array([-1, -1, 1, 1])
for t in range(4, 8):
D[t, :] = (1 / np.sqrt(4 / 3)) * np.array([1, 1, -1, -1])
mean_pat = D[[0, 4], :].T
weights = np.zeros((8, 2))
weights[:, 0] = [1, 1, 1, 1, 0, 0, 0, 0]
weights[:, 1] = [0, 0, 0, 0, 1, 1, 1, 1]
assert np.array_equal(
es.calc_weighted_event_var(D, weights, mean_pat), [0, 0]),\
"Failed to compute variance with 0/1 weights"
weights[:, 0] = [1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5]
weights[:, 1] = [0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1]
true_var = (4 * 0.5 * 12)/(6 - 5/6) * np.ones(2) / 4
assert np.allclose(
es.calc_weighted_event_var(D, weights, mean_pat), true_var),\
"Failed to compute variance with fractional weights"
def test_sym():
es = EventSegment(4)
evpat = np.repeat(np.arange(10).reshape(-1, 1), 4, axis=1)
es.set_event_patterns(evpat)
D = np.repeat(np.arange(10).reshape(1, -1), 20, axis=0)
ev = es.find_events(D, var=1)[0]
# Check that events 1-4 and 2-3 are symmetric
assert np.all(np.isclose(ev[:, :2], np.fliplr(np.flipud(ev[:, 2:])))),\
"Fit with constant data is not symmetric"
def test_chains():
es = EventSegment(5, event_chains=np.array(['A', 'A', 'B', 'B', 'B']))
es.set_event_patterns(np.array([[1, 1, 0, 0, 0],
[0, 0, 1, 1, 1]]))
sample_data = np.array([[0, 0, 0], [1, 1, 1]])
seg = es.find_events(sample_data.T, 0.1)[0]
ev = np.nonzero(seg > 0.99)[1]
assert np.array_equal(ev, [2, 3, 4]),\
"Failed to fit with multiple chains"
def test_prior():
K = 10
T = 100
es = EventSegment(K)
mp = es.model_prior(T)[0]
p_bound = np.zeros((T, K-1))
norm = comb(T-1, K-1)
for t in range(T-1):
for k in range(K-1):
# See supplementary material of Neuron paper
# https://doi.org/10.1016/j.neuron.2017.06.041
p_bound[t+1, k] = comb(t, k) * comb(T-t-2, K-k-2) / norm
p_bound = np.cumsum(p_bound, axis=0)
mp_gt = np.zeros((T, K))
for k in range(K):
if k == 0:
mp_gt[:, k] = 1 - p_bound[:, 0]
elif k == K - 1:
mp_gt[:, k] = p_bound[:, k-1]
else:
mp_gt[:, k] = p_bound[:, k-1] - p_bound[:, k]
assert np.all(np.isclose(mp, mp_gt)),\
"Prior does not match analytic solution"
| StarcoderdataPython |
3338495 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--target", type=Path, required=True)
parser.add_argument("--save_dir", type=Path, required=True)
parser.add_argument("--format", default="png")
parser.add_argument("--keep_dir", action="store_true")
args = parser.parse_args()
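# Example invocation (illustrative file names, not part of the original script):
#   python plot_npz.py --target features.npz --save_dir plots --format png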
npz_obj = np.load(args.target)
save_dir = args.save_dir
save_dir.mkdir(parents=True, exist_ok=True)
format = args.format
for key, feat in npz_obj.items():
print(key)
plt.clf()
plt.plot(feat)
plt.title(key)
if not args.keep_dir:
key = key.replace("/", "_")
outfile = save_dir / f"{key}.{format}"
outfile.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(outfile)
| StarcoderdataPython |
1629776 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import csv
def getstart(file_path):
    data = []
    with open(file_path, 'r', errors='ignore') as csv_file:
        reader = csv.reader(csv_file)
        for line in reader:
            data.append(line)
return data | StarcoderdataPython |
170430 | import math
vineyard_area = int(input())
production_area = vineyard_area * (40 / 100)
kg_grape = float(input()) * production_area
vine_for_sale = int(input())
workers = int(input())
vine = kg_grape / 2.5
if vine >= vine_for_sale:
vine_left = vine - vine_for_sale
vine_for_workers = vine_left / workers
print(f"Good harvest this year! Total wine: {math.floor(vine)} liters.\n{math.ceil(vine_left)}"
f" liters left -> {math.ceil(vine_for_workers)} liters per person.")
else:
vine_need = vine_for_sale - vine
print(f"It will be a tough winter! More {math.floor(vine_need)} liters wine needed.")
| StarcoderdataPython |
152884 | import json
import textwrap
from redict import utils
class JsonMinifyTestCase:
def template(self, json_string, expected):
in_dict = json.loads(utils.json_minify(json_string))
expected_dict = json.loads(expected)
assert in_dict == expected_dict
def test_1(self):
json_string = textwrap.dedent('''
// this is a JSON file with comments
{
"foo": "bar", // this is cool
"bar": [
"baz", "bum"
],
/* the rest of this document is just fluff
in case you are interested. */
"something": 10,
"else": 20
}
/* NOTE: You can easily strip the whitespace and comments
from such a file with the JSON.minify() project hosted
here on github at http://github.com/getify/JSON.minify
*/''')
self.template(
json_string,
'{"foo":"bar","bar":["baz","bum"],"something":10,"else":20}'
)
def test_2(self):
self.template(textwrap.dedent(
'{"/*":"*/","//":"",/*"//"*/"/*/"://"//"}'),
'{"/*":"*/","//":"","/*/":"//"}'
)
def test_3(self):
json_string = textwrap.dedent(
r'''
/*
this is a
multi line comment */{
"foo"
:
"bar/*"// something
, "b\"az":/*
something else */"blah"
}
'''
)
self.template(
json_string,
r'{"foo":"bar/*","b\"az":"blah"}'
)
def test_4(self):
self.template(textwrap.dedent(
r'''{"foo": "ba\"r//", "bar\\": "b\\\"a/*z", "baz\\\\": /* yay */ "fo\\\\\"*/o"}'''),
r'{"foo":"ba\"r//","bar\\":"b\\\"a/*z","baz\\\\":"fo\\\\\"*/o"}'
)
| StarcoderdataPython |
1777782 | <reponame>GiverPlay007/aprendendo-python
##################################
# Calculate a function in Python #
##################################
calculos = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for calculo in calculos:
    print('=== Calculate function of X ===')
    print('Session', calculo)
    print(' ')
    a = input('What is the individual value?\nA: ')
    b = input('What is the fixed value?\nA: ')
    var = input('What is the variable (X)?\nA: ')
    calc = int(a) * int(var)
    fx = calc + int(b)
    print(' ')
    print('Hold on while I calculate for you!!\nRelax, go grab your coffee!')
    print('Wait, wait, no time for coffee, I am already done\n.')
    print('The total value is:', fx)
    print(' \nSee how fast I calculate???\n')
    print('================') | StarcoderdataPython
1728101 | <filename>yui/apps/info/toranoana/tasks.py<gh_stars>10-100
import asyncio
import datetime
from collections import defaultdict
from typing import Union
import aiohttp
from more_itertools import chunked
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql.expression import and_
from sqlalchemy.sql.expression import exists
from sqlalchemy.sql.expression import or_
from .commons import HEADERS
from .commons import get_urls
from .models import Author
from .models import Character
from .models import Circle
from .models import Coupling
from .models import Genre
from .models import Item
from .models import ItemAuthor
from .models import ItemCharacter
from .models import ItemCircle
from .models import ItemCoupling
from .models import ItemTag
from .models import STOCK_LABEL
from .models import Stock
from .models import Tag
from .models import Target
from .models import Watch
from ....box import box
from ....types.slack.attachment import Attachment
from ....types.slack.attachment import Field
from ....utils.datetime import now
from ....utils.html import get_root
box.assert_channel_required('toranoana')
GenreURLlist = dict[Genre, list[str]]
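# Rough flow of this module: the half-hourly `crawl` cron (bottom of the file)
# builds a deduplicated genre/URL map from the Watch rows, fetches every paginated
# result page, upserts items and their tag/author/circle/coupling/character
# relations in `process`, and finally posts Slack attachments for every item whose
# `updated_at` matches this run's timestamp.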
def process(
*,
sess,
h,
genre: Genre,
dt: datetime.datetime,
is_male: bool,
):
rows = h.cssselect('#search-result-container li.product-list-item')
for row in rows:
with sess.no_autoflush:
tags: list[tuple[Tag, bool]] = []
authors: list[tuple[Author, bool]] = []
circles: list[tuple[Circle, bool]] = []
couplings: list[tuple[Coupling, bool]] = []
characters: list[tuple[Character, bool]] = []
code = row.cssselect('input#commodityCode')[0].get('value').strip()
is_new = False
is_adult = False
try:
item = sess.query(Item).filter_by(code=code).one()
except NoResultFound:
is_new = True
item = Item()
item.code = code
item.genre = genre
thumbnail_container = row.cssselect('.product-list-img a')[0]
item.image_url = str(
thumbnail_container[-1].get('data-src').strip()
)
item.title = str(
row.cssselect('.product-list-title')[0].text_content().strip()
)
item.price = int(
row.cssselect(
'.product-list-price .fs_L',
)[0]
.text_content()
.strip()
.replace(',', '')
)
item.stock = {
'stock_sufficient': Stock.ok,
'stock_little': Stock.few,
}.get(
row.cssselect('.product-list-basic')[0].get('class'),
Stock.soldout,
)
name_els = row.cssselect('.product-list-name a')
for el in name_els:
url = el.get('href', '').strip()
name = el.text_content().strip()
if '/circle/' in url:
code = url.split('/circle/', 1)[1].replace('/all/', '')
try:
circle = (
sess.query(Circle)
.filter_by(code=code, name=name)
.one()
)
circles.append((circle, True))
except NoResultFound:
circle = Circle(code=code, name=name)
circles.append((circle, False))
elif 'actorKindId' in url:
code = 'ACTR' + url.split('ACTR', 1)[1]
try:
author = (
sess.query(Author)
.filter_by(code=code, name=name)
.one()
)
authors.append((author, True))
except NoResultFound:
author = Author(code=code, name=name)
authors.append((author, False))
label_els = row.cssselect('.product-list-labels li a')
for el in label_els:
url = el.get('href', '').strip()
name = el.text_content().strip()
if '?coupling_facet=' in url: # coupling
code = name
try:
coupling = (
sess.query(Coupling)
.filter_by(code=code, name=name)
.one()
)
couplings.append((coupling, True))
except NoResultFound:
coupling = Coupling(code=code, name=name)
couplings.append((coupling, False))
elif '?charaId=' in url: # character
code = url.split('?charaId=', 1)[1]
try:
character = (
sess.query(Character)
.filter_by(code=code, name=name)
.one()
)
characters.append((character, True))
except NoResultFound:
character = Character(code=code, name=name)
characters.append((character, False))
tags_els = row.cssselect('.product-list-tags')
for el in tags_els:
code = (
el.get('class').split(' ')[-1].replace('catalogMark', '')
)
name = el.text_content().strip()
if code == '18':
is_adult = True
try:
tag = sess.query(Tag).filter_by(code=code, name=name).one()
tags.append((tag, True))
except NoResultFound:
tag = Tag(code=code, name=name)
tags.append((tag, False))
if is_male:
if is_adult:
item.male_target = Target.adult
else:
item.male_target = Target.common
else:
if is_adult:
item.female_target = Target.adult
else:
item.female_target = Target.common
queue: list[
Union[Author, Circle, Tag, Coupling, Character, Item]
] = []
old_tags: list[int] = []
old_authors: list[int] = []
old_circles: list[int] = []
old_couplings: list[int] = []
old_characters: list[int] = []
if not is_new:
old_tags = [
x.id
for x in sess.query(Tag).filter(
Tag.id == ItemTag.tag_id,
ItemTag.item == item,
)
]
old_authors = [
x.id
for x in sess.query(Author).filter(
Author.id == ItemAuthor.author_id,
ItemAuthor.item == item,
)
]
old_circles = [
x.id
for x in sess.query(Circle).filter(
Circle.id == ItemCircle.circle_id,
ItemCircle.item == item,
)
]
old_couplings = [
x.id
for x in sess.query(Coupling).filter(
Coupling.id == ItemCoupling.coupling_id,
ItemCoupling.item == item,
)
]
old_characters = [
x.id
for x in sess.query(Character).filter(
Character.id == ItemCharacter.character_id,
ItemCharacter.item == item,
)
]
for tag, wrote in tags:
if not wrote:
queue.append(tag)
if (
is_new
or not item.id
or not tag.id
or not sess.query(
exists().where(
and_(
ItemTag.item_id == item.id,
ItemTag.tag_id == tag.id,
)
)
).scalar()
):
item.tags.append(tag)
if not is_new and tag.id in old_tags:
old_tags.remove(tag.id)
for author, wrote in authors:
if not wrote:
queue.append(author)
if (
is_new
or not item.id
or not author.id
or not sess.query(
exists().where(
and_(
ItemAuthor.item_id == item.id,
ItemAuthor.author_id == author.id,
)
)
).scalar()
):
item.authors.append(author)
if not is_new and author.id in old_authors:
old_authors.remove(author.id)
for circle, wrote in circles:
if not wrote:
queue.append(circle)
if (
is_new
or not item.id
or not circle.id
or not sess.query(
exists().where(
and_(
ItemCircle.item_id == item.id,
ItemCircle.circle_id == circle.id,
)
)
).scalar()
):
item.circles.append(circle)
if not is_new and circle.id in old_circles:
old_circles.remove(circle.id)
for coupling, wrote in couplings:
if not wrote:
queue.append(coupling)
if (
is_new
or not item.id
or not coupling.id
or not sess.query(
exists().where(
and_(
ItemCoupling.item_id == item.id,
ItemCoupling.coupling_id == coupling.id,
)
)
).scalar()
):
item.couplings.append(coupling)
if not is_new and coupling.id in old_couplings:
old_couplings.remove(coupling.id)
for character, wrote in characters:
if not wrote:
queue.append(character)
if (
is_new
or not item.id
or not character.id
or not sess.query(
exists().where(
and_(
ItemCharacter.item_id == item.id,
ItemCharacter.character_id == character.id,
)
)
).scalar()
):
item.characters.append(character)
if not is_new and character.id in old_characters:
old_characters.remove(character.id)
if is_new or sess.is_modified(item):
item.updated_at = dt
item.checked_at = dt
queue.append(item)
with sess.begin():
for record in queue:
sess.add(record)
if not is_new:
sess.query(ItemTag).filter(
ItemTag.item == item,
ItemTag.tag_id.in_(old_tags),
).delete(synchronize_session=False)
sess.query(ItemAuthor).filter(
ItemAuthor.item == item,
ItemAuthor.author_id.in_(old_authors),
).delete(synchronize_session=False)
sess.query(ItemCircle).filter(
ItemCircle.item == item,
ItemCircle.circle_id.in_(old_circles),
).delete(synchronize_session=False)
sess.query(ItemCoupling).filter(
ItemCoupling.item == item,
ItemCoupling.coupling_id.in_(old_couplings),
).delete(synchronize_session=False)
sess.query(ItemCharacter).filter(
ItemCharacter.item == item,
ItemCharacter.character_id.in_(old_characters),
).delete(synchronize_session=False)
def get_dedupe_genre_url_map(sess) -> GenreURLlist:
result: defaultdict[Genre, set[str]] = defaultdict(set)
for watch in sess.query(Watch):
genre = watch.genre
code = genre.code
result[genre] |= set(get_urls(code, watch.male, watch.female))
return {genre: list(urls) for genre, urls in result.items()}
async def scan_all_pages(
*,
bot,
sess,
session,
genre: Genre,
url: str,
is_male: bool,
dt: datetime.datetime,
):
page = 1
end_page = 1
while page <= end_page:
        paginated_url = f'{url}&currentPage={page}'
if page > 1:
await asyncio.sleep(1)
async with session.get(paginated_url) as resp:
blob = await resp.text()
h = get_root(blob)
if page == 1:
pager = h.cssselect('#pager')
if pager:
end_page = int(pager[0].get('data-maxpage', 1))
await bot.run_in_other_thread(
process,
sess=sess,
h=h,
genre=genre,
dt=dt,
is_male=is_male,
)
page += 1
def get_watches(*, sess, item: Item):
return (
sess.query(Watch)
.join(Item, Item.genre_id == Watch.genre_id)
.filter(
Item.id == item.id,
or_(
and_(
Item.male_target == Target.common,
Watch.male.in_([Target.wildcard, Target.common]),
),
and_(
Item.male_target == Target.adult,
Watch.male.in_([Target.wildcard, Target.adult]),
),
and_(
Item.female_target == Target.common,
Watch.female.in_([Target.wildcard, Target.common]),
),
and_(
Item.female_target == Target.adult,
Watch.female.in_([Target.wildcard, Target.adult]),
),
),
)
)
@box.cron('0,30 * * * *')
async def crawl(bot, sess):
dt = now()
url_map = await bot.run_in_other_thread(get_dedupe_genre_url_map, sess)
async with aiohttp.ClientSession(headers=HEADERS) as session:
loop1_first = True
for genre, urls in url_map.items():
if loop1_first:
loop1_first = False
else:
await asyncio.sleep(5)
loop2_first = True
for url in urls:
if loop2_first:
loop2_first = False
else:
await asyncio.sleep(1)
is_male = 'joshi' not in url
await scan_all_pages(
bot=bot,
sess=sess,
session=session,
genre=genre,
url=url,
is_male=is_male,
dt=dt,
)
data: defaultdict[str, list[Attachment]] = defaultdict(list)
for item in sess.query(Item).filter_by(updated_at=dt):
author_name = ', '.join(author.name for author in item.authors)
circle_name = ', '.join(circle.name for circle in item.circles)
if author_name:
if circle_name:
author_line = f'{author_name} ({circle_name})'
else:
author_line = author_name
elif circle_name:
author_line = circle_name
else:
author_line = 'Unknown Author'
targets = []
color = '3399ff'
        if item.male_target == Target.adult:
color = 'ff0000'
targets.append('남성향 성인물')
elif item.male_target == Target.common:
targets.append('남성향 일반물')
if item.female_target == Target.adult:
color = 'ff0000'
targets.append('여성향 성인물')
elif item.female_target == Target.common:
targets.append('여성향 일반물')
attachment = Attachment(
color=color,
title=item.title,
title_link=item.url,
author_name=author_line,
image_url=item.image_url,
fields=[
Field(
title='장르',
value=item.genre.name_ko or item.genre.name,
short=True,
),
Field(
title='카테고리',
value='\n'.join(targets),
short=True,
),
Field(
title='가격',
value=f'{item.price} JPY',
short=True,
),
Field(title='재고', value=STOCK_LABEL[item.stock], short=True),
],
)
if item.couplings:
attachment.fields.append(
Field(
title='커플링',
value=', '.join(
coupling.name_ko or coupling.name
for coupling in item.couplings
),
short=True,
)
)
if item.characters:
attachment.fields.append(
Field(
title='등장인물',
value=', '.join(
character.name_ko or character.name
for character in item.characters
),
short=True,
)
)
watches = await bot.run_in_other_thread(
get_watches,
sess=sess,
item=item,
)
for watch in watches:
data[watch.print_target_id].append(attachment)
for target, attachments in data.items():
if target.startswith('U'):
resp = await bot.api.conversations.open(users=[target])
channel_id = resp.body['channel']['id']
else:
channel_id = target
for a in chunked(attachments, 20):
await bot.say(
channel_id,
'토라노아나 변경항목을 전달해드릴게요!',
attachments=a,
)
| StarcoderdataPython |
3225780 | <filename>hackerrank/algorithms/implementation/medium/extra_long_factorials/py/solution.py
#!/bin/python3
import sys
fact = lambda n: 1 if n <= 1 else n * fact(n - 1)
n = int(input().strip())
fct = fact(n)
print(fct)
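# Note: the recursive lambda hits Python's default recursion limit for large n;
# math.factorial(n) is the stack-safe standard-library alternative.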
| StarcoderdataPython |
1701573 | <gh_stars>1-10
"""
********************************************
test_generator_modul_test_einstellungen.py
@digitalfellowship - as of 07/2021
Author: <NAME>
********************************************
This module creates the test settings for an ILIAS test.
Not every setting available in ILIAS is supported; only the options found
under "Allgemeine Einstellungen" (general settings) in ILIAS are covered.
"""
from tkinter import *
from tkscrolledframe import ScrolledFrame  # scrollable frame (scrollbars)
import sqlite3
import xml.etree.ElementTree as ET
from datetime import datetime
import datetime
import os
class Test_Einstellungen_GUI:
def __init__(self, project_root_path, test_qti_file_path_output):
        # Project root path
        self.project_root_path = project_root_path
        # Path of the qti (XML) file for the generated test
        self.test_qti_file_path_output = test_qti_file_path_output
        # Database and table names
        self.settings_database = "test_settings_profiles_db.db"
        self.settings_database_table = "my_profiles_table"
        # Database path
self.settings_database_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.settings_database))
# New Window must be "Toplevel" not "Tk()" in order to get Radiobuttons to work properly
self.test_settings_window = Toplevel()
self.test_settings_window.title("Test Einstellungen verwalten")
# Create a ScrolledFrame widget
self.sf_test_settings = ScrolledFrame(self.test_settings_window, width=300,
height=300)
self.sf_test_settings.pack(expand=1, fill="both")
        ### Bind the arrow keys and scroll wheel
        ### This call has no effect here but raises (negligible) errors, hence disabled:
# self.sf_test_settings.bind_arrow_keys(app)
# self.sf_test_settings.bind_scroll_wheel(app)
# Create a frame within the ScrolledFrame
self.test_settings = self.sf_test_settings.display_widget(Frame)
self.frame1 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame1.grid(row=0, column=0, padx=20, pady=10, sticky=NW)
self.frame2 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame2.grid(row=0, column=1, padx=20, pady=10, sticky=NW)
self.frame3 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame3.grid(row=0, column=2, padx=20, pady=10, sticky=NW)
self.res12_min_listbox_label = Label(self.frame1, text="EINSTELLUNGEN DES TESTS",
font=('Helvetica', 10, 'bold'))
self.res12_min_listbox_label.grid(row=0, column=0, sticky=W, padx=10, pady=(20, 0))
self.res90_min_listbox_label = Label(self.frame1, text="Test-Titel")
self.res90_min_listbox_label.grid(row=1, column=0, sticky=W, padx=10)
self.res91_max_listbox_label = Label(self.frame1, text="Beschreibung")
self.res91_max_listbox_label.grid(row=2, column=0, sticky=W, padx=10)
self.res1_max_listbox_label = Label(self.frame1, text="Auswahl der Testfragen")
self.res1_max_listbox_label.grid(row=4, column=0, sticky=W, padx=10)
self.res1_prec_listbox_label = Label(self.frame1, text="Datenschutz")
self.res1_prec_listbox_label.grid(row=7, column=0, sticky=W, padx=10)
self.res1_tol_listbox_label = Label(self.frame1, text="VERFÜGBARKEIT", font=('Helvetica', 10, 'bold'))
self.res1_tol_listbox_label.grid(row=9, column=0, sticky=W, padx=10, pady=(20, 0))
self.res1_points_listbox_label = Label(self.frame1, text="Online --- not working")
self.res1_points_listbox_label.grid(row=10, column=0, sticky=W, padx=10)
self.res13_points_listbox_label = Label(self.frame1,
text="Zeitlich begrenzte Verfügbarkeit --- not working")
self.res13_points_listbox_label.grid(row=11, column=0, sticky=W, padx=10)
self.res22_tol_listbox_label = Label(self.frame1, text="INFORMATIONEN ZUM EINSTIEG",
font=('Helvetica', 10, 'bold'))
self.res22_tol_listbox_label.grid(row=14, column=0, sticky=W, padx=10, pady=(20, 0))
self.res23_points_listbox_label = Label(self.frame1, text="Einleitung")
self.res23_points_listbox_label.grid(row=15, column=0, sticky=W, padx=10)
self.res24_points_listbox_label = Label(self.frame1, text="Testeigenschaften anzeigen")
self.res24_points_listbox_label.grid(row=16, column=0, sticky=W, padx=10)
self.res31_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: ZUGANG", font=('Helvetica', 10, 'bold'))
self.res31_tol_listbox_label.grid(row=17, column=0, sticky=W, padx=10, pady=(20, 0))
self.test_time_year_label = Label(self.frame1, text="Jahr")
self.test_time_year_label.grid(row=17, column=1, sticky=W)
self.test_time_month_label = Label(self.frame1, text="Mon.")
self.test_time_month_label.grid(row=17, column=1, sticky=W, padx=35)
self.test_time_day_label = Label(self.frame1, text="Tag")
self.test_time_day_label.grid(row=17, column=1, sticky=W, padx=70)
self.test_time_hour_label = Label(self.frame1, text="Std.")
self.test_time_hour_label.grid(row=17, column=1, sticky=W, padx=105)
self.test_time_minute_label = Label(self.frame1, text="Min.")
self.test_time_minute_label.grid(row=17, column=1, sticky=W, padx=140)
self.res32_points_listbox_label = Label(self.frame1, text="Test-Start")
self.res32_points_listbox_label.grid(row=18, column=0, sticky=W, padx=10)
self.res33_points_listbox_label = Label(self.frame1, text="Test-Ende")
self.res33_points_listbox_label.grid(row=19, column=0, sticky=W, padx=10)
self.res34_tol_listbox_label = Label(self.frame1, text="Test-Passwort")
self.res34_tol_listbox_label.grid(row=20, column=0, sticky=W, padx=10)
self.res35_points_listbox_label = Label(self.frame1, text="Nur ausgewählte Teilnehmer")
self.res35_points_listbox_label.grid(row=21, column=0, sticky=W, padx=10)
self.res36_points_listbox_label = Label(self.frame1, text="Anzahl gleichzeitiger Teilnehmer begrenzen")
self.res36_points_listbox_label.grid(row=22, column=0, sticky=W, padx=10)
self.res37_points_listbox_label = Label(self.frame1, text="Inaktivitätszeit der Teilnehmner (in Sek.)")
self.res37_points_listbox_label.grid(row=23, column=0, sticky=W, padx=30)
self.res41_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: STEUERUNG TESTDURCHLAUF",
font=('Helvetica', 10, 'bold'))
self.res41_tol_listbox_label.grid(row=24, column=0, sticky=W, padx=10, pady=(20, 0))
self.res42_points_listbox_label = Label(self.frame1, text="Anzahl von Testdurchläufen begrenzen")
self.res42_points_listbox_label.grid(row=25, column=0, sticky=W, padx=10)
self.res43_points_listbox_label = Label(self.frame1, text="Wartezeit zwischen Durchläufen erzwingen")
self.res43_points_listbox_label.grid(row=26, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer begrenzen")
self.res44_tol_listbox_label.grid(row=27, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer (in Min).")
self.res44_tol_listbox_label.grid(row=28, column=0, sticky=W, padx=30)
self.res44_tol_listbox_label = Label(self.frame1, text="Max. Bearbeitungsdauer für jeden Testlauf zurücksetzen")
self.res44_tol_listbox_label.grid(row=29, column=0, sticky=W, padx=30)
self.res45_points_listbox_label = Label(self.frame1, text="Prüfungsansicht")
self.res45_points_listbox_label.grid(row=30, column=0, sticky=W, padx=10)
self.res45_1_points_listbox_label = Label(self.frame1, text="Titel des Tests")
self.res45_1_points_listbox_label.grid(row=31, column=0, sticky=W, padx=30)
self.res45_2_points_listbox_label = Label(self.frame1, text="Name des Teilnehmers")
self.res45_2_points_listbox_label.grid(row=32, column=0, sticky=W, padx=30)
self.res46_points_listbox_label = Label(self.frame1, text="ILIAS-Prüfungsnummer anzeigen")
self.res46_points_listbox_label.grid(row=33, column=0, sticky=W, padx=10)
self.res51_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: VERHALTEN DER FRAGE",
font=('Helvetica', 10, 'bold'))
self.res51_tol_listbox_label.grid(row=0, column=2, sticky=W, padx=10, pady=(20, 0))
self.res52_points_listbox_label = Label(self.frame2, text="Anzeige der Fragentitel")
self.res52_points_listbox_label.grid(row=1, column=2, sticky=W, padx=10)
        self.res53_points_listbox_label = Label(self.frame2, text="Automatisches Speichern")
self.res53_points_listbox_label.grid(row=4, column=2, sticky=W, padx=10)
self.res54_tol_listbox_label = Label(self.frame2, text="Fragen mischen")
self.res54_tol_listbox_label.grid(row=5, column=2, sticky=W, padx=10)
self.res55_points_listbox_label = Label(self.frame2, text="Lösungshinweise")
self.res55_points_listbox_label.grid(row=6, column=2, sticky=W, padx=10)
self.res56_points_listbox_label = Label(self.frame2, text="Direkte Rückmeldung --- not working")
self.res56_points_listbox_label.grid(row=7, column=2, sticky=W, padx=10)
self.res57_tol_listbox_label = Label(self.frame2, text="Teilnehmerantworten")
self.res57_tol_listbox_label.grid(row=8, column=2, sticky=W, padx=10)
self.res58_points_listbox_label = Label(self.frame2, text="Verpflichtende Fragen")
self.res58_points_listbox_label.grid(row=12, column=2, sticky=W, padx=10)
self.res61_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: FUNKTIONEN FÜR TEILNEHMER",
font=('Helvetica', 10, 'bold'))
self.res61_tol_listbox_label.grid(row=13, column=2, sticky=W, padx=10, pady=(20, 0))
self.res62_points_listbox_label = Label(self.frame2, text="Verwendung vorheriger Lösungen")
self.res62_points_listbox_label.grid(row=14, column=2, sticky=W, padx=10)
self.res63_points_listbox_label = Label(self.frame2, text="\"Test unterbrechen\" anzeigen")
self.res63_points_listbox_label.grid(row=15, column=2, sticky=W, padx=10)
self.res64_tol_listbox_label = Label(self.frame2, text="Nicht beantwortete Fragen")
self.res64_tol_listbox_label.grid(row=16, column=2, sticky=W, padx=10)
self.res65_points_listbox_label = Label(self.frame2, text="Fragenliste und Bearbeitungsstand anzeigen")
self.res65_points_listbox_label.grid(row=18, column=2, sticky=W, padx=10)
self.res66_points_listbox_label = Label(self.frame2, text="Fragen markieren")
self.res66_points_listbox_label.grid(row=19, column=2, sticky=W, padx=10)
self.res71_tol_listbox_label = Label(self.frame2, text="TEST ABSCHLIESSEN", font=('Helvetica', 10, 'bold'))
self.res71_tol_listbox_label.grid(row=20, column=2, sticky=W, padx=10, pady=(20, 0))
self.res72_points_listbox_label = Label(self.frame2, text="Übersicht gegebener Antworten")
self.res72_points_listbox_label.grid(row=21, column=2, sticky=W, padx=10)
self.res73_points_listbox_label = Label(self.frame2, text="Abschließende Bemerkung")
self.res73_points_listbox_label.grid(row=22, column=2, sticky=W, padx=10)
self.res74_tol_listbox_label = Label(self.frame2, text="Weiterleitung")
self.res74_tol_listbox_label.grid(row=23, column=2, sticky=W, padx=10)
self.res75_points_listbox_label = Label(self.frame2, text="Benachrichtigung")
self.res75_points_listbox_label.grid(row=24, column=2, sticky=W, padx=10)
# --------------------------- DEFINE CHECKBOXES WITH ENTRYS ---------------------------------------
# --------------------------- CHECKBOXES ---------------------------------------
self.var_online = IntVar()
self.check_online = Checkbutton(self.frame1, text="", variable=self.var_online, onvalue=1, offvalue=0)
self.check_online.deselect()
self.check_online.grid(row=10, column=1, sticky=W)
self.var_time_limited = IntVar()
self.time_limited_start_label = Label(self.frame1, text="Start")
self.time_limited_start_day_label = Label(self.frame1, text="Tag")
self.time_limited_start_day_entry = Entry(self.frame1, width=3)
self.time_limited_start_month_label = Label(self.frame1, text="Mo")
self.time_limited_start_month_entry = Entry(self.frame1, width=3)
self.time_limited_start_year_label = Label(self.frame1, text="Jahr")
self.time_limited_start_year_entry = Entry(self.frame1, width=4)
self.time_limited_start_hour_label = Label(self.frame1, text="Std")
self.time_limited_start_hour_entry = Entry(self.frame1, width=3)
self.time_limited_start_minute_label = Label(self.frame1, text="Min")
self.time_limited_start_minute_entry = Entry(self.frame1, width=3)
self.time_limited_end_label = Label(self.frame1, text="Ende")
self.time_limited_end_day_label = Label(self.frame1, text="Tag")
self.time_limited_end_day_entry = Entry(self.frame1, width=3)
self.time_limited_end_month_label = Label(self.frame1, text="Mo")
self.time_limited_end_month_entry = Entry(self.frame1, width=3)
self.time_limited_end_year_label = Label(self.frame1, text="Jahr")
self.time_limited_end_year_entry = Entry(self.frame1, width=4)
self.time_limited_end_hour_label = Label(self.frame1, text="Std")
self.time_limited_end_hour_entry = Entry(self.frame1, width=3)
self.time_limited_end_minute_label = Label(self.frame1, text="Min")
self.time_limited_end_minute_entry = Entry(self.frame1, width=3)
# self.entry.grid(row=11, column=1, sticky=W, padx=20)
self.check_time_limited = Checkbutton(self.frame1, text="", variable=self.var_time_limited, onvalue=1,
offvalue=0,
command=lambda
v=self.var_time_limited: Test_Einstellungen_GUI.show_entry_time_limited_start(
self, v))
self.check_time_limited.deselect()
self.check_time_limited.grid(row=11, column=1, sticky=W)
self.var_introduction = IntVar()
self.check_introduction = Checkbutton(self.frame1, text="", variable=self.var_introduction, onvalue=1,
offvalue=0,
command=lambda
v=self.var_introduction: Test_Einstellungen_GUI.show_introduction_textfield(
self, v))
self.check_introduction.deselect()
self.check_introduction.grid(row=15, column=1, sticky=W)
self.var_test_prop = IntVar()
self.check_test_prop = Checkbutton(self.frame1, text="", variable=self.var_test_prop, onvalue=1, offvalue=0)
self.check_test_prop.deselect()
self.check_test_prop.grid(row=16, column=1, sticky=W)
# self.var_test_password = IntVar()
# self.check_test_password = Checkbutton(self.frame1, text="", variable=self.var_test_password, onvalue=1, offvalue=0)
# self.check_test_password.deselect()
# self.check_test_password.grid(row=20, column=1, sticky=W)
self.var_specific_users = IntVar()
self.check_specific_users = Checkbutton(self.frame1, text="", variable=self.var_specific_users, onvalue=1,
offvalue=0)
self.check_specific_users.deselect()
self.check_specific_users.grid(row=21, column=1, sticky=W)
# self.var_fixed_users = IntVar()
# self.check_fixed_users = Checkbutton(self.frame1, text="", variable=self.var_fixed_users, onvalue=1, offvalue=0)
# self.check_fixed_users.deselect()
# self.check_fixed_users.grid(row=22, column=1, sticky=W)
# self.var_limit_test_runs = IntVar()
# self.check_limit_test_runs = Checkbutton(self.frame1, text="", variable=self.var_limit_test_runs, onvalue=1, offvalue=0)
# self.check_limit_test_runs.deselect()
# self.check_limit_test_runs.grid(row=22, column=1, sticky=W)
# self.var_time_betw_test_runs = IntVar()
# self.check_time_betw_test_runs = Checkbutton(self.frame1, text="", variable=self.var_time_betw_test_runs, onvalue=1, offvalue=0)
# self.check_time_betw_test_runs.deselect()
# self.check_time_betw_test_runs.grid(row=25, column=1, sticky=W)
self.var_processing_time = IntVar()
self.check_processing_time = Checkbutton(self.frame1, text="", variable=self.var_processing_time, onvalue=1,
offvalue=0)
self.check_processing_time.deselect()
self.check_processing_time.grid(row=27, column=1, sticky=W)
self.var_processing_time_reset = IntVar()
self.check_processing_time_reset = Checkbutton(self.frame1, text="", variable=self.var_processing_time_reset,
onvalue=1, offvalue=0)
self.check_processing_time_reset.deselect()
self.check_processing_time_reset.grid(row=29, column=1, sticky=W)
self.var_examview = IntVar()
self.check_examview = Checkbutton(self.frame1, text="", variable=self.var_examview, onvalue=1, offvalue=0)
self.check_examview.deselect()
self.check_examview.grid(row=30, column=1, sticky=W)
self.var_examview_test_title = IntVar()
self.check_examview_test_title = Checkbutton(self.frame1, text="", variable=self.var_examview_test_title,
onvalue=1, offvalue=0)
self.check_examview_test_title.deselect()
self.check_examview_test_title.grid(row=31, column=1, sticky=W)
self.var_examview_user_name = IntVar()
self.check_examview_user_name = Checkbutton(self.frame1, text="", variable=self.var_examview_user_name,
onvalue=1, offvalue=0)
self.check_examview_user_name.deselect()
self.check_examview_user_name.grid(row=32, column=1, sticky=W)
self.var_show_ilias_nr = IntVar()
self.check_show_ilias_nr = Checkbutton(self.frame1, text="", variable=self.var_show_ilias_nr, onvalue=1,
offvalue=0)
self.check_show_ilias_nr.deselect()
self.check_show_ilias_nr.grid(row=33, column=1, sticky=W)
self.var_autosave = IntVar()
self.check_autosave = Checkbutton(self.frame2, text="", variable=self.var_autosave, onvalue=1, offvalue=0,
command=lambda v=self.var_autosave: Test_Einstellungen_GUI.enable_autosave(self,
v))
self.check_autosave_interval_label = Label(self.frame2, text="Speicherintervall (in Sek.):")
self.check_autosave_interval_entry = Entry(self.frame2, width=10)
self.check_autosave.deselect()
self.check_autosave.grid(row=4, column=3, sticky=W)
self.var_mix_questions = IntVar()
self.check_mix_questions = Checkbutton(self.frame2, text="", variable=self.var_mix_questions, onvalue=1,
offvalue=0)
self.check_mix_questions.deselect()
self.check_mix_questions.grid(row=5, column=3, sticky=W)
self.var_show_solution_notes = IntVar()
self.check_show_solution_notes = Checkbutton(self.frame2, text="", variable=self.var_show_solution_notes,
onvalue=1, offvalue=0)
self.check_show_solution_notes.deselect()
self.check_show_solution_notes.grid(row=6, column=3, sticky=W)
self.var_direct_response = IntVar()
self.check_direct_response = Checkbutton(self.frame2, text="", variable=self.var_direct_response, onvalue=1,
offvalue=0)
self.check_direct_response.deselect()
self.check_direct_response.grid(row=7, column=3, sticky=W)
self.var_mandatory_questions = IntVar()
self.check_mandatory_questions = Checkbutton(self.frame2, text="", variable=self.var_mandatory_questions,
onvalue=1, offvalue=0)
self.check_mandatory_questions.deselect()
self.check_mandatory_questions.grid(row=12, column=3, sticky=W)
self.var_use_previous_solution = IntVar()
self.check_use_previous_solution = Checkbutton(self.frame2, text="", variable=self.var_use_previous_solution,
onvalue=1, offvalue=0)
self.check_use_previous_solution.deselect()
self.check_use_previous_solution.grid(row=14, column=3, sticky=W)
self.var_show_test_cancel = IntVar()
self.check_show_test_cancel = Checkbutton(self.frame2, text="", variable=self.var_show_test_cancel, onvalue=1,
offvalue=0)
self.check_show_test_cancel.deselect()
self.check_show_test_cancel.grid(row=15, column=3, sticky=W)
self.var_show_question_list_process_status = IntVar()
self.check_show_question_list_process_status = Checkbutton(self.frame2, text="",
variable=self.var_show_question_list_process_status,
onvalue=1, offvalue=0)
self.check_show_question_list_process_status.deselect()
self.check_show_question_list_process_status.grid(row=18, column=3, sticky=W)
self.var_question_mark = IntVar()
self.check_question_mark = Checkbutton(self.frame2, text="", variable=self.var_question_mark, onvalue=1,
offvalue=0)
self.check_question_mark.deselect()
self.check_question_mark.grid(row=19, column=3, sticky=W)
self.var_overview_answers = IntVar()
self.check_overview_answers = Checkbutton(self.frame2, text="", variable=self.var_overview_answers, onvalue=1,
offvalue=0)
self.check_overview_answers.grid(row=21, column=3, sticky=W)
self.var_show_end_comment = IntVar()
self.check_show_end_comment = Checkbutton(self.frame2, text="", variable=self.var_show_end_comment, onvalue=1,
offvalue=0,
command=lambda
v=self.var_show_end_comment: Test_Einstellungen_GUI.show_concluding_remarks(
self, v))
self.check_show_end_comment.deselect()
self.check_show_end_comment.grid(row=22, column=3, sticky=W)
self.var_forwarding = IntVar()
self.check_forwarding = Checkbutton(self.frame2, text="", variable=self.var_forwarding, onvalue=1, offvalue=0)
self.check_forwarding.deselect()
self.check_forwarding.grid(row=23, column=3, sticky=W)
self.var_notification = IntVar()
self.check_notification = Checkbutton(self.frame2, text="", variable=self.var_notification, onvalue=1,
offvalue=0)
self.check_notification.deselect()
self.check_notification.grid(row=24, column=3, sticky=W)
# --------------------------- RADIO BUTTONS ---------------------------------------
self.select_question = IntVar()
self.select_question.set(0)
self.select_question_radiobtn1 = Radiobutton(self.frame1, text="Fest definierte Fragenauswahl",
variable=self.select_question, value=0)
self.select_question_radiobtn1.grid(row=4, column=1, pady=0, sticky=W) # FIXED_QUEST_SET
self.select_question_radiobtn2 = Radiobutton(self.frame1, text="Zufällige Fragenauswahl",
variable=self.select_question, value=1)
self.select_question_radiobtn2.grid(row=5, column=1, pady=0, sticky=W) # RANDOM_QUEST_SET
self.select_question_radiobtn3 = Radiobutton(self.frame1,
text="Wiedervorlagemodus - alle Fragen eines Fragenpools",
variable=self.select_question, value=2)
self.select_question_radiobtn3.grid(row=6, column=1, pady=0, sticky=W) # DYNAMIC_QUEST_SET
self.select_anonym = IntVar()
self.select_anonym.set(0)
        self.select_anonym_radiobtn1 = Radiobutton(self.frame1, text="Testergebnisse ohne Namen",
                                                   variable=self.select_anonym, value=0, borderwidth=0)
        self.select_anonym_radiobtn1.grid(row=7, column=1, pady=0, sticky=W)
        self.select_anonym_radiobtn2 = Radiobutton(self.frame1, text="Testergebnisse mit Namen",
                                                   variable=self.select_anonym, value=1, borderwidth=0)
        self.select_anonym_radiobtn2.grid(row=8, column=1, pady=0, sticky=W)
self.select_show_question_title = IntVar()
self.select_show_question_title.set(0)
        self.select_show_question_title_radiobtn1 = Radiobutton(self.frame2, text="Fragentitel und erreichbare Punkte",
                                                                variable=self.select_show_question_title, value=0,
                                                                borderwidth=0)
        self.select_show_question_title_radiobtn1.grid(row=1, column=3, pady=0, sticky=W)
        self.select_show_question_title_radiobtn2 = Radiobutton(self.frame2, text="Nur Fragentitel",
                                                                variable=self.select_show_question_title, value=1,
                                                                borderwidth=0)
        self.select_show_question_title_radiobtn2.grid(row=2, column=3, pady=0, sticky=W)
        self.select_show_question_title_radiobtn3 = Radiobutton(self.frame2,
                                                                text="Weder Fragentitel noch erreichbare Punkte",
                                                                variable=self.select_show_question_title, value=2,
                                                                borderwidth=0)
        self.select_show_question_title_radiobtn3.grid(row=3, column=3, pady=0, sticky=W)
self.select_user_response = IntVar()
self.select_user_response.set(0)
        self.select_user_response_radiobtn1 = Radiobutton(self.frame2,
                                                          text="Antworten während des Testdurchlaufs nicht festschreiben",
                                                          variable=self.select_user_response, value=0, borderwidth=0)
        self.select_user_response_radiobtn1.grid(row=8, column=3, pady=0, sticky=W)
        self.select_user_response_radiobtn2 = Radiobutton(self.frame2,
                                                          text="Antworten bei Anzeige der Rückmeldung festschreiben",
                                                          variable=self.select_user_response, value=1, borderwidth=0)
        self.select_user_response_radiobtn2.grid(row=9, column=3, pady=0, sticky=W)
        self.select_user_response_radiobtn3 = Radiobutton(self.frame2,
                                                          text="Antworten bei Anzeige der Folgefrage festschreiben",
                                                          variable=self.select_user_response, value=2, borderwidth=0)
        self.select_user_response_radiobtn3.grid(row=10, column=3, pady=0, sticky=W)
        self.select_user_response_radiobtn4 = Radiobutton(self.frame2,
                                                          text="Antworten mit der Anzeige von Rückmeldungen oder der Folgefrage festschreiben",
                                                          variable=self.select_user_response, value=3, borderwidth=0)
        self.select_user_response_radiobtn4.grid(row=11, column=3, pady=0, sticky=W)
self.select_not_answered_questions = IntVar()
self.select_not_answered_questions.set(0)
        self.select_not_answered_questions_radiobtn1 = Radiobutton(self.frame2,
                                                                   text="Nicht beantwortete Fragen bleiben an ihrem Platz",
                                                                   variable=self.select_not_answered_questions, value=0,
                                                                   borderwidth=0)
        self.select_not_answered_questions_radiobtn1.grid(row=16, column=3, pady=0, sticky=W)
        self.select_not_answered_questions_radiobtn2 = Radiobutton(self.frame2,
                                                                   text="Nicht beantwortete Fragen werden ans Testende geschoben",
                                                                   variable=self.select_not_answered_questions, value=1,
                                                                   borderwidth=0)
        self.select_not_answered_questions_radiobtn2.grid(row=17, column=3, pady=0, sticky=W)
# --------------------------- ENTRY BOXES ---------------------------------------
self.titel_entry = Entry(self.frame1, width=47)
self.titel_entry.grid(row=1, column=1)
self.introduction_bar = Scrollbar(self.frame1)
self.introduction_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.test_start_year_entry = Entry(self.frame1, width=5)
self.test_start_year_entry.grid(row=18, column=1, sticky=W)
self.test_start_year_entry.insert(0, "YYYY")
self.test_start_month_entry = Entry(self.frame1, width=5)
self.test_start_month_entry.grid(row=18, column=1, sticky=W, padx=35)
self.test_start_month_entry.insert(0, "MM")
self.test_start_day_entry = Entry(self.frame1, width=5)
self.test_start_day_entry.grid(row=18, column=1, sticky=W, padx=70)
self.test_start_day_entry.insert(0, "DD")
self.test_start_hour_entry = Entry(self.frame1, width=5)
self.test_start_hour_entry.grid(row=18, column=1, sticky=W, padx=105)
self.test_start_hour_entry.insert(0, "HH")
self.test_start_minute_entry = Entry(self.frame1, width=5)
self.test_start_minute_entry.grid(row=18, column=1, sticky=W, padx=140)
self.test_start_minute_entry.insert(0, "mm")
self.test_end_year_entry = Entry(self.frame1, width=5)
self.test_end_year_entry.grid(row=19, column=1, sticky=W, pady=5)
self.test_end_year_entry.insert(0, "YYYY")
self.test_end_month_entry = Entry(self.frame1, width=5)
self.test_end_month_entry.grid(row=19, column=1, sticky=W, padx=35)
self.test_end_month_entry.insert(0, "MM")
self.test_end_day_entry = Entry(self.frame1, width=5)
self.test_end_day_entry.grid(row=19, column=1, sticky=W, padx=70)
self.test_end_day_entry.insert(0, "DD")
self.test_end_hour_entry = Entry(self.frame1, width=5)
self.test_end_hour_entry.grid(row=19, column=1, sticky=W, padx=105)
self.test_end_hour_entry.insert(0, "HH")
self.test_end_minute_entry = Entry(self.frame1, width=5)
self.test_end_minute_entry.grid(row=19, column=1, sticky=W, padx=140)
self.test_end_minute_entry.insert(0, "mm")
self.test_password_entry = Entry(self.frame1, width=20)
self.test_password_entry.grid(row=20, column=1, sticky=W, pady=3)
self.description_bar = Scrollbar(self.frame1)
self.description_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.description_bar.grid(row=2, column=2)
self.description_infobox.grid(row=2, column=1, pady=10)
self.description_bar.config(command=self.description_infobox.yview)
self.description_infobox.config(yscrollcommand=self.description_bar.set)
self.limit_users_max_amount_entry = Entry(self.frame1, width=5)
self.limit_users_max_amount_entry.grid(row=22, column=1, sticky=W)
self.inactivity_time_for_users_entry = Entry(self.frame1, width=5)
self.inactivity_time_for_users_entry.grid(row=23, column=1, sticky=W)
self.inactivity_time_for_users_entry.insert(0, "300")
self.limit_test_runs_entry = Entry(self.frame1, width=10)
self.limit_test_runs_entry.grid(row=25, column=1, sticky=W)
self.limit_test_runs_entry.insert(0, "3")
self.limit_time_betw_test_runs_month_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_month_entry.grid(row=26, column=1, sticky=W, pady=5)
self.limit_time_betw_test_runs_month_entry.insert(0, "MM")
self.limit_time_betw_test_runs_day_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_day_entry.grid(row=26, column=1, sticky=W, padx=35)
self.limit_time_betw_test_runs_day_entry.insert(0, "DD")
self.limit_time_betw_test_runs_hour_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_hour_entry.grid(row=26, column=1, sticky=W, padx=70)
self.limit_time_betw_test_runs_hour_entry.insert(0, "HH")
self.limit_time_betw_test_runs_minute_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_minute_entry.grid(row=26, column=1, sticky=W, padx=105)
self.limit_time_betw_test_runs_minute_entry.insert(0, "mm")
self.limit_processing_time_minutes_entry = Entry(self.frame1, width=5)
self.limit_processing_time_minutes_entry.grid(row=28, column=1, sticky=W)
self.limit_processing_time_minutes_entry.insert(0, "90")
self.concluding_remarks_bar = Scrollbar(self.frame2)
self.concluding_remarks_infobox = Text(self.frame2, height=4, width=40, font=('Helvetica', 9))
self.profile_name_label = Label(self.frame3, text="Speichern unter...")
self.profile_name_label.grid(row=0, column=0)
self.profile_name_entry = Entry(self.frame3, width=15)
self.profile_name_entry.grid(row=0, column=1)
# self.profile_oid_label = Label(self.frame3, text="Choose oid to delete")
# self.profile_oid_label.grid(row=4, column=0)
self.profile_oid_entry = Entry(self.frame3, width=10)
self.profile_oid_entry.grid(row=4, column=1)
self.load_settings_entry = Entry(self.frame3, width=10)
self.load_settings_entry.grid(row=3, column=1)
# self.delete_settings_btn = Button(self.frame3, text="Delete Profile from ID", command=Test_Einstellungen_GUI.profile_save_settings(self))
# self.delete_settings_btn.grid(row=4, column=0)
self.profile_oid_listbox_label = Label(self.frame3, text=" DB\nID")
self.profile_oid_listbox_label.grid(row=1, column=4, sticky=W)
self.profile_name_listbox_label = Label(self.frame3, text="Name")
self.profile_name_listbox_label.grid(row=1, column=5, sticky=W)
self.my_listbox_profile_oid = Listbox(self.frame3, width=5)
self.my_listbox_profile_oid.grid(row=2, column=4, sticky=W)
self.my_listbox_profile_name = Listbox(self.frame3, width=15)
self.my_listbox_profile_name.grid(row=2, column=5, sticky=W)
self.save_settings_btn = Button(self.frame3, text="Speichern", command=lambda: Test_Einstellungen_GUI.profile_save_settings(self))
self.save_settings_btn.grid(row=2, column=0)
self.load_settings_btn = Button(self.frame3, text="Profil laden", command=lambda: Test_Einstellungen_GUI.profile_load_settings(self))
self.load_settings_btn.grid(row=3, column=0)
self.delete_profile_btn = Button(self.frame3, text="Profil löschen", command=lambda: Test_Einstellungen_GUI.profile_delete(self))
self.delete_profile_btn.grid(row=4, column=0)
self.show_profiles_btn = Button(self.frame3, text="Alle gespeicherten Profile anzeigen", command=lambda: Test_Einstellungen_GUI.profile_show_db(self))
self.show_profiles_btn.grid(row=5, column=0)
#self.create_profile_btn = Button(self.frame3, text="Create Profile-Settings", command=lambda: Test_Einstellungen_GUI.create_settings(self))
#self.create_profile_btn.grid(row=6, column=0)
#Test_Einstellungen_GUI.create_settings(self, self.settings_database_path, self.settings_database_table, self.settings_db_profile_name)
def show_entry_time_limited_start(self, var):
if var.get() == 0:
self.time_limited_start_label.grid_forget()
self.time_limited_start_year_label.grid_forget()
self.time_limited_start_year_entry.grid_forget()
self.time_limited_start_month_label.grid_forget()
self.time_limited_start_month_entry.grid_forget()
self.time_limited_start_day_label.grid_forget()
self.time_limited_start_day_entry.grid_forget()
self.time_limited_start_hour_label.grid_forget()
self.time_limited_start_hour_entry.grid_forget()
self.time_limited_start_minute_label.grid_forget()
self.time_limited_start_minute_entry.grid_forget()
self.time_limited_end_label.grid_forget()
self.time_limited_end_year_label.grid_forget()
self.time_limited_end_year_entry.grid_forget()
self.time_limited_end_month_label.grid_forget()
self.time_limited_end_month_entry.grid_forget()
self.time_limited_end_day_label.grid_forget()
self.time_limited_end_day_entry.grid_forget()
self.time_limited_end_hour_label.grid_forget()
self.time_limited_end_hour_entry.grid_forget()
self.time_limited_end_minute_label.grid_forget()
self.time_limited_end_minute_entry.grid_forget()
else:
self.time_limited_start_label.grid(row=10, column=1, sticky=W, padx=50)
self.time_limited_start_day_label.grid(row=11, column=1, sticky=W, padx=30)
self.time_limited_start_month_label.grid(row=11, column=1, sticky=W, padx=55)
self.time_limited_start_year_label.grid(row=11, column=1, sticky=W, padx=80)
self.time_limited_start_hour_label.grid(row=11, column=1, sticky=W, padx=110)
self.time_limited_start_minute_label.grid(row=11, column=1, sticky=W, padx=135)
self.time_limited_end_label.grid(row=10, column=1, sticky=E, padx=50)
self.time_limited_end_day_label.grid(row=11, column=1, sticky=E, padx=110)
self.time_limited_end_month_label.grid(row=11, column=1, sticky=E, padx=85)
self.time_limited_end_year_label.grid(row=11, column=1, sticky=E, padx=55)
self.time_limited_end_hour_label.grid(row=11, column=1, sticky=E, padx=30)
self.time_limited_end_minute_label.grid(row=11, column=1, sticky=E, padx=5)
self.time_limited_start_day_entry.grid(row=12, column=1, sticky=W, padx=30)
self.time_limited_start_month_entry.grid(row=12, column=1, sticky=W, padx=55)
self.time_limited_start_year_entry.grid(row=12, column=1, sticky=W, padx=80)
self.time_limited_start_hour_entry.grid(row=12, column=1, sticky=W, padx=110)
self.time_limited_start_minute_entry.grid(row=12, column=1, sticky=W, padx=135)
self.time_limited_end_day_entry.grid(row=12, column=1, sticky=E, padx=110)
self.time_limited_end_month_entry.grid(row=12, column=1, sticky=E, padx=85)
self.time_limited_end_year_entry.grid(row=12, column=1, sticky=E, padx=55)
self.time_limited_end_hour_entry.grid(row=12, column=1, sticky=E, padx=30)
self.time_limited_end_minute_entry.grid(row=12, column=1, sticky=E, padx=5)
def show_introduction_textfield(self, introduction_var):
print(introduction_var.get())
if introduction_var.get() == 0:
self.introduction_bar.grid_forget()
self.introduction_infobox.grid_forget()
else:
self.introduction_bar.grid(row=15, column=1, sticky=E)
self.introduction_infobox.grid(row=15, column=1, padx=30)
self.introduction_bar.config(command=self.introduction_infobox.yview)
self.introduction_infobox.config(yscrollcommand=self.introduction_bar.set)
def enable_autosave(self, var):
if var.get() == 0:
self.check_autosave_interval_entry.grid_forget()
self.check_autosave_interval_label.grid_forget()
else:
self.check_autosave_interval_entry.grid(row=4, column=3, padx=10)
self.check_autosave_interval_label.grid(row=4, column=3, padx=50, sticky=W)
def show_concluding_remarks(self, var):
if var.get() == 0:
self.concluding_remarks_bar.grid_forget()
self.concluding_remarks_infobox.grid_forget()
else:
self.concluding_remarks_bar.grid(row=22, column=3, sticky=E)
self.concluding_remarks_infobox.grid(row=22, column=3, padx=30)
self.concluding_remarks_bar.config(command=self.concluding_remarks_infobox.yview)
self.concluding_remarks_infobox.config(yscrollcommand=self.concluding_remarks_bar.set)
def profile_show_db(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM " + self.settings_database_table)
profile_records = c.fetchall()
# Clear List Boxes
self.my_listbox_profile_name.delete(0, END)
self.my_listbox_profile_oid.delete(0, END)
        # Loop through results
for profile_record in profile_records:
self.my_listbox_profile_name.insert(END, profile_record[0])
self.my_listbox_profile_oid.insert(END, profile_record[len(profile_record) - 1])
self.profile_records_len = len(profile_records)
# print(profile_records[len(profile_records)-1])
conn.commit()
conn.close()
print("LOOP THROUGH... SHOW PROFILES!")
def profile_save_settings(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
# Insert into Table
c.execute(
"INSERT INTO " + self.settings_database_table + " VALUES ("
":profile_name, :entry_description, :radio_select_question, :radio_select_anonymous, :check_online, :check_time_limited, "
":check_introduction, :entry_introduction, :check_test_properties, "
":entry_test_start_year, :entry_test_start_month, :entry_test_start_day, :entry_test_start_hour, :entry_test_start_minute,"
":entry_test_end_year, :entry_test_end_month, :entry_test_end_day, :entry_test_end_hour, :entry_test_end_minute,"
":entry_test_password, :check_specific_users, :entry_limit_users, :entry_user_inactivity, :entry_limit_test_runs,"
":entry_limit_time_betw_test_run_month, :entry_limit_time_betw_test_run_day, :entry_limit_time_betw_test_run_hour, :entry_limit_time_betw_test_run_minute,"
":check_processing_time, :entry_processing_time_in_minutes, :check_processing_time_reset,"
":check_examview, :check_examview_titel, :check_examview_username, :check_show_ilias_nr,"
":radio_select_show_question_title, :check_autosave, :entry_autosave_interval, :check_mix_questions, :check_show_solution_notes, :check_direct_response,"
":radio_select_user_response, :check_mandatory_questions, :check_use_previous_solution, :check_show_test_cancel, :radio_select_not_answered_questions,"
":check_show_question_list_process_status, :check_question_mark, :check_overview_answers, :check_show_end_comment, :entry_end_comment, :check_forwarding, :check_notification)",
{
'profile_name': self.profile_name_entry.get(),
'entry_description': self.description_infobox.get("1.0", 'end-1c'),
'radio_select_question': self.select_question.get(),
'radio_select_anonymous': self.select_anonym.get(),
'check_online': self.var_online.get(),
'check_time_limited': self.var_time_limited.get(),
'check_introduction': self.var_introduction.get(),
'entry_introduction': self.introduction_infobox.get("1.0", 'end-1c'),
'check_test_properties': self.var_test_prop.get(),
'entry_test_start_year': self.test_start_year_entry.get(),
'entry_test_start_month': self.test_start_month_entry.get(),
'entry_test_start_day': self.test_start_day_entry.get(),
'entry_test_start_hour': self.test_start_hour_entry.get(),
'entry_test_start_minute': self.test_start_minute_entry.get(),
'entry_test_end_year': self.test_end_year_entry.get(),
'entry_test_end_month': self.test_end_month_entry.get(),
'entry_test_end_day': self.test_end_day_entry.get(),
'entry_test_end_hour': self.test_end_hour_entry.get(),
'entry_test_end_minute': self.test_end_minute_entry.get(),
'entry_test_password': self.test_password_entry.get(),
'check_specific_users': self.var_specific_users.get(),
'entry_limit_users': self.limit_users_max_amount_entry.get(),
'entry_user_inactivity': self.inactivity_time_for_users_entry.get(),
'entry_limit_test_runs': self.limit_test_runs_entry.get(),
'entry_limit_time_betw_test_run_month': self.limit_time_betw_test_runs_month_entry.get(),
'entry_limit_time_betw_test_run_day': self.limit_time_betw_test_runs_day_entry.get(),
'entry_limit_time_betw_test_run_hour': self.limit_time_betw_test_runs_hour_entry.get(),
'entry_limit_time_betw_test_run_minute': self.limit_time_betw_test_runs_minute_entry.get(),
'check_processing_time': self.var_processing_time.get(),
'entry_processing_time_in_minutes': self.limit_processing_time_minutes_entry.get(),
'check_processing_time_reset': self.var_processing_time_reset.get(),
'check_examview': self.var_examview.get(),
'check_examview_titel': self.var_examview_test_title.get(),
'check_examview_username': self.var_examview_user_name.get(),
'check_show_ilias_nr': self.var_show_ilias_nr.get(),
'radio_select_show_question_title': self.select_show_question_title.get(),
'check_autosave': self.var_autosave.get(),
'entry_autosave_interval': self.check_autosave_interval_entry.get(),
'check_mix_questions': self.var_mix_questions.get(),
'check_show_solution_notes': self.var_show_solution_notes.get(),
'check_direct_response': self.var_direct_response.get(),
'radio_select_user_response': self.select_user_response.get(),
'check_mandatory_questions': self.var_mandatory_questions.get(),
'check_use_previous_solution': self.var_use_previous_solution.get(),
'check_show_test_cancel': self.var_show_test_cancel.get(),
'radio_select_not_answered_questions': self.select_not_answered_questions.get(),
'check_show_question_list_process_status': self.var_show_question_list_process_status.get(),
'check_question_mark': self.var_question_mark.get(),
'check_overview_answers': self.var_overview_answers.get(),
'check_show_end_comment': self.var_show_end_comment.get(),
'entry_end_comment': self.concluding_remarks_infobox.get("1.0", 'end-1c'),
'check_forwarding': self.var_forwarding.get(),
'check_notification': self.var_notification.get()
}
)
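        # sqlite3 binds each ":name" placeholder above from the matching dict key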
conn.commit()
conn.close()
print("GOT VALUES")
def profile_load_settings(self):
print("LOAD")
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("SELECT * FROM " + self.settings_database_table + " WHERE oid =" + self.load_settings_entry.get())
profile_records = c.fetchall()
        # Loop through results
for profile_record in profile_records:
            self.profile_name_entry.delete(0, END)
            self.profile_name_entry.insert(0, profile_record[0])  # profile_name -> profile_record[0]
self.description_infobox.delete('1.0', END)
self.description_infobox.insert('1.0', profile_record[1])
self.select_question.set(profile_record[2])
self.select_anonym.set(profile_record[3])
self.var_online.set(profile_record[4])
self.var_time_limited.set(profile_record[5])
self.var_introduction.set(profile_record[6])
self.introduction_infobox.delete('1.0', END)
self.introduction_infobox.insert('1.0', profile_record[7])
self.var_test_prop.set(profile_record[8])
self.test_start_year_entry.delete(0, END)
self.test_start_year_entry.insert(0, profile_record[9])
self.test_start_month_entry.delete(0, END)
self.test_start_month_entry.insert(0, profile_record[10])
self.test_start_day_entry.delete(0, END)
self.test_start_day_entry.insert(0, profile_record[11])
self.test_start_hour_entry.delete(0, END)
self.test_start_hour_entry.insert(0, profile_record[12])
self.test_start_minute_entry.delete(0, END)
self.test_start_minute_entry.insert(0, profile_record[13])
self.test_end_year_entry.delete(0, END)
self.test_end_year_entry.insert(0, profile_record[14])
self.test_end_month_entry.delete(0, END)
self.test_end_month_entry.insert(0, profile_record[15])
self.test_end_day_entry.delete(0, END)
self.test_end_day_entry.insert(0, profile_record[16])
self.test_end_hour_entry.delete(0, END)
self.test_end_hour_entry.insert(0, profile_record[17])
self.test_end_minute_entry.delete(0, END)
self.test_end_minute_entry.insert(0, profile_record[18])
self.test_password_entry.delete(0, END)
self.test_password_entry.insert(0, profile_record[19])
self.var_specific_users.set(profile_record[20])
self.limit_users_max_amount_entry.delete(0, END)
self.limit_users_max_amount_entry.insert(0, profile_record[21])
self.inactivity_time_for_users_entry.delete(0, END)
self.inactivity_time_for_users_entry.insert(0, profile_record[22])
self.limit_test_runs_entry.delete(0, END)
self.limit_test_runs_entry.insert(0, profile_record[23])
self.limit_time_betw_test_runs_month_entry.delete(0, END)
self.limit_time_betw_test_runs_month_entry.insert(0, profile_record[24])
self.limit_time_betw_test_runs_day_entry.delete(0, END)
self.limit_time_betw_test_runs_day_entry.insert(0, profile_record[25])
self.limit_time_betw_test_runs_hour_entry.delete(0, END)
self.limit_time_betw_test_runs_hour_entry.insert(0, profile_record[26])
self.limit_time_betw_test_runs_minute_entry.delete(0, END)
self.limit_time_betw_test_runs_minute_entry.insert(0, profile_record[27])
self.var_processing_time.set(profile_record[28])
self.limit_processing_time_minutes_entry.delete(0, END)
self.limit_processing_time_minutes_entry.insert(0, profile_record[29])
self.var_processing_time_reset.set(profile_record[30])
self.var_examview.set(profile_record[31])
self.var_examview_test_title.set(profile_record[32])
self.var_examview_user_name.set(profile_record[33])
self.var_show_ilias_nr.set(profile_record[34])
self.select_show_question_title.set(profile_record[35])
self.var_autosave.set(profile_record[36])
self.check_autosave_interval_entry.delete(0, END)
self.check_autosave_interval_entry.insert(0, profile_record[37])
self.var_mix_questions.set(profile_record[38])
self.var_show_solution_notes.set(profile_record[39])
self.var_direct_response.set(profile_record[40])
self.select_user_response.set(profile_record[41])
self.var_mandatory_questions.set(profile_record[42])
self.var_use_previous_solution.set(profile_record[43])
self.var_show_test_cancel.set(profile_record[44])
self.select_not_answered_questions.set(profile_record[45])
self.var_show_question_list_process_status.set(profile_record[46])
self.var_question_mark.set(profile_record[47])
self.var_overview_answers.set(profile_record[48])
self.var_show_end_comment.set(profile_record[49])
self.concluding_remarks_infobox.delete('1.0', END)
self.concluding_remarks_infobox.insert('1.0', profile_record[50])
self.var_forwarding.set(profile_record[51])
self.var_notification.set(profile_record[52])
conn.commit()
conn.close()
def profile_delete(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("DELETE from " + self.settings_database_table + " WHERE oid= " + self.profile_oid_entry.get())
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
def profile_delete_last(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
self.profile_oid_entry.insert(0, self.profile_records_len)
c.execute("DELETE from " + self.settings_database_table + " WHERE oid= " + self.profile_oid_entry.get())
print("LAST DB ENTRY DELETED")
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
# For create test settings --> Toplevel must be opened (Test-Settings Window)
def create_settings(self, settings_database_path, settings_database_table, selected_settings_db_profile_name):
self.settings_database_path = settings_database_path
self.settings_database_table = settings_database_table
self.settings_db_profile_name = selected_settings_db_profile_name
print("=======")
print(self.settings_database_path)
print(self.settings_database_table)
print(self.settings_db_profile_name)
print("=======")
        ###################### BUILD DATABASE ENTRIES AND INDEX DICT ###################
        # Build a dictionary from two lists (column name -> column index)
self.settings_db_find_entries = []
self.settings_db_find_indexes = []
self.settings_db_column_names_list = []
self.settings_collection_of_question_titles = []
connect = sqlite3.connect(self.settings_database_path)
cursor = connect.execute('select * from ' + self.settings_database_table)
self.settings_db_column_names_list = list(map(lambda x: x[0], cursor.description))
self.db_column_names_string = ', :'.join(self.settings_db_column_names_list)
self.db_column_names_string = ":" + self.db_column_names_string
for i in range(len(self.settings_db_column_names_list)):
self.settings_db_find_indexes.append(i)
"""
        # list(map(lambda x: x[0], cursor.description)) reads the column names from the DB
cursor = conn.execute('select * from ' + self.ff_database_table)
db_column_names_list = list(map(lambda x: x[0], cursor.description))
db_column_names_string = ', :'.join(db_column_names_list)
db_column_names_string = ":" + db_column_names_string
"""
self.settings_db_entry_to_index_dict = dict(
zip((self.settings_db_column_names_list), (self.settings_db_find_indexes)))
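        # resulting mapping (illustrative): {"profile_name": 0, "entry_description": 1, ...}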
connect.commit()
connect.close()
#####
        # connect to the database
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
#c.execute("SELECT * FROM " + self.settings_database_table + " WHERE profile_name =" + self.settings_db_profile_name)
c.execute("SELECT * FROM " + self.settings_database_table)
profile_records = c.fetchall()
# Loop through Results
for profile_record in profile_records:
if profile_record[self.settings_db_entry_to_index_dict["profile_name"]] == self.settings_db_profile_name:
self.profile_name = profile_record[self.settings_db_entry_to_index_dict["profile_name"]]
self.description = profile_record[self.settings_db_entry_to_index_dict["entry_description"]]
self.question_type = profile_record[self.settings_db_entry_to_index_dict["radio_select_question"]]
self.anonym = profile_record[self.settings_db_entry_to_index_dict["radio_select_anonymous"]]
self.online = profile_record[self.settings_db_entry_to_index_dict["check_online"]]
self.time_limited = profile_record[self.settings_db_entry_to_index_dict["check_time_limited"]]
self.introduction = profile_record[self.settings_db_entry_to_index_dict["check_introduction"]]
self.introduction_infobox = profile_record[self.settings_db_entry_to_index_dict["entry_introduction"]]
self.test_prop = profile_record[self.settings_db_entry_to_index_dict["check_test_properties"]]
self.test_start_year = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_year"]]
self.test_start_month = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_month"]]
self.test_start_day = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_day"]]
self.test_start_hour = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_hour"]]
self.test_start_minute = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_minute"]]
self.test_end_year = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_year"]]
self.test_end_month = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_month"]]
self.test_end_day = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_day"]]
self.test_end_hour = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_hour"]]
self.test_end_minute = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_minute"]]
self.test_password = profile_record[self.settings_db_entry_to_index_dict["entry_test_password"]]
self.specific_users = profile_record[self.settings_db_entry_to_index_dict["check_specific_users"]]
self.limit_users_max = profile_record[self.settings_db_entry_to_index_dict["entry_limit_users"]]
self.inactivity_time_for_users = profile_record[self.settings_db_entry_to_index_dict["entry_user_inactivity"]]
self.limit_test_runs = profile_record[self.settings_db_entry_to_index_dict["entry_limit_test_runs"]]
self.limit_time_betw_test_runs_month = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_month"]]
self.limit_time_betw_test_runs_day = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_day"]]
self.limit_time_betw_test_runs_hour = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_hour"]]
self.limit_time_betw_test_runs_minute = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_minute"]]
self.processing_time = profile_record[self.settings_db_entry_to_index_dict["check_processing_time"]]
self.limit_processing_time_minutes = profile_record[self.settings_db_entry_to_index_dict["entry_processing_time_in_minutes"]]
self.processing_time_reset = profile_record[self.settings_db_entry_to_index_dict["check_processing_time_reset"]]
self.examview = profile_record[self.settings_db_entry_to_index_dict["check_examview"]]
self.examview_test_title = profile_record[self.settings_db_entry_to_index_dict["check_examview_titel"]]
self.examview_user_name = profile_record[self.settings_db_entry_to_index_dict["check_examview_username"]]
self.show_ilias_nr = profile_record[self.settings_db_entry_to_index_dict["check_show_ilias_nr"]]
self.select_show_question_title = profile_record[self.settings_db_entry_to_index_dict["radio_select_show_question_title"]]
self.autosave = profile_record[self.settings_db_entry_to_index_dict["check_autosave"]]
self.autosave_interval = profile_record[self.settings_db_entry_to_index_dict["entry_autosave_interval"]]
self.mix_questions = profile_record[self.settings_db_entry_to_index_dict["check_mix_questions"]]
self.show_solution_notes = profile_record[self.settings_db_entry_to_index_dict["check_show_solution_notes"]]
self.direct_response = profile_record[self.settings_db_entry_to_index_dict["check_direct_response"]]
self.select_user_response = profile_record[self.settings_db_entry_to_index_dict["radio_select_user_response"]]
self.mandatory_questions = profile_record[self.settings_db_entry_to_index_dict["check_mandatory_questions"]]
self.use_previous_solution = profile_record[self.settings_db_entry_to_index_dict["check_use_previous_solution"]]
self.show_test_cancel = profile_record[self.settings_db_entry_to_index_dict["check_show_test_cancel"]]
self.select_not_answered_questions = profile_record[self.settings_db_entry_to_index_dict["radio_select_not_answered_questions"]]
self.show_question_list_process_status = profile_record[self.settings_db_entry_to_index_dict["check_show_question_list_process_status"]]
self.question_mark = profile_record[self.settings_db_entry_to_index_dict["check_question_mark"]]
self.overview_answers = profile_record[self.settings_db_entry_to_index_dict["check_overview_answers"]]
self.show_end_comment = profile_record[self.settings_db_entry_to_index_dict["check_show_end_comment"]]
self.concluding_remarks_infobox = profile_record[self.settings_db_entry_to_index_dict["entry_end_comment"]]
self.forwarding = profile_record[self.settings_db_entry_to_index_dict["check_forwarding"]]
self.notification = profile_record[self.settings_db_entry_to_index_dict["check_notification"]]
self.mytree = ET.parse(self.test_qti_file_path_output)
self.myroot = self.mytree.getroot()
# hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
self.duration_time = int(self.limit_processing_time_minutes)
self.duration_time_hours = self.duration_time // 60
self.duration_time_minutes = self.duration_time % 60
# Format of duration: P0Y0M0DT1H30M0S
self.duration = "P0Y0M0DT" + str(self.duration_time_hours) + "H" + str(self.duration_time_minutes) + "M0S"
for qticomment in self.myroot.iter('qticomment'):
qticomment.text = self.description
break
for duration in self.myroot.iter('duration'):
duration.text = self.duration
break
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
qticomment = ET.SubElement(assessment, 'qticomment')
qticomment.text = self.description
for qtimetadatafield in self.myroot.iter('qtimetadatafield'):
if qtimetadatafield.find('fieldlabel').text == "anonymity":
                    qtimetadatafield.find('fieldentry').text = str(self.anonym)
if self.anonym == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <ANONYM>")
if qtimetadatafield.find('fieldlabel').text == "question_set_type":
if self.question_type == 0:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
# print("WRITE FIXED-Question")
elif self.question_type == 1:
qtimetadatafield.find('fieldentry').text = "RANDOM_QUEST_SET"
# print("WRITE RANDOM-Question")
elif self.question_type == 2:
qtimetadatafield.find('fieldentry').text = "DYNAMIC_QUEST_SET"
# print("WRITE DYNAMIC-Question")
else:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
print("NO ENTRY IN <QUESTION_TYPE> ")
# if qtimetadatafield.find('fieldlabel').text == "author":
# qtimetadatafield.find('fieldentry').text = str(Formelfrage.autor_entry.get())
if qtimetadatafield.find('fieldlabel').text == "reset_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.processing_time_reset)
if self.processing_time_reset == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <RESET PROCESSING TIME>")
if qtimetadatafield.find('fieldlabel').text == "password":
qtimetadatafield.find('fieldentry').text = str(self.test_password)
if qtimetadatafield.find('fieldlabel').text == "allowedUsers":
qtimetadatafield.find('fieldentry').text = str(self.limit_users_max)
if qtimetadatafield.find('fieldlabel').text == "allowedUsersTimeGap":
qtimetadatafield.find('fieldentry').text = str(self.inactivity_time_for_users)
if qtimetadatafield.find('fieldlabel').text == "nr_of_tries":
qtimetadatafield.find('fieldentry').text = str(self.limit_test_runs)
if qtimetadatafield.find('fieldlabel').text == "pass_waiting":
qtimetadatafield.find('fieldentry').text = str(self.limit_time_betw_test_runs_month) + ":0" + str(
self.limit_time_betw_test_runs_day) + ":" + str(
self.limit_time_betw_test_runs_hour) + ":" + str(self.limit_time_betw_test_runs_minute) + ":00"
if self.limit_time_betw_test_runs_month == "MM":
qtimetadatafield.find('fieldentry').text = "00:000:00:00:00"
print(
" >WARNING< NO limit_time_betw_test_runs SET.. --> set limit_time to \"00:000:00:00:00\" ")
                # Exam view (kiosk) bitmask: all three checks (view + title + name) = "7" / view + title = "3" / view + name = "5" / view only = "1" / "0" = disabled
if qtimetadatafield.find('fieldlabel').text == "kiosk":
if self.examview == 0:
qtimetadatafield.find('fieldentry').text = "0"
elif self.examview == 1:
qtimetadatafield.find('fieldentry').text = "1"
elif self.examview == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "3"
elif self.examview == 1 and self.examview_user_name == 1:
qtimetadatafield.find('fieldentry').text = "5"
elif self.examview == 1 and self.examview_user_name == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "7"
# if qtimetadatafield.find('fieldlabel').text == "use_previous_answers":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "title_output":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "examid_in_test_pass":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "show_summary":
# qtimetadatafield.find('fieldentry').text = "0"
if qtimetadatafield.find('fieldlabel').text == "show_cancel":
qtimetadatafield.find('fieldentry').text = str(self.show_test_cancel)
# if qtimetadatafield.find('fieldlabel').text == "show_marker":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "fixed_participants":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "showinfo":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "shuffle_questions":
qtimetadatafield.find('fieldentry').text = str(self.mix_questions)
if qtimetadatafield.find('fieldlabel').text == "processing_time":
# self.minutes = self.limit_processing_time_minutes
hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
print("len_min_to_hours: " + str(hours_from_minutes))
qtimetadatafield.find('fieldentry').text = "0" + hours_from_minutes
if qtimetadatafield.find('fieldlabel').text == "enable_examview":
qtimetadatafield.find('fieldentry').text = str(self.examview)
# if qtimetadatafield.find('fieldlabel').text == "show_examview_pdf":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "starting_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_start_year) + "Y" + str(
self.test_start_month) + "M" + str(self.test_start_day) + "DT" + str(
self.test_start_hour) + "H" + str(self.test_start_minute) + "M" + "0S"
if self.test_start_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y1M1DT00H0M0S"
print(" >WARNING< NO STARTING TIME SET.. --> set START to \"P2020Y1M1DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "ending_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_end_year) + "Y" + str(self.test_end_month) + "M" + str(self.test_end_day) + "DT" + str(self.test_end_hour) + "H" + str(self.test_end_minute) + "M" + "0S"
if self.test_end_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y12M30DT00H0M0S"
print(" >WARNING< NO ENDING TIME SET.. --> set END to \"P2020Y12M30DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "autosave":
qtimetadatafield.find('fieldentry').text = str(self.autosave)
if qtimetadatafield.find('fieldlabel').text == "autosave_ival":
qtimetadatafield.find('fieldentry').text = str(self.autosave_interval)
# if qtimetadatafield.find('fieldlabel').text == "offer_question_hints":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "obligations_enabled":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "enable_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.processing_time)
# if qtimetadatafield.find('fieldlabel').text == "mark_step_0":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "mark_step_1":
# qtimetadatafield.find('fieldentry').text = "99"
# tree = ET.ElementTree(questestinterop)
# tree.write("WORKED_neuerAnfang.xml")
print("Write Test_Settings to File --- ",self.profile_name)
self.mytree.write(self.test_qti_file_path_output)
print("Create Test WITH Test_settings")
| StarcoderdataPython |
3293930 | import pytest
import time
import stl_path
from trex_stl_lib.api import *
"""
An example of how to use TRex for functional tests
using the stateless API with service mode
"""
@pytest.mark.parametrize("protocol", ["TCP", "UDP", "ICMP"])
def test_one_packet(trex, protocol):
tx_port, rx_port = trex.get_all_ports()
trex.reset(ports=[tx_port, rx_port])
    # activate service mode on the RX port
trex.set_service_mode(ports=rx_port)
    # build the base packet; the L4 layer is added below according to 'protocol'
pkt = Ether() / IP()
pkt[IP].src = "172.16.58.3"
pkt[IP].dst = "172.16.17.32"
if protocol == "TCP":
pkt = pkt / TCP()
elif protocol == "UDP":
pkt = pkt / UDP()
elif protocol == "ICMP":
pkt = pkt / ICMP()
# start a capture
capture = trex.start_capture(rx_ports=rx_port)
    # push the packet to the TX port... we need 'force' because this is under service mode
    print('\nSending 1 {} packet(s) on port {}'.format(protocol, tx_port))
trex.push_packets(ports=tx_port, pkts=pkt, force=True)
trex.wait_on_traffic(ports=tx_port, rx_delay_ms=1000)
    # we need to block with VirtualBox because the delay is huge
time.sleep(0.5)
rx_pkts = []
trex.stop_capture(capture_id=capture['id'], output=rx_pkts)
    print('\nReceived {} packets on port {}:\n'.format(len(rx_pkts), rx_port))
trex.set_service_mode(ports=rx_port, enabled=False)
# got back one packet
assert(len(rx_pkts) == 1)
rx_scapy_pkt = Ether(rx_pkts[0]['binary'])
# Check if it's the same packet
assert(protocol in rx_scapy_pkt)
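
# Note: the "trex" argument is a pytest fixture assumed to be provided elsewhere
# (e.g. in conftest.py). A minimal sketch of such a fixture -- the server address
# and scope are assumptions, not part of the original file:
#
# @pytest.fixture(scope="module")
# def trex():
#     client = STLClient(server="127.0.0.1")
#     client.connect()
#     yield client
#     client.disconnect()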
| StarcoderdataPython |
3353155 | """
# LARGEST DIVISIBLE SUBSET
Given a set of distinct positive integers, find the largest subset such that every pair (Si, Sj) of elements in this subset satisfies:
Si % Sj = 0 or Sj % Si = 0.
If there are multiple solutions, return any subset is fine.
Example 1:
Input: [1,2,3]
Output: [1,2] (of course, [1,3] will also be ok)
Example 2:
Input: [1,2,4,8]
Output: [1,2,4,8]
"""
class Solution:
    def largestDivisibleSubset(self, nums):
        # sort so divisibility only has to be checked against smaller elements
        nums.sort()
        n = len(nums)
        if n == 0:
            return []
        # dp[i] = [index of the previous element in the chain, length of the chain ending at i]
        dp = [[i, 1] for i in range(n)]
        last = 0
        maxm = 0
        for i in range(1, n):
            for j in range(i - 1, -1, -1):
                if nums[i] % nums[j] == 0 and dp[j][1] >= dp[i][1]:
                    dp[i][1] = dp[j][1] + 1
                    dp[i][0] = j
            if maxm < dp[i][1]:
                maxm = dp[i][1]
                last = i
        # walk the predecessor links back from the end of the longest chain
        res = []
        while dp[last][0] != last:
            res.append(nums[last])
            last = dp[last][0]
        res.append(nums[last])
        res.reverse()
        return res
 | StarcoderdataPython |
1766840 | import numpy as np
from dezero import Variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
x = Variable(np.array(1.0))
y = F.tanh(x)
x.name = 'x'
y.name = 'y'
y.backward(create_graph=True)
iters = 1
for i in range(iters):
gx = x.grad
x.clear_grad()
gx.backward(create_graph=True)
gx = x.grad
gx.name = 'gx' + str(iters+1)
plot_dot_graph(gx, verbose=False, to_file='tanh-2.png')
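# The single loop iteration differentiates gx (= d tanh/dx) once more, so the
# plotted graph is the computation graph of tanh's second derivative ('gx2');
# raising `iters` would plot correspondingly higher-order derivatives.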
| StarcoderdataPython |
3209121 | <gh_stars>0
# -*- coding: utf-8 -*-
""".. moduleauthor:: <NAME>"""
from dataclasses import dataclass
from typing import final, Optional, Dict, List
@final
@dataclass
class RtTfF:
r_tp: int = 0
r_tn: int = 0
r_fp: int = 0
r_fn: int = 0
def merge_ttff_fun(container: RtTfF, target: RtTfF, /) -> int:
sum_added: int = 0
container.r_fn += target.r_fn
sum_added += target.r_fn
container.r_fp += target.r_fp
sum_added += target.r_fp
container.r_tn += target.r_tn
sum_added += target.r_tn
container.r_tp += target.r_tp
sum_added += target.r_tp
return sum_added
def check_ttff_merged(merged_sum: List[Dict[int, int]], /) -> None:
check_sum: Optional[int] = None
for end_cl in merged_sum:
for end_sum in end_cl.values():
if check_sum is None:
check_sum = end_sum
if check_sum != end_sum:
print(f"\nWARNING!! Inconsistent TTFF-data {check_sum} != {end_sum}\n\n")
| StarcoderdataPython |
1717089 | <filename>NoSQLAttack/scanIP.py<gh_stars>1-10
import socket
import shodan
import pymongo
import globalVar as GlobalVar
from mongo import netAttacks
def scanMongoDBIP():
SHODAN_API_KEY = "<KEY>";
api = shodan.Shodan(SHODAN_API_KEY);
print 'Start Scanning.....'
try:
results = api.search('mongoDB')
# print 'Results found:%s' % results['total']
for index in range(1,10):
print str(index)+'_Attacked IP : %s' % results['matches'][index]['ip_str']
# select = raw_input("Get more IP (y/n)?")
select = raw_input("Select IP to attack:")
GlobalVar.set_victim(results['matches'][int(select)]['ip_str'])
GlobalVar.set_optionSet(0, True)
GlobalVar.set_myIP('127.0.0.1')
GlobalVar.set_optionSet(4, True)
start = raw_input("Start Default Configuration Attack(y/n)?")
if start == 'y':
netAttacks(GlobalVar.get_victim(), GlobalVar.get_dbPort(), GlobalVar.get_myIP(), GlobalVar.get_myPort())
# for result in results['matches']:
# print 'Attacked IP: %s' % result['ip_str']
#print result['data']
#print 'hostnames:' % result['hostnames'];
#print ' '
except shodan.APIError, e:
print 'Error:%s' % e
#if __name__ == "__main__":
# scanMongoDBIP()
# (1) 255.255.255.255 is a broadcast address; addresses beginning with 255 cannot be used
# (2) The last IP in each segment is a broadcast address and cannot be assigned to a particular computer, e.g. 192.168.1.255
# (3) 127.0.0.1 cannot be used for communication between computers; addresses beginning with 127 are loopback
# (4) 0.0.0.0 is an invalid address and cannot be used
# (5) 10.0.0.0~10.255.255.255, 192.168.0.0~192.168.255.255 and 172.16.0.0~172.31.255.255 are private address ranges
# (6) 169.254.0.0~169.254.255.255 is assigned by the Windows operating system; an IP in this range means the host currently cannot reach the network
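# Hypothetical helper (not part of the original tool) that folds the reserved
# ranges listed above into one predicate; a sketch only -- it mirrors the
# comments, not a complete IANA/RFC reserved-range list.
def is_scannable_ip(ip):
    parts = [int(p) for p in ip.split('.')]
    if parts[0] in (0, 127, 255):
        return False  # invalid / loopback / broadcast leading octet
    if parts[0] == 10:
        return False  # private 10.0.0.0/8
    if parts[0] == 172 and 16 <= parts[1] <= 31:
        return False  # private 172.16.0.0/12
    if parts[0] == 192 and parts[1] == 168:
        return False  # private 192.168.0.0/16
    if parts[0] == 169 and parts[1] == 254:
        return False  # Windows APIPA / link-local range
    if parts[3] in (0, 255):
        return False  # network or broadcast address of a /24
    return True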
def scanMongoDBIP_1():
print "1_A class IP"
print "2_B class IP"
print "3_C class IP"
select = raw_input("Select IP class:")
print 'Start Scanning.....'
if select == "1":
scan_A_class()
def scan_A_class():
for part1 in range(1, 126):
for part2 in range(0, 255):
for part3 in range(0, 255):
for part4 in range(0, 254):
print "test"
IP = str(part1) + "." + str(part2) + "." + str(part3) + "." + str(part4);
# check = mongoScan(IP, 27017);
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((IP, 27017))
if result == 0:
print IP+"Port is open"
else:
print IP+"Port is not open"
# if (check[0] == 0):
# print IP;
def mongoScan(ip,port):
try:
conn = pymongo.MongoClient(ip,port)
try:
dbVer= conn.server_info()['version']
conn.close();
return [0,dbVer]
except Exception, e:
if str(e).find('need to login')!=-1:#If find the 'need to login' in error message, we can consider target need credentials
conn.close();
return[1,None]
else:
conn.close();
return[2,None]
except:
return [3,None]
| StarcoderdataPython |
117396 | """Queries
This module contains MongoDB queries for the Squirrel program.
Examples
python -m unittest tests.test_queries
"""
import sys
from pprint import pprint
from typing import List
from bson import ObjectId
from pymongo import MongoClient
from pymongo.database import Database
from pymongo.errors import ServerSelectionTimeoutError
from pymongo import ReturnDocument
from squirrel.helpers import *
def has_database(db_name: str) -> bool:
if has_client():
return db_name in MongoClient().list_database_names()
return False
def has_client(client_uri: str = None) -> bool:
timeout = 500
try:
running_client = MongoClient(
client_uri, serverSelectionTimeoutMS=timeout)
except ServerSelectionTimeoutError as e:
print("no running mongo instance detected!")
print("run 'service mongod status'")
return False
else:
ver = running_client.server_info().get('version')
#print(f"MongoDB version {ver} available!")
return True
def create_database(db_name: str, collection: str, *initial: dict) -> Database:
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
else:
try:
col.insert_many(initial)
except Exception as e:
print(e)
return None
else:
return db
def insert_item(db_name: str, collection: str, item: dict, **kwargs) -> ObjectId:
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
try:
x = col.insert_one(item, kwargs)
except Exception as e:
print(e)
return None
else:
return x.inserted_id
def insert_many_items(db_name: str, collection: str, *items):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
try:
ins = col.insert_many(items)
except Exception as e:
print(e)
return None
else:
return ins.inserted_ids
def insert_unique_item(db_name: str, collection: str, query: dict):
if item_exists(db_name, collection, query):
return None
return insert_item(db_name, collection, query)
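# Dedup sketch for the helper above (hypothetical names): the second call
# returns None because a matching document already exists.
#   insert_unique_item("squirrel", "tags", {"name": "dp"})  # -> ObjectId(...)
#   insert_unique_item("squirrel", "tags", {"name": "dp"})  # -> None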
def item_exists(db_name: str, collection: str, query: dict) -> bool:
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return False
else:
return col.find_one(query) != None
def get_item(db_name: str, collection: str, it: dict, **kwargs):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
return col.find_one(it, kwargs)
def get_many_items(db_name: str, collection: str, **kwargs):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
return col.find({}, kwargs)
def get_all_items_in_collection(db_name: str, collection: str) -> List:
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
cursor = col.find({})
except Exception as e:
print(e)
return None
else:
result = []
for docu in cursor:
#pprint(docu)
result += [docu]
return result
def update_item(db_name: str, collection: str, to_update: dict, changes: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
new_values = {"$set": changes}
return col.update_one(to_update, new_values)
def update_many(db_name: str, collection: str, query: dict, changes: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
new_values = {"$set": changes}
return col.update_many(query, new_values)
def insert_embedded_document(db_name: str, collection: str, query: dict, new_item: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
new_values = {"$push": new_item}
return col.update_one(query, new_values)
def find_one_and_update(db_name: str, collection: str, fil: dict, update: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
        try:
            new_values = {"$set": update}
            # col.update() is deprecated; find_one_and_update matches this
            # function's name and returns the document after the change
            it = col.find_one_and_update(
                fil,
                new_values,
                return_document=ReturnDocument.AFTER
            )
except Exception as e:
print(e)
return None
else:
return it
def add_field(db_name: str, collection: str, it: dict, new_value: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
new_values = {"$inc": new_value}
return col.update_one(it, new_values)
def remove_one(db_name: str, collection: str, item: dict, **kwargs):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
try:
x = col.delete_one(item, kwargs)
except Exception as e:
print(e)
return None
else:
return x
def remove_many(db_name: str, collection: str, fil: dict, **kwargs):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
try:
x = col.delete_many(fil, kwargs)
except Exception as e:
print(e)
return None
else:
return x
def query(db_name: str, collection: str, query: dict):
try:
client = MongoClient()
db = client[db_name]
col = db[collection]
except Exception as e:
print(e)
return None
else:
return col.find(query)
def handle_code_change(db_name, col: str, incoming: dict, existing: dict, named: str) -> None:
    """
    Updates the stored document for a specified code object.
    If `named` is not yet among the document's version names, the incoming
    version is appended as a new version. Otherwise the docstring and source
    of the matching stored version are patched in place.
    Parameters
    ----------
    db_name : str
        Name of the database holding the code documents.
    col : str
        Name of the collection to update.
    incoming : dict
        Incoming document whose last entry in ``versions`` carries the change.
    existing : dict
        The stored document for the same code object.
    named : str
        The version name the change applies to.
    Returns
    -------
    None
        The database is updated in place.
    """
name = existing['name']
incoming_version = incoming['versions'][-1]
incoming_docstring = incoming_version['docstring']
incoming_source = incoming_version['source']
ver_names = [ver['version_name'] for ver in existing['versions']]
if named not in ver_names:
insert_embedded_document(
db_name=db_name,
collection=col,
query={"name": name},
new_item={"versions": incoming_version}
)
return
for version in existing['versions']:
version_name = version['version_name']
existing_docstring = version['docstring']
existing_source = version['source']
if incoming_docstring != existing_docstring:
update_item(
db_name,
col,
{
"name": name,
"versions.version_name": named
},
{"versions.$.docstring": incoming_docstring}
)
if incoming_source != existing_source:
update_item(
db_name,
col,
{
"name": name,
"versions.version_name": named
},
{"versions.$.source": incoming_source}
)
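# Rough call shape for the dispatcher above (hypothetical documents): when
# `named` matches an existing version_name, that version's docstring/source
# are patched in place; otherwise the incoming version is appended.
#   handle_code_change("squirrel", "functions", incoming_doc, stored_doc, "v2")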
def get_version_field(db: str, field: str, citizen: str, name: str, version: str):
document = get_item(
db,
COLLECTIONS[citizen],
{"name": name},
**{'name': 1, 'type': 1, 'versions': 1}
)
for ver in document['versions']:
if ver['version_name'] == version:
result = ver[field]
return result
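
if __name__ == "__main__":
    # Smoke-test sketch (assumes a local mongod is running; database and
    # collection names here are made up for illustration).
    if has_client():
        insert_item("squirrel_demo", "notes", {"name": "hello", "n": 1})
        pprint(get_item("squirrel_demo", "notes", {"name": "hello"}, name=1, n=1))
        print(item_exists("squirrel_demo", "notes", {"name": "hello"}))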
| StarcoderdataPython |
3396510 | <reponame>josevictorp81/Uri-questions-solutions
import math
# read two points, one "x y" pair per line
x1, y1 = map(float, input().split())
x2, y2 = map(float, input().split())
# Euclidean distance between the points, to four decimal places
# (math.hypot(x2 - x1, y2 - y1) would compute the same value)
print('{:.4f}'.format(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)))
| StarcoderdataPython
3234742 | import scrapy
from scrapy_splash import SplashRequest
from ..items import CareerspiderItem
class CareerFairSpider(scrapy.Spider):
name = "careerfair_spider"
start_urls = ['https://app.thefairsapp.com/#/fair/648/employers']
custom_settings = {
'FEED_EXPORT_FIELDS' : ["name", "industry", "job", "opt_cpt","sponsorship"]
}
def start_requests(self):
for url in self.start_urls:
yield SplashRequest(url=url, callback=self.parse, endpoint='render.html', args={'wait': 3, 'http_method': 'GET'})
def parse(self, response):
EMPLOYER_SELECTOR = '.employer-collection-item'
for employer in response.css(EMPLOYER_SELECTOR):
NAME_SELECTOR = 'span ::text'
URL_SELECTOR = 'a ::attr(href)'
detail_page = employer.css(URL_SELECTOR).get()
if detail_page:
yield SplashRequest(url='https://app.thefairsapp.com/'+detail_page, callback=self.parse2, endpoint='render.html', args={'wait': 3, 'http_method': 'GET'})
def parse2(self, response):
items = CareerspiderItem()
CONTAINER_SELECTOR = '.employer-container'
container = response.css(CONTAINER_SELECTOR)
if container:
NAME_SELECTOR = '.solo-employer-header h5 ::text'
IND_SELECTOR = ".whitelabel-text-primary:contains('Industry') + ul li ::text"
JOB_SELECTOR = ".whitelabel-text-primary:contains('Job') + ul li ::text"
OPTCPT_SELECTOR = ".whitelabel-text-primary:contains('OPT/CPT') + ul li ::text"
VISA_SELECTOR = ".whitelabel-text-primary:contains('Sponsorship') + ul li ::text"
items['name'] = container.css(NAME_SELECTOR).get()
items['industry'] = container.css(IND_SELECTOR).extract() #extract multiple li element
items['job'] = container.css(JOB_SELECTOR).extract() #extract multiple li element
items['opt_cpt'] = container.css(OPTCPT_SELECTOR).get()
items['sponsorship'] = container.css(VISA_SELECTOR).get()
return items
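# Run sketch (standard Scrapy CLI; SplashRequest additionally needs a running
# Splash instance configured in the project's settings.py):
#   scrapy crawl careerfair_spider -o employers.csv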
| StarcoderdataPython |
1691853 | import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering
os.chdir('Chapter_5')
# %%
# import all sites
conditions = ["G", "_MF", "pMF"] # ,"SRR"]
total_file = "allm5C_libraries_filteredDepthAnno.csv"
total_df = pd.read_csv(total_file, low_memory=False) # file
total_df = total_df.sort_values(by=['position']) # sort
# %%
names = ['G1', 'G2', 'G3', 'G4', 'MF_rep1', 'MF_rep2', 'pMF_rep1', 'pMF_rep2', 'rep1',
'rep2', 'rep3', 'rep4']
# Aggregate methylation level for each condition
total_df.index = total_df['group']
cov_df = total_df.filter(regex='cov')
count_df = total_df.filter(regex='count')
cov_dict = {}
count_dict = {}
for name in conditions:
cov_dict[name] = cov_df.filter(regex=name).sum(axis=1)
count_dict[name] = count_df.filter(regex=name).sum(axis=1)
ML_dict = {}
for i, j in cov_dict.items():
ML_dict[i] = count_dict[i].divide(j, fill_value=0)
result_df = pd.DataFrame(ML_dict)
# result_df.dropna(axis=0, inplace=True, subset=['SRR','_MF','pMF'])
# result_df.replace(np.nan, 0, inplace=True)
# result_df.replace(0, np.nan, inplace=True)
result_df = result_df[(result_df['G'] > 0.1) | (result_df['_MF'] > 0.1) |
(result_df['pMF'] > 0.1)] # | (result_df['SRR'] > 0.1)]
result_df.dropna(axis=0, inplace=True)
test = total_df[total_df['group'].isin(result_df.index)]
# test.to_csv("AllConditionOverlap_methylationLevel.csv")
# %%
result_df_ML = total_df.filter(regex="methRate")
result_df_ML.replace(np.nan, 0, inplace=True)
cov_df.columns = names
count_df.columns = names
# %%
from matplotlib.colors import LinearSegmentedColormap
boundaries = [0.0, 0.05, 0.1, 0.2, 0.4, 0.6, 1.0]
hex_colors = sns.color_palette("RdYlBu_r", n_colors=len(boundaries) * 2).as_hex()
hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
colors = list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name="cus",
colors=colors,
)
# %%
# Define clusters
correlations_array = np.asarray(result_df)
row_linkage = hierarchy.linkage(
distance.pdist(correlations_array), method='ward')
col_linkage = hierarchy.linkage(
distance.pdist(correlations_array.T), method='ward')
model = AgglomerativeClustering(n_clusters=8, affinity='euclidean', linkage='ward')
model = model.fit_predict(correlations_array)
# %%
lut = dict(zip(set(model), ['red', 'blue', 'green', 'orange', 'purple', 'pink', 'black', 'grey']))
row_colors = pd.DataFrame(model)[0].map(lut)
cg = sns.clustermap(result_df.reset_index(drop=True), row_linkage=row_linkage, col_linkage=col_linkage,
cmap=custom_color_map,
row_colors=row_colors, figsize=(5, 5), yticklabels=False, col_cluster=False,
robust=True, method='ward') # , row_cluster=False) # z_score=0,
cg.ax_row_dendrogram.set_visible(False)
# plt.savefig("ML_conditions_clusteringHeatmapDepth.png", bbox_inches='tight', dpi=400, transparent=True)
plt.show()
plt.close()
# %%
merge_df = result_df
merge_df['cluster'] = model
merge_df['group'] = result_df.index
merge_df.reset_index(drop=True)
cluster_df = pd.merge(merge_df.rename_axis(None), total_df.rename_axis(None), on='group')
cluster_gene_list = (cluster_df['gene_name'][cluster_df['cluster'] == 5]).unique()
cluster_file = open("Total_cluster_genes.txt", "w")
for i in cluster_gene_list:
cluster_file.write(i + '\n')
cluster_file.close()
# %%
from scipy.stats import zscore
# write correlation matrix (z-score)
zscore_vals = result_df.apply(zscore, axis=1)
# %%
from scipy import stats
# per-site Fisher's exact test with Benjamini-Hochberg FDR correction
def BH_test(set1, set2):
# subset tests by relevant sites identified by 04a_OverlapDotplot.R
master_set = pd.read_csv('Dotplot_' + set1 + set2 + '_table.csv')
master_set = master_set.dropna(subset=['ML_1', 'ML_2']).reset_index()
count_set = {set1: master_set['C_count_' + set1], set2: master_set['C_count_' + set2]}
cov_set = {set1: master_set['cov_' + set1], set2: master_set['cov_' + set2]}
pvals = []
p_adj = []
try:
len(count_set[set1]) == len(cov_set[set1])
except:
print('data is not same size')
for i in range(len(count_set[set1])):
cont_table = pd.DataFrame({set1: [count_set[set1][i], cov_set[set1][i]],
set2: [count_set[set2][i], cov_set[set2][i]]})
odds, pvalue = stats.fisher_exact(cont_table)
pvals.append(pvalue)
pvals_sorted = sorted(pvals, key=float) # sorted pvalues
master_set['pval'] = pvals
master_set = master_set.sort_values('pval', ascending=True)
rank = 1
for p in pvals_sorted:
fdr_pval = p * len(pvals_sorted) / rank
rank += 1
p_adj.append(fdr_pval)
master_set['BH'] = p_adj
master_set['shape'] = np.where(master_set['BH'] <= 0.01, 'sig', 'non-sig')
return master_set
test_BH = pd.DataFrame(BH_test('G3', 'G4'))
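# Optional cross-check (assumes statsmodels is available): the vectorized
# fdr_bh correction should closely match the manual rank loop above
# (multipletests additionally enforces monotone adjusted p-values).
# from statsmodels.stats.multitest import multipletests
# _, bh_check, _, _ = multipletests(test_BH['pval'], method='fdr_bh')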
# %%
rcParams['figure.figsize'] = 3, 3
markers = {"sig": "X", "non-sig": "o"}
palette = ['blue']
# ax = sns.scatterplot(data=test_BH[test_BH['BH'] > 0.01], x='ML_1', y='ML_2', style = 'shape',
# markers=markers, s=25)
sns.scatterplot(data=test_BH, x='ML_1', y='ML_2', style='shape', hue='shape', palette = palette,
markers=markers, s=25)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.legend([], frameon=False)
plt.savefig("G3G4_DMS.png",bbox_inches='tight', dpi=400, transparent=True)
plt.show()
# %%
# Correlation matix of samples
from scipy.spatial import distance
from scipy.cluster import hierarchy
correlations = result_df.corr()
correlations_array = np.asarray(result_df.corr())
row_linkage = hierarchy.linkage(
distance.pdist(correlations_array), method='average')
col_linkage = hierarchy.linkage(
distance.pdist(correlations_array.T), method='average')
sns.clustermap(correlations, row_linkage=col_linkage, col_linkage=row_linkage, method="average",
figsize=(5, 10))
plt.show()
# %%
from matplotlib.colors import LinearSegmentedColormap
boundaries = [0.0, 0.05, 0.1, 0.2, 0.4, 0.6, 1.0]
hex_colors = sns.color_palette("RdBu_r", n_colors=len(boundaries) * 2 + 2).as_hex()
hex_colors = [hex_colors[i] for i in range(0, len(hex_colors), 2)]
colors = list(zip(boundaries, hex_colors))
custom_color_map = LinearSegmentedColormap.from_list(
name="cus",
colors=colors,
)
cg = sns.clustermap(result_df, annot=False, cmap=custom_color_map, dendrogram_ratio=(.1, .2),
figsize=(5, 5), yticklabels=False) # z_score=0,
cg.ax_row_dendrogram.set_visible(False)
plt.savefig("ML_conditions_clusteringHeatmapCcutoffDepth_noSRR.png", bbox_inches='tight', dpi=400, transparent=True)
plt.show()
plt.close()
| StarcoderdataPython |
1781973 | import unittest
from fun import greeter
class GreetingTests(unittest.TestCase):
def test_it_should_properly_greet_a_user(self):
cases = [
('<NAME>', 'Hello, <NAME>'),
('Class', 'Hello, Class')
]
        for name, expected in cases:
            with self.subTest(name=name):
                actual = greeter.speak(name)
                self.assertEqual(expected, actual)
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython
38655 | <reponame>sebanie15/simple_clinic
"""Console script for simple_clinic."""
import sys
import click
class ActiveDoctor(object):
def __init__(self):
self.id = 0
active = click.make_pass_decorator(ActiveDoctor, ensure=True)
@click.group()
@click.option('--id', type=int, help='')
@active
def cli(active, id):
"""Console script for simple_clinic."""
active.id = id
return 0
@cli.command()
@active
def show_activated(active):
click.echo(f'Activated = {active.id}')
# click.echo(f'activated : {activated}')
@cli.command()
@click.option('--set_id', type=int)
@active
def set_activated(active, set_id):
active.id = set_id
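# Note: make_pass_decorator(ensure=True) creates a fresh ActiveDoctor for each
# CLI invocation, so the id set here only lives for the current command chain;
# persisting it across runs would need e.g. a config file (not implemented).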
@cli.command()
@active
def print_test(active):
print(active.id)
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover
| StarcoderdataPython |
1735796 | <reponame>iyanmv/galois<gh_stars>0
def add(x, y):
"""
Adds two Galois field arrays element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.add.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
y = GF.Random(10); y
np.add(x, y)
x + y
"""
return
def subtract(x, y):
"""
Subtracts two Galois field arrays element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.subtract.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
y = GF.Random(10); y
np.subtract(x, y)
x - y
"""
return
def multiply(x, y):
"""
Multiplies two Galois field arrays element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.multiply.html
Examples
--------
Multiplying two Galois field arrays results in field multiplication.
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
y = GF.Random(10); y
np.multiply(x, y)
x * y
Multiplying a Galois field array with an integer results in scalar multiplication.
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
np.multiply(x, 3)
x * 3
.. ipython:: python
print(GF.properties)
# Adding `characteristic` copies of any element always results in zero
x * GF.characteristic
"""
return
def divide(x, y):
"""
Divides two Galois field arrays element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.divide.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
y = GF.Random(10, low=1); y
z = np.divide(x, y); z
y * z
.. ipython:: python
np.true_divide(x, y)
x / y
np.floor_divide(x, y)
x // y
"""
return
def negative(x):
"""
Returns the element-wise additive inverse of a Galois field array.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.negative.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
y = np.negative(x); y
x + y
.. ipython:: python
-x
-1*x
"""
return
def reciprocal(x):
"""
Returns the element-wise multiplicative inverse of a Galois field array.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.reciprocal.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(5, low=1); x
y = np.reciprocal(x); y
x * y
.. ipython:: python
x ** -1
GF(1) / x
GF(1) // x
"""
return
def power(x, y):
"""
Exponentiates a Galois field array element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.power.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
np.power(x, 3)
x ** 3
x * x * x
.. ipython:: python
x = GF.Random(10, low=1); x
y = np.random.randint(-10, 10, 10); y
np.power(x, y)
x ** y
"""
return
def square(x):
"""
Squares a Galois field array element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.square.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x = GF.Random(10); x
np.square(x)
x ** 2
x * x
"""
return
def log(x):
"""
Computes the logarithm (base `GF.primitive_element`) of a Galois field array element-wise.
Calling :func:`np.log` implicitly uses base :obj:`galois.FieldClass.primitive_element`. See
:func:`galois.FieldArray.log` for logarithm with arbitrary base.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.log.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
alpha = GF.primitive_element; alpha
x = GF.Random(10, low=1); x
y = np.log(x); y
alpha ** y
"""
return
def sqrt(x):
"""
Computes the square root a Galois field array element-wise.
References
----------
* https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html
Notes
-----
This function returns the lexicographically-minimal root :math:`r` (the root whose integer representation is smallest).
In addition to :math:`r`, :math:`-r` is also a root.
Examples
--------
.. ipython:: python
GF = galois.GF(31)
# Only the "quadratic residues" have square roots
x = GF.quadratic_residues; x
r = np.sqrt(x)
# Both roots in the finite field
r, -r
r**2
(-r)**2
"""
return
def matmul(x1, x2):
"""
Computes the matrix multiplication of two Galois field arrays.
References
----------
    * https://numpy.org/doc/stable/reference/generated/numpy.matmul.html
Examples
--------
.. ipython:: python
GF = galois.GF(31)
x1 = GF.Random((3,4)); x1
x2 = GF.Random((4,5)); x2
np.matmul(x1, x2)
x1 @ x2
"""
return
| StarcoderdataPython |
11920 | from typing import Any, Dict, Tuple
import torch
from torch_geometric.nn import GATConv
from torch_sparse import SparseTensor, set_diag
from rgnn_at_scale.aggregation import ROBUST_MEANS
from rgnn_at_scale.models.gcn import GCN
class RGATConv(GATConv):
"""Extension of Pytorch Geometric's `GCNConv` to execute a robust aggregation function:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self, mean='soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
kwargs['in_channels'] = 2 * [kwargs['in_channels']]
super().__init__(**kwargs)
self._mean = ROBUST_MEANS[mean] if mean is not None else None
self._mean_kwargs = mean_kwargs
def forward(self, arguments: Tuple[torch.Tensor, SparseTensor] = None) -> torch.Tensor:
"""Predictions based on the input.
Parameters
----------
arguments : Sequence[torch.Tensor]
[x, edge indices] or [x, edge indices, edge weights], by default None
Returns
-------
torch.Tensor
            the output of `RGATConv`.
Raises
------
NotImplementedError
if the arguments are not of length 2 or 3
"""
if len(arguments) == 2:
x, edge_index = arguments
edge_weight = None
elif len(arguments) == 3:
x, edge_index, edge_weight = arguments
else:
raise NotImplementedError("This method is just implemented for two or three arguments")
assert isinstance(edge_index, SparseTensor), 'GAT requires a SparseTensor as input'
assert edge_weight is None, 'The weights must be passed via a SparseTensor'
H, C = self.heads, self.out_channels
assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = x_r = self.lin_l(x).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
if self.add_self_loops:
edge_index = set_diag(edge_index)
# propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r))
alpha = self._alpha * edge_index.storage.value()[:, None]
self._alpha = None
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
attention_matrix = edge_index.set_value(alpha, layout='coo')
attention_matrix.storage._value = attention_matrix.storage._value.squeeze()
x = self.lin_l(x)
if self._mean is not None:
x = self._mean(attention_matrix, x, **self._mean_kwargs)
else:
x = attention_matrix @ x
x += self.bias
return x
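# Usage sketch for the layer above (hypothetical shapes; needs torch_geometric
# and torch_sparse installed):
#   conv = RGATConv(mean='soft_k_medoid', in_channels=16, out_channels=32, heads=1)
#   out = conv((x, adj_t))  # x: FloatTensor [N, 16], adj_t: SparseTensor [N, N]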
class RGAT(GCN):
"""Generic Reliable Graph Neural Network (RGNN) implementation which currently supports a GCN architecture with the
aggregation functions:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
and with the adjacency preprocessings:
- SVD: <NAME>, <NAME>, <NAME>, and <NAME>. All you need is Low
(rank): Defending against adversarial attacks on graphs.
- GDC: <NAME>, <NAME>, and <NAME>. Diffusion Improves Graph Learning.
- Jaccard: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Adversarial examples
for graph data: Deep insights into attack and defense.
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self,
mean: str = 'soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
self._mean_kwargs = dict(mean_kwargs)
self._mean = mean
super().__init__(**kwargs)
assert not self.do_checkpoint, 'Checkpointing is not supported'
def _build_conv_layer(self, in_channels: int, out_channels: int):
return RGATConv(mean=self._mean, mean_kwargs=self._mean_kwargs,
in_channels=in_channels, out_channels=out_channels)
def _cache_if_option_is_set(self, callback, x, edge_idx, edge_weight):
return SparseTensor.from_edge_index(edge_idx, edge_weight, (x.shape[0], x.shape[0])), None
| StarcoderdataPython |
3306324 | <reponame>stanionascu/python-embyapi
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SyncModelSyncJobItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'job_id': 'int',
'item_id': 'int',
'item_name': 'str',
'media_source_id': 'str',
'media_source': 'MediaSourceInfo',
'target_id': 'str',
'output_path': 'str',
'status': 'str',
'progress': 'float',
'date_created': 'datetime',
'primary_image_item_id': 'int',
'primary_image_tag': 'str',
'temporary_path': 'str',
'additional_files': 'list[SyncModelItemFileInfo]',
'item_date_modified_ticks': 'int'
}
attribute_map = {
'id': 'Id',
'job_id': 'JobId',
'item_id': 'ItemId',
'item_name': 'ItemName',
'media_source_id': 'MediaSourceId',
'media_source': 'MediaSource',
'target_id': 'TargetId',
'output_path': 'OutputPath',
'status': 'Status',
'progress': 'Progress',
'date_created': 'DateCreated',
'primary_image_item_id': 'PrimaryImageItemId',
'primary_image_tag': 'PrimaryImageTag',
'temporary_path': 'TemporaryPath',
'additional_files': 'AdditionalFiles',
'item_date_modified_ticks': 'ItemDateModifiedTicks'
}
def __init__(self, id=None, job_id=None, item_id=None, item_name=None, media_source_id=None, media_source=None, target_id=None, output_path=None, status=None, progress=None, date_created=None, primary_image_item_id=None, primary_image_tag=None, temporary_path=None, additional_files=None, item_date_modified_ticks=None): # noqa: E501
"""SyncModelSyncJobItem - a model defined in Swagger""" # noqa: E501
self._id = None
self._job_id = None
self._item_id = None
self._item_name = None
self._media_source_id = None
self._media_source = None
self._target_id = None
self._output_path = None
self._status = None
self._progress = None
self._date_created = None
self._primary_image_item_id = None
self._primary_image_tag = None
self._temporary_path = None
self._additional_files = None
self._item_date_modified_ticks = None
self.discriminator = None
if id is not None:
self.id = id
if job_id is not None:
self.job_id = job_id
if item_id is not None:
self.item_id = item_id
if item_name is not None:
self.item_name = item_name
if media_source_id is not None:
self.media_source_id = media_source_id
if media_source is not None:
self.media_source = media_source
if target_id is not None:
self.target_id = target_id
if output_path is not None:
self.output_path = output_path
if status is not None:
self.status = status
if progress is not None:
self.progress = progress
if date_created is not None:
self.date_created = date_created
if primary_image_item_id is not None:
self.primary_image_item_id = primary_image_item_id
if primary_image_tag is not None:
self.primary_image_tag = primary_image_tag
if temporary_path is not None:
self.temporary_path = temporary_path
if additional_files is not None:
self.additional_files = additional_files
if item_date_modified_ticks is not None:
self.item_date_modified_ticks = item_date_modified_ticks
@property
def id(self):
"""Gets the id of this SyncModelSyncJobItem. # noqa: E501
:return: The id of this SyncModelSyncJobItem. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SyncModelSyncJobItem.
:param id: The id of this SyncModelSyncJobItem. # noqa: E501
:type: int
"""
self._id = id
@property
def job_id(self):
"""Gets the job_id of this SyncModelSyncJobItem. # noqa: E501
:return: The job_id of this SyncModelSyncJobItem. # noqa: E501
:rtype: int
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this SyncModelSyncJobItem.
:param job_id: The job_id of this SyncModelSyncJobItem. # noqa: E501
:type: int
"""
self._job_id = job_id
@property
def item_id(self):
"""Gets the item_id of this SyncModelSyncJobItem. # noqa: E501
:return: The item_id of this SyncModelSyncJobItem. # noqa: E501
:rtype: int
"""
return self._item_id
@item_id.setter
def item_id(self, item_id):
"""Sets the item_id of this SyncModelSyncJobItem.
:param item_id: The item_id of this SyncModelSyncJobItem. # noqa: E501
:type: int
"""
self._item_id = item_id
@property
def item_name(self):
"""Gets the item_name of this SyncModelSyncJobItem. # noqa: E501
:return: The item_name of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._item_name
@item_name.setter
def item_name(self, item_name):
"""Sets the item_name of this SyncModelSyncJobItem.
:param item_name: The item_name of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._item_name = item_name
@property
def media_source_id(self):
"""Gets the media_source_id of this SyncModelSyncJobItem. # noqa: E501
:return: The media_source_id of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._media_source_id
@media_source_id.setter
def media_source_id(self, media_source_id):
"""Sets the media_source_id of this SyncModelSyncJobItem.
:param media_source_id: The media_source_id of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._media_source_id = media_source_id
@property
def media_source(self):
"""Gets the media_source of this SyncModelSyncJobItem. # noqa: E501
:return: The media_source of this SyncModelSyncJobItem. # noqa: E501
:rtype: MediaSourceInfo
"""
return self._media_source
@media_source.setter
def media_source(self, media_source):
"""Sets the media_source of this SyncModelSyncJobItem.
:param media_source: The media_source of this SyncModelSyncJobItem. # noqa: E501
:type: MediaSourceInfo
"""
self._media_source = media_source
@property
def target_id(self):
"""Gets the target_id of this SyncModelSyncJobItem. # noqa: E501
:return: The target_id of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._target_id
@target_id.setter
def target_id(self, target_id):
"""Sets the target_id of this SyncModelSyncJobItem.
:param target_id: The target_id of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._target_id = target_id
@property
def output_path(self):
"""Gets the output_path of this SyncModelSyncJobItem. # noqa: E501
:return: The output_path of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._output_path
@output_path.setter
def output_path(self, output_path):
"""Sets the output_path of this SyncModelSyncJobItem.
:param output_path: The output_path of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._output_path = output_path
@property
def status(self):
"""Gets the status of this SyncModelSyncJobItem. # noqa: E501
:return: The status of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this SyncModelSyncJobItem.
:param status: The status of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
allowed_values = ["Queued", "Converting", "ReadyToTransfer", "Transferring", "Synced", "Failed"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def progress(self):
"""Gets the progress of this SyncModelSyncJobItem. # noqa: E501
:return: The progress of this SyncModelSyncJobItem. # noqa: E501
:rtype: float
"""
return self._progress
@progress.setter
def progress(self, progress):
"""Sets the progress of this SyncModelSyncJobItem.
:param progress: The progress of this SyncModelSyncJobItem. # noqa: E501
:type: float
"""
self._progress = progress
@property
def date_created(self):
"""Gets the date_created of this SyncModelSyncJobItem. # noqa: E501
:return: The date_created of this SyncModelSyncJobItem. # noqa: E501
:rtype: datetime
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this SyncModelSyncJobItem.
:param date_created: The date_created of this SyncModelSyncJobItem. # noqa: E501
:type: datetime
"""
self._date_created = date_created
@property
def primary_image_item_id(self):
"""Gets the primary_image_item_id of this SyncModelSyncJobItem. # noqa: E501
:return: The primary_image_item_id of this SyncModelSyncJobItem. # noqa: E501
:rtype: int
"""
return self._primary_image_item_id
@primary_image_item_id.setter
def primary_image_item_id(self, primary_image_item_id):
"""Sets the primary_image_item_id of this SyncModelSyncJobItem.
:param primary_image_item_id: The primary_image_item_id of this SyncModelSyncJobItem. # noqa: E501
:type: int
"""
self._primary_image_item_id = primary_image_item_id
@property
def primary_image_tag(self):
"""Gets the primary_image_tag of this SyncModelSyncJobItem. # noqa: E501
:return: The primary_image_tag of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._primary_image_tag
@primary_image_tag.setter
def primary_image_tag(self, primary_image_tag):
"""Sets the primary_image_tag of this SyncModelSyncJobItem.
:param primary_image_tag: The primary_image_tag of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._primary_image_tag = primary_image_tag
@property
def temporary_path(self):
"""Gets the temporary_path of this SyncModelSyncJobItem. # noqa: E501
:return: The temporary_path of this SyncModelSyncJobItem. # noqa: E501
:rtype: str
"""
return self._temporary_path
@temporary_path.setter
def temporary_path(self, temporary_path):
"""Sets the temporary_path of this SyncModelSyncJobItem.
:param temporary_path: The temporary_path of this SyncModelSyncJobItem. # noqa: E501
:type: str
"""
self._temporary_path = temporary_path
@property
def additional_files(self):
"""Gets the additional_files of this SyncModelSyncJobItem. # noqa: E501
:return: The additional_files of this SyncModelSyncJobItem. # noqa: E501
:rtype: list[SyncModelItemFileInfo]
"""
return self._additional_files
@additional_files.setter
def additional_files(self, additional_files):
"""Sets the additional_files of this SyncModelSyncJobItem.
:param additional_files: The additional_files of this SyncModelSyncJobItem. # noqa: E501
:type: list[SyncModelItemFileInfo]
"""
self._additional_files = additional_files
@property
def item_date_modified_ticks(self):
"""Gets the item_date_modified_ticks of this SyncModelSyncJobItem. # noqa: E501
:return: The item_date_modified_ticks of this SyncModelSyncJobItem. # noqa: E501
:rtype: int
"""
return self._item_date_modified_ticks
@item_date_modified_ticks.setter
def item_date_modified_ticks(self, item_date_modified_ticks):
"""Sets the item_date_modified_ticks of this SyncModelSyncJobItem.
:param item_date_modified_ticks: The item_date_modified_ticks of this SyncModelSyncJobItem. # noqa: E501
:type: int
"""
self._item_date_modified_ticks = item_date_modified_ticks
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SyncModelSyncJobItem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SyncModelSyncJobItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| StarcoderdataPython |
3611 | """Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
This class does by default not include any visible elements but provides
methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
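        Examples
        --------
        A short sketch (hypothetical field function, mirroring ``scale_func``)::

            func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
            shifted_func = VectorField.shift_func(func, 2 * RIGHT)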
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
"""
return lambda p: func(p - shift_vector)
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
        scalar
The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
A scalar to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
:class:`~.Mobject`. If `True` the vector field takes effect on the
points of the individual points of the :class:`~.Mobject`,
potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
        def runge_kutta(self, p: Sequence[float], step_size: float) -> np.ndarray:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
The position of each point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
            np.ndarray
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
A scalar to the amount the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
        The color at each position is calculated by passing the position through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
            The step size at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
        y_array = y_array.repeat(pw, axis=1)  # assign the result; the bare call was a no-op
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
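# Usage sketch for the gradient helper above (hypothetical values): the returned
# callable maps scalar samples to an (n, 4) array of RGBA rows.
#   to_rgba = field.get_vectorized_rgba_gradient_function(0, 2, [BLUE_E, RED])
#   rgbas = to_rgba([0.0, 1.0, 2.0], opacity=0.8)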
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
The values of this functions is displayed as a grid of vectors.
By default the color of each vector is determined by it's magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
length_func
The function determining the displayed size of the vectors. The actual size
of the vector is passed, the returned value will be used as display size for the
vector. By default this is used to cap the displayed size of vectors to reduce the clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
kwargs : Any
Additional arguments to be passed to the :class:`~.Vector` constructor
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
    The values of this function are displayed by moving many agents along the vector field
    and showing their traces.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
        Enables three-dimensional vector fields. Defaults to False; automatically
        set to True if z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
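        # Jitter each grid position by uniform noise in [-half_noise, half_noise)
        # per axis; the fixed seed keeps the starting positions deterministic.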
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
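        # Trace each agent with explicit Euler steps (x_next = x + dt * func(x)),
        # stopping once the agent leaves the padded generation area.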
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
        The stream lines will continuously flow along the vector field.
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
            The proportion of the stream line shown while being animated.
        rate_func
            The rate function of each stream line flashing.
        line_animation_class
            The animation class being used.
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
        creation_starting_speed = creation_rate_func(0.001) * 1000
        creation_run_time = (
            max_run_time / (1 + self.time_width) * creation_starting_speed
        )
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
# ---------------------------------------------------------------------------------------------------#
# Organizing the data
# Aggregate play counts per genre first, so we know which genre's songs to play first.
# Rank the songs within each genre. Which data structure should record the per-genre play counts?
import collections
def solution(genres, plays) :
count_max = collections.defaultdict(int)
rank_genre = []
rank_songs = []
answer = []
for ind, (genre, play) in enumerate(zip(genres, plays)) :
count_max[genre] += play
rank_songs.append((genre,[ind, play]))
for key, value in count_max.items() :
rank_genre.append((key, value))
rank_genre = sorted(rank_genre, key = lambda x : x[1], reverse= True)
rank_songs = sorted(rank_songs, key = lambda x : x[1][1], reverse= True)
for rank in rank_genre :
count = 0
for song in rank_songs :
if rank[0] == song[0] :
if count >= 2 :
break
else :
answer.append(song[1][0])
count += 1
return answer
def solution2(genres, plays) :
answer = []
    # Remember this as one way to build a dict holding multiple elements per key!!
    # Make good use of lambda expressions!!
d = {e:[] for e in set(genres)}
for e in zip(genres, plays, range(len(plays))):
d[e[0]].append([e[1] , e[2]])
genresort = sorted(list(d.keys()), key = lambda x : sum(map(lambda y : y[0],d[x])), reverse=True)
for genre in genresort :
genrelist = sorted(d[genre], key = lambda x : (-x[0], x[1]))
for genre_count in genrelist[:2] :
answer.append(genre_count[1])
return answer
answer = solution2(['jazz', "classic", "pop", "classic", "classic", "pop"], [3000, 500, 600, 150, 800, 2500])
print(answer)
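# Expected output for the sample above: [5, 2, 0, 4, 1]
# (genre totals: pop 3100 > jazz 3000 > classic 1450; top two songs per genre).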
# ---------------------------------------------------------------------------------------------------#
# GetProvince.py
# -*- coding: utf-8 -*-
import requests
import os
class GetProvince(object):
@staticmethod
def get_all():
url = "https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9044"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/"
"63.0.3239.132"" Safari/537.36"
}
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
return response.text
else:
return None
@staticmethod
def get_province():
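        # The station_name.js payload is one long '|'-separated string; the
        # stride-5 walk below assumes each station record contributes five
        # fields, with slots [head+1:head+3] read as (simplified, abbreviation).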
response = GetProvince.get_all().split("=")[1]
split_data_list = response.split("|")
split_data_list_total = len(split_data_list)
tail = 6 - (split_data_list_total % 5) + split_data_list_total
province_dict = dict()
for head in range(0, tail, 5):
try:
simplified, abbreviation = split_data_list[head+1: head+3]
province_dict[abbreviation] = simplified
except ValueError as e:
pass
# print(e)
# print("共{0}条, 当前查询{1}条, 抛出异常, 表示已查询完毕".format(split_data_list_total, head))
return province_dict
    # This file output is meant to be replaced by a database in the future
@staticmethod
def output_province(path, content):
if path[-1] != os.sep:
path += "/"
with open(file=path + "province.txt", mode="w", encoding="utf-8") as wf:
wf.writelines(content)
if __name__ == '__main__':
gp = GetProvince()
content = gp.get_province()
print(content)
# gp.output_province(r"C:\Users\Administrator\Desktop", content)
# ---------------------------------------------------------------------------------------------------#
"""
This file sets a parameter for the current scene.
"""
def set_param(args, scene_root, history_db, current_scene_db):
"""sets the parameter by making a sqlite call"""
from src.praxxis.sqlite import sqlite_parameter
from src.praxxis.display import display_param
from src.praxxis.util import error
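    # If args has no .name attribute, treat args itself as the parameter
    # name; note that args.value is still read further below.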
if hasattr(args, "name"):
name = args.name
else:
name = args
if str(name).isdigit():
# checking if the user passed an ordinal instead of a string
try:
name = sqlite_parameter.get_param_by_ord(current_scene_db, int(name))
except error.ParamNotFoundError as e:
raise e
sqlite_parameter.set_param(current_scene_db, name, args.value)
display_param.display_set_param(name, args.value)
return args
# ---------------------------------------------------------------------------------------------------#
# lib/optim/build.py
import torch.optim as optim
from .adamw import AdamW
from .adabound import AdaBound, AdaBoundW
from .asam import SAM, ASAM
def build_optimizer(model, args):
if args.optims == "sgd":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optims == "adam":
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
betas=(0.9, 0.999),
)
elif args.optims == "adamw":
optimizer = AdamW(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
betas=(0.9, 0.999),
)
elif args.optims == "nesterov":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True,
)
elif args.optims == "adabound":
optimizer = AdaBound(
filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr
)
elif args.optims == "adaboundw":
optimizer = AdaBoundW(
filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr
)
elif args.optims == "sam":
opt = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
optimizer = SAM(
optimizer=opt,
model=model,
rho=0.5,
eta=0,
)
elif args.optims == "asam":
opt = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
optimizer = ASAM(
optimizer=opt,
model=model,
rho=0.5,
eta=0,
)
    else:
        raise NotImplementedError(f"Optimizer '{args.optims}' is not implemented.")
return optimizer
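
# Minimal usage sketch (illustrative, not part of the original module):
# build_optimizer only reads args.optims, args.lr, args.momentum and
# args.weight_decay, so any namespace-like object works, e.g.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(optims="sgd", lr=0.1, momentum=0.9, weight_decay=5e-4)
#   optimizer = build_optimizer(model, args)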
# ---------------------------------------------------------------------------------------------------#
"""Class implementing meta-model for a Conv3D Layer."""
from typing import Dict
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv3D,
Layer)
from .regularized_meta_layer import RegularizedMetaLayer
from ..utils import distributions
class Conv3DMetaLayer(RegularizedMetaLayer):
"""Class implementing meta-layer for tri-dimensional convolutional layers.
Private members
------------------------
_min_filters: int,
Minimum number of filters to use for the layer.
_max_filters: int,
Maximum number of filters to use for the layer.
_min_x_kernel_size: int,
        Minimum size of the kernel on the length axis.
    _max_x_kernel_size: int,
        Maximum size of the kernel on the length axis.
_min_y_kernel_size: int,
Minimum size of the kernel on the depth axis.
_max_y_kernel_size: int,
Maximum size of the kernel on the depth axis.
_min_z_kernel_size: int,
Minimum size of the kernel on the height axis.
_max_z_kernel_size: int,
Maximum size of the kernel on the height axis.
_activation: str,
The activation function to use for the layer.
"""
def __init__(
self,
min_filters: int = 0,
max_filters: int = 256,
min_x_kernel_size: int = 1,
max_x_kernel_size: int = 5,
min_y_kernel_size: int = 1,
max_y_kernel_size: int = 5,
min_z_kernel_size: int = 1,
max_z_kernel_size: int = 5,
activation: str = "relu",
**kwargs: Dict
):
"""Create new Conv3DResidualLayer meta-model object.
Parameters
----------------------
min_filters: int = 0,
Minimum number of filters (neurons) in each layer.
If the tuning process passes 0, then the layer is skipped.
max_filters: int = 256,
Maximum number of filters (neurons) in each layer.
min_x_kernel_size: int = 1,
            Minimum size of the kernel on the length axis.
        max_x_kernel_size: int = 5,
            Maximum size of the kernel on the length axis.
min_y_kernel_size: int = 1,
Minimum size of the kernel on the depth axis.
max_y_kernel_size: int = 5,
Maximum size of the kernel on the depth axis.
min_z_kernel_size: int = 1,
Minimum size of the kernel on the height axis.
max_z_kernel_size: int = 5,
Maximum size of the kernel on the height axis.
activation: str = "relu",
The activation function to use for the layer.
**kwargs: Dict,
Dictionary of keyword parameters to be passed to parent class.
"""
super().__init__(**kwargs)
self._min_filters = min_filters
self._max_filters = max_filters
self._min_x_kernel_size = min_x_kernel_size
self._max_x_kernel_size = max_x_kernel_size
self._min_y_kernel_size = min_y_kernel_size
self._max_y_kernel_size = max_y_kernel_size
self._min_z_kernel_size = min_z_kernel_size
self._max_z_kernel_size = max_z_kernel_size
self._activation = activation
def _space(self) -> Dict:
"""Return hyper parameters of the layer."""
return {
"filters": (distributions.integer, self._min_filters, self._max_filters),
"x_kernel_size": (distributions.integer, self._min_x_kernel_size, self._max_x_kernel_size),
"y_kernel_size": (distributions.integer, self._min_y_kernel_size, self._max_y_kernel_size),
"z_kernel_size": (distributions.integer, self._min_z_kernel_size, self._max_z_kernel_size),
**super()._space()
}
def _build(
self,
input_layers: Layer,
filters: int,
x_kernel_size: int,
y_kernel_size: int,
z_kernel_size: int,
strides: int = (1, 1, 1),
**kwargs: Dict
) -> Layer:
"""Return built Conv3D layer block.
If the given filters number is equal to 0, the layer is skipped.
Parameters
--------------------------
input_layers: Layer,
The input layer of the current layer.
filters: int,
The number of neurons of the layer.
x_kernel_size: int,
The dimension of the kernel for the layer, on the length axis.
y_kernel_size: int,
The dimension of the kernel for the layer, on the depth axis.
z_kernel_size: int,
The dimension of the kernel for the layer, on the height axis.
        strides: int = (1, 1, 1),
Strides for the convolutional layer.
**kwargs: Dict,
The kwargs to pass to the kernel regularizers.
Returns
--------------------------
Output layer of the block.
"""
filters = round(filters)
x_kernel_size = round(x_kernel_size)
y_kernel_size = round(y_kernel_size)
z_kernel_size = round(z_kernel_size)
if filters == 0:
return input_layers
layer = Conv3D(
filters=filters,
kernel_size=(x_kernel_size, y_kernel_size, z_kernel_size),
strides=strides,
padding="same",
**self._build_regularizers(**kwargs)
)(input_layers)
if self._batch_normalization:
layer = BatchNormalization()(layer)
activation = Activation(self._activation)(layer)
return activation
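
# Illustrative sketch (assumes RegularizedMetaLayer's constructor accepts no
# further required arguments and that a tuner samples from _space() before
# calling _build()):
#
#   layer = Conv3DMetaLayer(min_filters=8, max_filters=64)
#   space = layer._space()            # hyperparameter search distributions
#   out = layer._build(input_layers,  # a Keras tensor from upstream layers
#                      filters=32, x_kernel_size=3,
#                      y_kernel_size=3, z_kernel_size=3)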
# ---------------------------------------------------------------------------------------------------#
# Author: <NAME>
# Davenport Lab - Penn State University
# Date: 9-2-2021
from src import *
# ---------------------------------------------------------------------------------------------------#
# third_party/sqlalchemy_0_7_1/sqlalchemy/ext/declarative.py
# ext/declarative.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Synopsis
========
SQLAlchemy object-relational configuration involves the
combination of :class:`.Table`, :func:`.mapper`, and class
objects to define a mapped class.
:mod:`~sqlalchemy.ext.declarative` allows all three to be
expressed at once within the class declaration. As much as
possible, regular SQLAlchemy schema and ORM constructs are
used directly, so that configuration between "classical" ORM
usage and declarative remain highly similar.
As a simple example::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String(50))
Above, the :func:`declarative_base` callable returns a new base class from
which all mapped classes should inherit. When the class definition is
completed, a new :class:`.Table` and
:func:`.mapper` will have been generated.
The resulting table and mapper are accessible via
``__table__`` and ``__mapper__`` attributes on the
``SomeClass`` class::
# access the mapped Table
SomeClass.__table__
# access the Mapper
SomeClass.__mapper__
Defining Attributes
===================
In the previous example, the :class:`.Column` objects are
automatically named with the name of the attribute to which they are
assigned.
To name columns explicitly with a name distinct from their mapped attribute,
just give the column a name. Below, column "some_table_id" is mapped to the
"id" attribute of `SomeClass`, but in SQL will be represented as "some_table_id"::
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column("some_table_id", Integer, primary_key=True)
Attributes may be added to the class after its construction, and they will be
added to the underlying :class:`.Table` and
:func:`.mapper()` definitions as appropriate::
SomeClass.data = Column('data', Unicode)
SomeClass.related = relationship(RelatedInfo)
Classes which are constructed using declarative can interact freely
with classes that are mapped explicitly with :func:`mapper`.
It is recommended, though not required, that all tables
share the same underlying :class:`~sqlalchemy.schema.MetaData` object,
so that string-configured :class:`~sqlalchemy.schema.ForeignKey`
references can be resolved without issue.
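
As an illustrative sketch (following the conventions above), a classically
mapped class can share the declarative ``Base.metadata``::

    other_table = Table('other', Base.metadata,
        Column('id', Integer, primary_key=True)
    )

    class Other(object):
        pass

    mapper(Other, other_table)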
Accessing the MetaData
=======================
The :func:`declarative_base` base class contains a
:class:`.MetaData` object where newly defined
:class:`.Table` objects are collected. This object is
intended to be accessed directly for
:class:`.MetaData`-specific operations. Such as, to issue
CREATE statements for all tables::
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
The usual techniques of associating :class:`.MetaData` with :class:`.Engine`
apply, such as assigning to the ``bind`` attribute::
Base.metadata.bind = create_engine('sqlite://')
To associate the engine with the :func:`declarative_base` at time
of construction, the ``bind`` argument is accepted::
Base = declarative_base(bind=create_engine('sqlite://'))
:func:`declarative_base` can also receive a pre-existing
:class:`.MetaData` object, which allows a
declarative setup to be associated with an already
existing traditional collection of :class:`~sqlalchemy.schema.Table`
objects::
mymetadata = MetaData()
Base = declarative_base(metadata=mymetadata)
Configuring Relationships
=========================
Relationships to other classes are done in the usual way, with the added
feature that the class specified to :func:`~sqlalchemy.orm.relationship`
may be a string name. The "class registry" associated with ``Base``
is used at mapper compilation time to resolve the name into the actual
class object, which is expected to have been defined once the mapper
configuration is used::
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50))
addresses = relationship("Address", backref="user")
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
Column constructs, since they are just that, are immediately usable,
as below where we define a primary join condition on the ``Address``
class using them::
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User, primaryjoin=user_id == User.id)
In addition to the main argument for :func:`~sqlalchemy.orm.relationship`,
other arguments which depend upon the columns present on an as-yet
undefined class may also be specified as strings. These strings are
evaluated as Python expressions. The full namespace available within
this evaluation includes all classes mapped for this declarative base,
as well as the contents of the ``sqlalchemy`` package, including
expression functions like :func:`~sqlalchemy.sql.expression.desc` and
:attr:`~sqlalchemy.sql.expression.func`::
class User(Base):
# ....
addresses = relationship("Address",
order_by="desc(Address.email)",
primaryjoin="Address.user_id==User.id")
As an alternative to string-based attributes, attributes may also be
defined after all classes have been created. Just add them to the target
class after the fact::
User.addresses = relationship(Address,
primaryjoin=Address.user_id==User.id)
Configuring Many-to-Many Relationships
======================================
Many-to-many relationships are also declared in the same way
with declarative as with traditional mappings. The
``secondary`` argument to
:func:`.relationship` is as usual passed a
:class:`.Table` object, which is typically declared in the
traditional way. The :class:`.Table` usually shares
the :class:`.MetaData` object used by the declarative base::
keywords = Table(
'keywords', Base.metadata,
Column('author_id', Integer, ForeignKey('authors.id')),
Column('keyword_id', Integer, ForeignKey('keywords.id'))
)
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary=keywords)
As with traditional mapping, it's generally not a good idea to use
a :class:`.Table` as the "secondary" argument which is also mapped to
a class, unless the :func:`.relationship` is declared with ``viewonly=True``.
Otherwise, the unit-of-work system may attempt duplicate INSERT and
DELETE statements against the underlying table.
.. _declarative_sql_expressions:
Defining SQL Expressions
========================
The usage of :func:`.column_property` with Declarative to define
load-time, mapped SQL expressions is
pretty much the same as that described in
:ref:`mapper_sql_expressions`. Local columns within the same
class declaration can be referenced directly::
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
firstname = Column(String)
lastname = Column(String)
fullname = column_property(
firstname + " " + lastname
)
Correlated subqueries reference the :class:`.Column` objects they
need either from the local class definition or from remote
classes::
from sqlalchemy.sql import func
class Address(Base):
__tablename__ = 'address'
id = Column('id', Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
address_count = column_property(
select([func.count(Address.id)]).\\
where(Address.user_id==id)
)
In the case that the ``address_count`` attribute above doesn't have access to
``Address`` when ``User`` is defined, the ``address_count`` attribute should
be added to ``User`` when both ``User`` and ``Address`` are available (i.e.
there is no string based "late compilation" feature like there is with
:func:`.relationship` at this time). Note we reference the ``id`` column
attribute of ``User`` with its class when we are no longer in the declaration
of the ``User`` class::
User.address_count = column_property(
select([func.count(Address.id)]).\\
where(Address.user_id==User.id)
)
Table Configuration
===================
Table arguments other than the name, metadata, and mapped Column
arguments are specified using the ``__table_args__`` class attribute.
This attribute accommodates both positional as well as keyword
arguments that are normally sent to the
:class:`~sqlalchemy.schema.Table` constructor.
The attribute can be specified in one of two forms. One is as a
dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = {'mysql_engine':'InnoDB'}
The other, a tuple, where each argument is positional
(usually constraints)::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
)
Keyword arguments can be specified with the above form by
specifying the last argument as a dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
{'autoload':True}
)
Using a Hybrid Approach with __table__
=======================================
As an alternative to ``__tablename__``, a direct
:class:`~sqlalchemy.schema.Table` construct may be used. The
:class:`~sqlalchemy.schema.Column` objects, which in this case require
their names, will be added to the mapping just like a regular mapping
to a table::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
``__table__`` provides a more focused point of control for establishing
table metadata, while still getting most of the benefits of using declarative.
An application that uses reflection might want to load table metadata elsewhere
and simply pass it to declarative classes::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Base.metadata.reflect(some_engine)
    class User(Base):
        __table__ = Base.metadata.tables['user']

    class Address(Base):
        __table__ = Base.metadata.tables['address']
Some configuration schemes may find it more appropriate to use ``__table__``,
such as those which already take advantage of the data-driven nature of
:class:`.Table` to customize and/or automate schema definition. See
the wiki example `NamingConventions <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/NamingConventions>`_
for one such example.
Mapper Configuration
====================
Declarative makes use of the :func:`~.orm.mapper` function internally
when it creates the mapping to the declared table. The options
for :func:`~.orm.mapper` are passed directly through via the ``__mapper_args__``
class attribute. As always, arguments which reference locally
mapped columns can reference them directly from within the
class declaration::
from datetime import datetime
class Widget(Base):
__tablename__ = 'widgets'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime, nullable=False)
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': lambda v:datetime.now()
}
.. _declarative_inheritance:
Inheritance Configuration
=========================
Declarative supports all three forms of inheritance as intuitively
as possible. The ``inherits`` mapper keyword argument is not needed
as declarative will determine this from the class itself. The various
"polymorphic" keyword arguments are specified using ``__mapper_args__``.
Joined Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Joined table inheritance is defined as a subclass that defines its own
table::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column(Integer, ForeignKey('people.id'), primary_key=True)
primary_language = Column(String(50))
Note that above, the ``Engineer.id`` attribute, since it shares the
same attribute name as the ``Person.id`` attribute, will in fact
represent the ``people.id`` and ``engineers.id`` columns together, and
will render inside a query as ``"people.id"``.
To provide the ``Engineer`` class with an attribute that represents
only the ``engineers.id`` column, give it a different attribute name::
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
engineer_id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column(String(50))
Single Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Single table inheritance is defined as a subclass that does not have
its own table; you just leave out the ``__table__`` and ``__tablename__``
attributes::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
When the above mappers are configured, the ``Person`` class is mapped
to the ``people`` table *before* the ``primary_language`` column is
defined, and this column will not be included in its own mapping.
When ``Engineer`` then defines the ``primary_language`` column, the
column is added to the ``people`` table so that it is included in the
mapping for ``Engineer`` and is also part of the table's full set of
columns. Columns which are not mapped to ``Person`` are also excluded
from any other single or joined inheriting classes using the
``exclude_properties`` mapper argument. Below, ``Manager`` will have
all the attributes of ``Person`` and ``Manager`` but *not* the
``primary_language`` attribute of ``Engineer``::
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
The attribute exclusion logic is provided by the
``exclude_properties`` mapper argument, and declarative's default
behavior can be disabled by passing an explicit ``exclude_properties``
collection (empty or otherwise) to the ``__mapper_args__``.
Concrete Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~
Concrete is defined as a subclass which has its own table and sets the
``concrete`` keyword argument to ``True``::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
name = Column(String(50))
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'concrete':True}
id = Column(Integer, primary_key=True)
primary_language = Column(String(50))
name = Column(String(50))
Usage of an abstract base class is a little less straightforward as it
requires usage of :func:`~sqlalchemy.orm.util.polymorphic_union`::
engineers = Table('engineers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('primary_language', String(50))
)
managers = Table('managers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('golf_swing', String(50))
)
punion = polymorphic_union({
'engineer':engineers,
'manager':managers
}, 'type', 'punion')
class Person(Base):
__table__ = punion
__mapper_args__ = {'polymorphic_on':punion.c.type}
class Engineer(Person):
__table__ = engineers
__mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True}
class Manager(Person):
__table__ = managers
__mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True}
Mixin Classes
==============
A common need when using :mod:`~sqlalchemy.ext.declarative` is to
share some functionality, often a set of columns, across many
classes. The normal Python idiom would be to put this common code into
a base class and have all the other classes subclass this class.
When using :mod:`~sqlalchemy.ext.declarative`, this need is met by
using a "mixin class". A mixin class is one that isn't mapped to a
table and doesn't subclass the declarative :class:`.Base`. For example::
class MyMixin(object):
__table_args__ = {'mysql_engine': 'InnoDB'}
__mapper_args__= {'always_refresh': True}
id = Column(Integer, primary_key=True)
class MyModel(Base,MyMixin):
__tablename__ = 'test'
name = Column(String(1000))
Where above, the class ``MyModel`` will contain an "id" column
as well as ``__table_args__`` and ``__mapper_args__`` defined
by the ``MyMixin`` mixin class.
Mixing in Columns
~~~~~~~~~~~~~~~~~
The most basic way to specify a column on a mixin is by simple
declaration::
class TimestampMixin(object):
created_at = Column(DateTime, default=func.now())
class MyModel(Base, TimestampMixin):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
name = Column(String(1000))
Where above, all declarative classes that include ``TimestampMixin``
will also have a column ``created_at`` that applies a timestamp to
all row insertions.
Those familiar with the SQLAlchemy expression language know that
the object identity of clause elements defines their role in a schema.
Two ``Table`` objects ``a`` and ``b`` may both have a column called
``id``, but the way these are differentiated is that ``a.c.id``
and ``b.c.id`` are two distinct Python objects, referencing their
parent tables ``a`` and ``b`` respectively.
In the case of the mixin column, it seems that only one
:class:`.Column` object is explicitly created, yet the ultimate
``created_at`` column above must exist as a distinct Python object
for each separate destination class. To accomplish this, the declarative
extension creates a **copy** of each :class:`.Column` object encountered on
a class that is detected as a mixin.
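
As a hypothetical illustration: if two classes ``ModelOne`` and ``ModelTwo``
each mix in ``TimestampMixin`` above, each receives its own distinct column
object::

    ModelOne.__table__.c.created_at is not ModelTwo.__table__.c.created_at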
This copy mechanism is limited to simple columns that have no foreign
keys, as a :class:`.ForeignKey` itself contains references to columns
which can't be properly recreated at this level. For columns that
have foreign keys, as well as for the variety of mapper-level constructs
that require destination-explicit context, the
:func:`~.declared_attr` decorator (renamed from ``sqlalchemy.util.classproperty`` in 0.6.5)
is provided so that
patterns common to many classes can be defined as callables::
from sqlalchemy.ext.declarative import declared_attr
class ReferenceAddressMixin(object):
@declared_attr
def address_id(cls):
return Column(Integer, ForeignKey('address.id'))
class User(Base, ReferenceAddressMixin):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
Where above, the ``address_id`` class-level callable is executed at the
point at which the ``User`` class is constructed, and the declarative
extension can use the resulting :class:`.Column` object as returned by
the method without the need to copy it.
Columns generated by :func:`~.declared_attr` can also be
referenced by ``__mapper_args__`` to a limited degree, currently
by ``polymorphic_on`` and ``version_id_col``, by specifying the
classdecorator itself into the dictionary - the declarative extension
will resolve them at class construction time::
class MyMixin:
@declared_attr
def type_(cls):
return Column(String(50))
__mapper_args__= {'polymorphic_on':type_}
class MyModel(Base,MyMixin):
__tablename__='test'
id = Column(Integer, primary_key=True)
Mixing in Relationships
~~~~~~~~~~~~~~~~~~~~~~~
Relationships created by :func:`~sqlalchemy.orm.relationship` are provided
with declarative mixin classes exclusively using the
:func:`.declared_attr` approach, eliminating any ambiguity
which could arise when copying a relationship and its possibly column-bound
contents. Below is an example which combines a foreign key column and a
relationship so that two classes ``Foo`` and ``Bar`` can both be configured to
reference a common target class via many-to-one::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target")
class Foo(Base, RefTargetMixin):
__tablename__ = 'foo'
id = Column(Integer, primary_key=True)
class Bar(Base, RefTargetMixin):
__tablename__ = 'bar'
id = Column(Integer, primary_key=True)
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True)
:func:`~sqlalchemy.orm.relationship` definitions which require explicit
primaryjoin, order_by etc. expressions should use the string forms
for these arguments, so that they are evaluated as late as possible.
To reference the mixin class in these expressions, use the given ``cls``
to get its name::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target",
primaryjoin="Target.id==%s.target_id" % cls.__name__
)
Mixing in deferred(), column_property(), etc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like :func:`~sqlalchemy.orm.relationship`, all
:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as
:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`,
etc. ultimately involve references to columns, and therefore, when
used with declarative mixins, have the :func:`.declared_attr`
requirement so that no reliance on copying is needed::
class SomethingMixin(object):
@declared_attr
def dprop(cls):
return deferred(Column(Integer))
class Something(Base, SomethingMixin):
__tablename__ = "something"
Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``__tablename__`` attribute in conjunction with the hierarchy of
classes involved in a declarative mixin scenario controls what type of
table inheritance, if any,
is configured by the declarative extension.
If the ``__tablename__`` is computed by a mixin, you may need to
control which classes get the computed attribute in order to get the
type of table inheritance you require.
For example, if you had a mixin that computes ``__tablename__`` but
where you wanted to use that mixin in a single table inheritance
hierarchy, you can explicitly specify ``__tablename__`` as ``None`` to
indicate that the class should not have a table mapped::
from sqlalchemy.ext.declarative import declared_attr
class Tablename:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
class Person(Base,Tablename):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
Alternatively, you can make the mixin intelligent enough to only
return a ``__tablename__`` in the event that no table is already
mapped in the inheritance hierarchy. To help with this, a
:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper
function is provided that returns ``True`` if a parent class already
has a mapped table.
As an example, here's a mixin that will only allow single table
inheritance::
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import has_inherited_table
class Tablename:
@declared_attr
def __tablename__(cls):
if has_inherited_table(cls):
return None
return cls.__name__.lower()
class Person(Base,Tablename):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
primary_language = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'engineer'}
If you want to use a similar pattern with a mix of single and joined
table inheritance, you would need a slightly different mixin and use
it on any joined table child classes in addition to their parent
classes::
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import has_inherited_table
class Tablename:
@declared_attr
def __tablename__(cls):
if (has_inherited_table(cls) and
Tablename not in cls.__bases__):
return None
return cls.__name__.lower()
class Person(Base,Tablename):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
# This is single table inheritance
class Engineer(Person):
primary_language = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'engineer'}
# This is joined table inheritance
class Manager(Person,Tablename):
id = Column(Integer, ForeignKey('person.id'), primary_key=True)
preferred_recreation = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'manager'}
Combining Table/Mapper Arguments from Multiple Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of ``__table_args__`` or ``__mapper_args__``
specified with declarative mixins, you may want to combine
some parameters from several mixins with those you wish to
define on the class iteself. The
:func:`.declared_attr` decorator can be used
here to create user-defined collation routines that pull
from multiple collections::
from sqlalchemy.ext.declarative import declared_attr
class MySQLSettings:
__table_args__ = {'mysql_engine':'InnoDB'}
class MyOtherMixin:
__table_args__ = {'info':'foo'}
class MyModel(Base,MySQLSettings,MyOtherMixin):
__tablename__='my_model'
@declared_attr
def __table_args__(cls):
args = dict()
args.update(MySQLSettings.__table_args__)
args.update(MyOtherMixin.__table_args__)
return args
id = Column(Integer, primary_key=True)
Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To define a named, potentially multicolumn :class:`.Index` that applies to all
tables derived from a mixin, use the "inline" form of :class:`.Index` and establish
it as part of ``__table_args__``::
class MyMixin(object):
a = Column(Integer)
b = Column(Integer)
@declared_attr
def __table_args__(cls):
return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
class MyModel(Base,MyMixin):
__tablename__ = 'atable'
c = Column(Integer,primary_key=True)
Class Constructor
=================
As a convenience feature, the :func:`declarative_base` sets a default
constructor on classes which takes keyword arguments, and assigns them
to the named attributes::
e = Engineer(primary_language='python')
Sessions
========
Note that ``declarative`` does nothing special with sessions, and is
only intended as an easier way to configure mappers and
:class:`~sqlalchemy.schema.Table` objects. A typical application
setup using :func:`~sqlalchemy.orm.scoped_session` might look like::
engine = create_engine('postgresql://scott:tiger@localhost/test')
Session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Mapped instances then make usage of
:class:`~sqlalchemy.orm.session.Session` in the usual way.
"""
from sqlalchemy.schema import Table, Column, MetaData, _get_table_key
from sqlalchemy.orm import synonym as _orm_synonym, mapper,\
comparable_property, class_mapper
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.properties import RelationshipProperty, ColumnProperty, CompositeProperty
from sqlalchemy.orm.util import _is_mapped_class
from sqlalchemy import util, exc
from sqlalchemy.sql import util as sql_util, expression
__all__ = 'declarative_base', 'synonym_for', \
'comparable_using', 'instrument_declarative'
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been "
"instrumented declaratively" % cls)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
"""
for class_ in cls.__mro__:
if getattr(class_,'__table__',None) is not None:
return True
return False
def _as_declarative(cls, classname, dict_):
# dict_ will be a dictproxy, which we can't write to, and we need to!
dict_ = dict(dict_)
column_copies = {}
potential_columns = {}
mapper_args = {}
table_args = inherited_table_args = None
tablename = None
parent_columns = ()
declarative_props = (declared_attr, util.classproperty)
for base in cls.__mro__:
class_mapped = _is_mapped_class(base)
if class_mapped:
parent_columns = base.__table__.c.keys()
for name,obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
mapper_args = cls.__mapper_args__
elif name == '__tablename__':
if not tablename and (
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
if not isinstance(table_args, (tuple, dict, type(None))):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None")
if base is not cls:
inherited_table_args = True
elif class_mapped:
continue
elif base is not cls:
# we're a mixin.
if isinstance(obj, Column):
if obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
if name not in dict_ and not (
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
) and name not in potential_columns:
potential_columns[name] = \
column_copies[obj] = \
obj.copy()
column_copies[obj]._creation_order = \
obj._creation_order
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes.")
elif isinstance(obj, declarative_props):
dict_[name] = ret = \
column_copies[obj] = getattr(cls, name)
if isinstance(ret, (Column, MapperProperty)) and \
ret.doc is None:
ret.doc = obj.__doc__
# apply inherited columns as we should
for k, v in potential_columns.items():
if tablename or (v.name or k) not in parent_columns:
dict_[k] = v
if inherited_table_args and not tablename:
table_args = None
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = column_copies.get(v,v)
if classname in cls._decl_class_registry:
util.warn("The classname %r is already in the registry of this"
" declarative base, mapped to %r" % (
classname,
cls._decl_class_registry[classname]
))
cls._decl_class_registry[classname] = cls
our_stuff = util.OrderedDict()
for k in dict_:
value = dict_[k]
if isinstance(value, declarative_props):
value = getattr(cls, k)
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
if not isinstance(value, (Column, MapperProperty)):
continue
if k == 'metadata':
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = _deferred_relationship(cls, value)
our_stuff[k] = prop
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
cols = set()
for key, c in our_stuff.iteritems():
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and \
col.table is None:
_undefer_column_name(key, col)
cols.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
cols.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
cols = sorted(cols, key=lambda c:c._creation_order)
table = None
if '__table__' not in dict_:
if tablename is not None:
if isinstance(table_args, dict):
args, table_kw = (), table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args, table_kw = table_args, {}
else:
args, table_kw = (), {}
autoload = dict_.get('__autoload__')
if autoload:
table_kw['autoload'] = True
cls.__table__ = table = Table(tablename, cls.metadata,
*(tuple(cols) + tuple(args)),
**table_kw)
else:
table = cls.__table__
if cols:
for c in cols:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
if 'inherits' not in mapper_args:
for c in cls.__bases__:
if _is_mapped_class(c):
mapper_args['inherits'] = cls._decl_class_registry.get(
c.__name__, None)
break
if hasattr(cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__)
else:
mapper_cls = mapper
if table is None and 'inherits' not in mapper_args:
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif 'inherits' in mapper_args and not mapper_args.get('concrete', False):
inherited_mapper = class_mapper(mapper_args['inherits'],
compile=False)
inherited_table = inherited_mapper.local_table
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in cols:
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
if c.name in inherited_table.c:
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
# single or joined inheritance
# exclude any cols on the inherited table which are not mapped on the
# parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = class_mapper(mapper_args['inherits'],
compile=False)
inherited_table = inherited_mapper.local_table
if 'exclude_properties' not in mapper_args:
mapper_args['exclude_properties'] = exclude_properties = \
set([c.key for c in inherited_table.c
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update([c.key for c in cols])
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in our_stuff.items():
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the superclass column
# first. this corresponds to the
# append() in mapper._configure_property().
# change this ordering when we do [ticket:1892]
our_stuff[k] = p.columns + [col]
cls.__mapper__ = mapper_cls(cls,
table,
properties=our_stuff,
**mapper_args)
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' in cls.__dict__:
return type.__init__(cls, classname, bases, dict_)
_as_declarative(cls, classname, cls.__dict__)
return type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
_deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
class _GetColumns(object):
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mapper = class_mapper(self.cls, compile=False)
if mapper:
if not mapper.has_property(key):
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key))
prop = mapper.get_property(key)
if not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key)
return getattr(self.cls, key)
class _GetTable(object):
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[
_get_table_key(key, self.key)
]
def _deferred_relationship(cls, prop):
def resolve_arg(arg):
import sqlalchemy
def access_cls(key):
if key in cls._decl_class_registry:
return _GetColumns(cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
else:
return sqlalchemy.__dict__[key]
d = util.PopulateDict(access_cls)
def return_cls():
try:
                x = eval(arg, globals(), d)  # names resolve lazily through access_cls
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError, n:
raise exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
(prop.parent, arg, n.args[0], cls)
)
return return_cls
if isinstance(prop, RelationshipProperty):
for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side'):
v = getattr(prop, attr)
if isinstance(v, basestring):
setattr(prop, attr, resolve_arg(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
if attr in kwargs and isinstance(kwargs[attr], basestring):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being
decorated is the 'descriptor', otherwise passes its arguments through to
synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative setting
and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
.. note:: @declared_attr is available as
``sqlalchemy.util.classproperty`` for SQLAlchemy versions
0.6.2, 0.6.3, 0.6.4.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(self):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
"""
def __init__(self, fget, *arg, **kw):
super(declared_attr, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
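    A short illustration (class and attribute names are assumed)::

        someobject = SomeClass(name='some name', value=5)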
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = '__init__'
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
name='Base', constructor=_declarative_constructor,
metaclass=DeclarativeMeta):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.base.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
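    A minimal usage sketch (table and column names are illustrative)::

        Base = declarative_base()

        class SomeClass(Base):
            __tablename__ = 'some_table'
            id = Column(Integer, primary_key=True)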
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(_decl_class_registry=dict(),
metadata=lcl_metadata)
if constructor:
class_dict['__init__'] = constructor
if mapper:
class_dict['__mapper_cls__'] = mapper
return metaclass(name, bases, class_dict)
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
""" Helper functions and exceptions for the Zenodo extension """
import os
import re
import tempfile
from urllib.parse import urlparse, urlencode
import zipfile
class UserMistake(Exception):
"""Raised when something went wrong due to user input"""
pass
def get_id(doi):
"""Parses Zenodo DOI to isolate record id
Parameters
----------
doi : string
doi to isolate record id from; must not be empty
Returns
------
string
The Zenodo record id at the end of the doi
Notes
-----
- DOIs are expected to be in the form 10.xxxx/zenodo.xxxxx
- Behaviour is undefined if they are given in another format
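    Examples
    --------
    >>> get_id('10.5281/zenodo.3588456')  # DOI value is illustrative
    '3588456'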
"""
if not doi:
raise Exception("No doi")
elif not re.match(r'10\.[0-9]+\/zenodo\.[0-9]+$', doi):
raise Exception("Doi is invalid (wrong format)")
else:
record_id = doi.split('.')[-1]
return record_id
def zip_dir(directory):
"""Create zip file filename from directory
Parameters
----------
directory : string
Explicit path to directory to be zipped
Returns
-------
string
Full path of zipped file
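    Examples
    --------
    >>> zip_dir('/home/user/notebooks')  # doctest: +SKIP (paths illustrative)
    '/tmp/tmpabc123/archive.zip'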
"""
if not os.path.exists(directory):
raise UserMistake("That directory path is not valid. To use your"
" work directory, leave the directory field empty")
# Create temporary directory for archive
temp_dir = tempfile.mkdtemp()
filepath = os.path.join(temp_dir, 'archive.zip')
with zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED) as zipf:
for root, dirs, files in os.walk(directory):
for afile in files:
zipf.write(os.path.join(root, afile))
return filepath
def add_query_parameter(url, params):
"""Add query parameters to an existing url
Parameters
----------
url : string
Url to add to
params : dict
Labels and values to add to url
Returns
-------
string
Updated url
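    Examples
    --------
    >>> add_query_parameter('https://zenodo.org/api?a=1', {'b': 2})
    'https://zenodo.org/api?a=1&b=2'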
"""
if not params:
raise Exception("No query arguments given")
if not url:
raise Exception("Empty url")
url += ('&' if urlparse(url).query else '?') + urlencode(params)
return url
# coding: utf-8
from unittest import TestCase
from siebenapp.goaltree import Goals
from siebenapp.domain import (
EdgeType,
HoldSelect,
ToggleClose,
Delete,
ToggleLink,
Add,
Select,
Insert,
Rename,
)
from siebenapp.tests.dsl import build_goaltree, open_, selected, previous, clos_
class GoalsTest(TestCase):
def setUp(self):
self.messages = []
self.goals = Goals("Root", self._register_message)
def _register_message(self, msg):
self.messages.append(msg)
def build(self, *goal_prototypes):
return build_goaltree(*goal_prototypes, message_fn=self._register_message)
def test_there_is_one_goal_at_start(self):
assert self.goals.q(keys="name,switchable") == {
1: {"name": "Root", "switchable": True}
}
def test_new_goal_moves_to_top(self):
self.goals.accept(Add("A"))
assert self.goals.q(keys="name,switchable") == {
1: {"name": "Root", "switchable": False},
2: {"name": "A", "switchable": True},
}
def test_added_goal_has_strong_link_with_parent(self):
self.goals.accept(Add("New"))
assert self.goals.q(keys="name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "New", "edge": []},
}
def test_two_new_goals_move_to_top(self):
self.goals.accept_all(Add("A"), Add("B"))
assert self.goals.q(keys="name,switchable") == {
1: {"name": "Root", "switchable": False},
2: {"name": "A", "switchable": True},
3: {"name": "B", "switchable": True},
}
def test_two_goals_in_a_chain(self):
self.goals.accept_all(Add("A"), Add("AA", 2))
assert self.goals.q(keys="name,switchable") == {
1: {"name": "Root", "switchable": False},
2: {"name": "A", "switchable": False},
3: {"name": "AA", "switchable": True},
}
def test_rename_goal(self):
self.goals.accept_all(Add("Boom"), Select(2), Rename("A"))
assert self.goals.q() == {1: {"name": "Root"}, 2: {"name": "A"}}
def test_insert_goal_in_the_middle(self):
self.goals.accept_all(Add("B"), HoldSelect(), Select(2))
assert self.goals.q(keys="name,edge,switchable") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)], "switchable": False},
2: {"name": "B", "edge": [], "switchable": True},
}
self.goals.accept(Insert("A"))
assert self.goals.q(keys="name,edge,switchable") == {
1: {"name": "Root", "edge": [(3, EdgeType.PARENT)], "switchable": False},
2: {"name": "B", "edge": [], "switchable": True},
3: {"name": "A", "edge": [(2, EdgeType.PARENT)], "switchable": False},
}
def test_insert_goal_between_independent_goals(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", select=previous),
open_(3, "B", select=selected),
)
self.goals.accept(Insert("Wow"))
assert self.goals.q(keys="name,edge,switchable") == {
1: {
"name": "Root",
"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)],
"switchable": False,
},
2: {"name": "A", "edge": [(4, EdgeType.BLOCKER)], "switchable": False},
3: {"name": "B", "edge": [], "switchable": True},
4: {"name": "Wow", "edge": [(3, EdgeType.BLOCKER)], "switchable": False},
}
def test_reverse_insertion(self):
"""Not sure whether such trick should be legal"""
self.goals = self.build(
open_(1, "Root", [2], select=selected),
open_(2, "Selected", select=previous),
)
self.goals.accept(Insert("Intermediate?"))
# No, it's not intermediate
assert self.goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Selected", "edge": [(3, EdgeType.BLOCKER)]},
3: {"name": "Intermediate?", "edge": []},
}
def test_close_single_goal(self):
assert self.goals.q(keys="name,open") == {1: {"name": "Root", "open": True}}
self.goals.accept(ToggleClose())
assert self.goals.q(keys="name,open,switchable") == {
1: {"name": "Root", "open": False, "switchable": True}
}
def test_reopen_goal(self):
self.goals = self.build(open_(1, "Root", [2]), clos_(2, "A", select=selected))
assert self.goals.q(keys="open") == {1: {"open": True}, 2: {"open": False}}
self.goals.accept(ToggleClose())
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": False},
2: {"open": True, "switchable": True},
}
def test_close_goal_again(self):
self.goals = self.build(
open_(1, "Root", [2], select=selected),
open_(2, "A", [3]),
clos_(3, "Ab"),
)
self.goals.accept_all(Select(2), ToggleClose())
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": True},
2: {"open": False, "switchable": True},
3: {"open": False, "switchable": False},
}
self.goals.accept_all(Select(2), ToggleClose())
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": False},
2: {"open": True, "switchable": True},
3: {"open": False, "switchable": True},
}
self.goals.accept_all(Select(2), ToggleClose())
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": True},
2: {"open": False, "switchable": True},
3: {"open": False, "switchable": False},
}
def test_closed_leaf_goal_could_not_be_reopened(self):
self.goals = self.build(
open_(1, "Root", [2], select=selected), clos_(2, "A", [3]), clos_(3, "B")
)
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": True},
2: {"open": False, "switchable": True},
3: {"open": False, "switchable": False},
}
self.goals.accept_all(Select(3), ToggleClose())
# nothing should change
assert self.goals.q(keys="open,switchable") == {
1: {"open": True, "switchable": True},
2: {"open": False, "switchable": True},
3: {"open": False, "switchable": False},
}
def test_goal_in_the_middle_could_not_be_closed(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", blockers=[4]),
open_(3, "B", [4], select=selected),
open_(4, "C"),
)
self.goals.accept(ToggleClose())
assert self.goals.q(keys="open") == {
1: {"open": True},
2: {"open": True},
3: {"open": True},
4: {"open": True},
}
def test_delete_single_goal(self):
self.goals = self.build(open_(1, "Root", [2]), open_(2, "A", select=selected))
self.goals.accept(Delete())
assert self.goals.q(keys="name,select,switchable") == {
1: {"name": "Root", "select": "select", "switchable": True},
}
def test_enumeration_should_not_be_changed_after_delete(self):
self.goals = self.build(
open_(1, "Root", [2, 3]), open_(2, "A", select=selected), open_(3, "B")
)
self.goals.accept(Delete())
assert self.goals.q(keys="name,switchable") == {
1: {"name": "Root", "switchable": False},
3: {"name": "B", "switchable": True},
}
def test_remove_goal_chain_with_children(self):
self.goals = self.build(
open_(1, "Root", [2]), open_(2, "A", [3], select=selected), open_(3, "B")
)
self.goals.accept(Delete())
assert self.goals.q() == {1: {"name": "Root"}}
def test_relink_goal_chain_with_blockers(self):
self.goals = self.build(
open_(1, "Root", [2]),
open_(2, "A", blockers=[3], select=selected),
open_(3, "B"),
)
self.goals.accept(Delete())
assert self.goals.q("name,edge") == {
1: {"name": "Root", "edge": [(3, EdgeType.BLOCKER)]},
3: {"name": "B", "edge": []},
}
def test_select_parent_after_delete(self):
self.goals = self.build(
open_(1, "Root", [2], select=previous),
open_(2, "Parent", [3]),
open_(3, "Delete me", select=selected),
)
self.goals.accept(Delete())
assert self.goals.q("name,edge,select") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)], "select": None},
2: {"name": "Parent", "edge": [], "select": "select"},
}
def test_add_link_between_goals(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", select=previous),
open_(3, "B", select=selected),
)
assert self.goals.q(keys="switchable,edge") == {
1: {
"switchable": False,
"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)],
},
2: {"switchable": True, "edge": []},
3: {"switchable": True, "edge": []},
}
self.goals.accept(ToggleLink())
assert self.goals.q(keys="switchable,edge") == {
1: {
"switchable": False,
"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)],
},
2: {"switchable": False, "edge": [(3, EdgeType.BLOCKER)]},
3: {"switchable": True, "edge": []},
}
def test_view_edges(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", [4]),
open_(3, "B", blockers=[4], select=previous),
open_(4, "C", select=selected),
)
assert self.goals.q(keys="edge,switchable") == {
1: {
"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)],
"switchable": False,
},
2: {"edge": [(4, EdgeType.PARENT)], "switchable": False},
3: {"edge": [(4, EdgeType.BLOCKER)], "switchable": False},
4: {"edge": [], "switchable": True},
}
def test_no_link_to_self_is_allowed(self):
self.goals.accept(ToggleLink())
assert self.goals.q(keys="edge") == {1: {"edge": []}}
def test_no_loops_allowed(self):
self.goals = self.build(
open_(1, "Root", [2], select=selected),
open_(2, "step", [3]),
open_(3, "next", [4]),
open_(4, "more", select=previous),
)
self.goals.accept(ToggleLink())
assert self.goals.q(keys="edge") == {
1: {"edge": [(2, EdgeType.PARENT)]},
2: {"edge": [(3, EdgeType.PARENT)]},
3: {"edge": [(4, EdgeType.PARENT)]},
4: {"edge": []},
}
def test_new_parent_link_replaces_old_one(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "Old parent", [4]),
open_(3, "New parent", select=previous),
open_(4, "Child", select=selected),
)
self.goals.accept(ToggleLink(edge_type=EdgeType.PARENT))
assert self.goals.q(keys="edge") == {
1: {"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)]},
2: {"edge": [(4, EdgeType.BLOCKER)]},
3: {"edge": [(4, EdgeType.PARENT)]},
4: {"edge": []},
}
def test_new_parent_link_replaces_old_one_when_changed_from_blocker(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", select=selected),
open_(3, "B", blockers=[2], select=previous),
)
self.goals.accept(ToggleLink(edge_type=EdgeType.PARENT))
assert self.goals.q("name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.BLOCKER), (3, EdgeType.PARENT)]},
2: {"name": "A", "edge": []},
3: {"name": "B", "edge": [(2, EdgeType.PARENT)]},
}
def test_remove_link_between_goals(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", blockers=[3], select=previous),
open_(3, "B", select=selected),
)
self.goals.accept(ToggleLink(edge_type=EdgeType.BLOCKER))
assert self.goals.q(keys="edge,switchable") == {
1: {
"edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)],
"switchable": False,
},
2: {"edge": [], "switchable": True},
3: {"edge": [], "switchable": True},
}
def test_change_link_type(self):
self.goals = self.build(
open_(1, "Root", [2], select=previous), open_(2, "Top", [], select=selected)
)
assert self.goals.q(keys="name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Top", "edge": []},
}
self.goals.accept(ToggleLink())
assert self.goals.q(keys="name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.BLOCKER)]},
2: {"name": "Top", "edge": []},
}
self.goals.accept(ToggleLink(edge_type=EdgeType.PARENT))
assert self.goals.q(keys="name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "Top", "edge": []},
}
def test_remove_blocked_goal_without_children(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "A", [4]),
open_(3, "B", blockers=[4]),
open_(4, "C", select=selected),
)
assert self.goals.q(keys="name,edge") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT), (3, EdgeType.PARENT)]},
2: {"name": "A", "edge": [(4, EdgeType.PARENT)]},
3: {"name": "B", "edge": [(4, EdgeType.BLOCKER)]},
4: {"name": "C", "edge": []},
}
self.goals.accept_all(Select(3), Delete())
assert self.goals.q(keys="name,edge,switchable") == {
1: {"name": "Root", "edge": [(2, EdgeType.PARENT)], "switchable": False},
2: {"name": "A", "edge": [(4, EdgeType.PARENT)], "switchable": False},
4: {"name": "C", "edge": [], "switchable": True},
}
def test_root_goal_is_selected_by_default(self):
assert self.goals.q(keys="select") == {1: {"select": "select"}}
self.goals.accept(Add("A"))
assert self.goals.q(keys="select") == {
1: {"select": "select"},
2: {"select": None},
}
self.goals.accept(Add("B"))
assert self.goals.q(keys="select") == {
1: {"select": "select"},
2: {"select": None},
3: {"select": None},
}
def test_new_goal_is_added_to_the_selected_node(self):
self.goals.accept_all(Add("A"), Select(2))
assert self.goals.q(keys="name,select") == {
1: {"name": "Root", "select": "prev"},
2: {"name": "A", "select": "select"},
}
self.goals.accept(Add("B"))
assert self.goals.q(keys="name,select,edge") == {
1: {"name": "Root", "select": "prev", "edge": [(2, EdgeType.PARENT)]},
2: {"name": "A", "select": "select", "edge": [(3, EdgeType.PARENT)]},
3: {"name": "B", "select": None, "edge": []},
}
def test_move_selection_to_another_open_goal_after_closing(self):
self.goals = self.build(
open_(1, "Root", [2, 3]), open_(2, "A", select=selected), open_(3, "B")
)
self.goals.accept(ToggleClose())
assert self.goals.q(keys="open,select") == {
1: {"open": True, "select": None},
2: {"open": False, "select": None},
3: {"open": True, "select": "select"},
}
def test_move_selection_to_another_open_goal_with_given_root_after_closing(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "Should not be selected"),
open_(3, "Subroot", [4, 5]),
open_(4, "Must be selected"),
open_(5, "Closing", select=selected),
)
self.goals.accept(ToggleClose(3))
assert self.goals.q(keys="open,select") == {
1: {"open": True, "select": None},
2: {"open": True, "select": None},
3: {"open": True, "select": None},
4: {"open": True, "select": "select"},
5: {"open": False, "select": None},
}
def test_do_not_select_unswitchable_goal_after_closing(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "Should not be selected"),
open_(3, "Subroot", [4, 5]),
open_(4, "intermediate", [6]),
open_(5, "Closing", select=selected),
open_(6, "Must be selected"),
)
self.goals.accept(ToggleClose(3))
assert self.goals.q(keys="open,select") == {
1: {"open": True, "select": None},
2: {"open": True, "select": None},
3: {"open": True, "select": None},
4: {"open": True, "select": None},
5: {"open": False, "select": None},
6: {"open": True, "select": "select"},
}
def test_ignore_wrong_selection(self):
self.goals.accept(Select(2))
assert self.goals.q(keys="select") == {1: {"select": "select"}}
def test_do_not_select_deleted_goals(self):
self.goals = self.build(
open_(1, "Root", [2]), open_(2, "broken", select=selected)
)
self.goals.accept_all(Delete(), Select(2))
assert self.goals.q(keys="select") == {1: {"select": "select"}}
def test_selection_should_be_instant(self):
self.goals = self.build(
open_(1, "Root", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11], select=selected),
open_(2, "A"),
open_(3, "B"),
open_(4, "C"),
open_(5, "D"),
open_(6, "E"),
open_(7, "F"),
open_(8, "G"),
open_(9, "H"),
open_(10, "I"),
open_(11, "J"),
)
self.goals.accept(Select(2))
assert self.goals.q(keys="select") == {
1: {"select": "prev"},
2: {"select": "select"},
3: {"select": None},
4: {"select": None},
5: {"select": None},
6: {"select": None},
7: {"select": None},
8: {"select": None},
9: {"select": None},
10: {"select": None},
11: {"select": None},
}
self.goals.accept(Select(11))
assert self.goals.q(keys="select") == {
1: {"select": "prev"},
2: {"select": None},
3: {"select": None},
4: {"select": None},
5: {"select": None},
6: {"select": None},
7: {"select": None},
8: {"select": None},
9: {"select": None},
10: {"select": None},
11: {"select": "select"},
}
def test_add_events(self):
assert self.goals.events().pop() == ("add", 1, "Root", True)
self.goals.accept(Add("Next"))
assert self.goals.events()[-2] == ("add", 2, "Next", True)
assert self.goals.events()[-1] == ("link", 1, 2, EdgeType.PARENT)
def test_select_events(self):
self.goals.accept_all(Add("Next"), Select(2))
assert self.goals.events()[-1] == ("select", 2)
self.goals.accept_all(HoldSelect(), Select(1))
assert self.goals.events()[-2] == ("hold_select", 2)
assert self.goals.events()[-1] == ("select", 1)
def test_toggle_close_events(self):
self.goals.accept(ToggleClose())
assert self.goals.events()[-3] == ("toggle_close", False, 1)
assert self.goals.events()[-2] == ("select", 1)
assert self.goals.events()[-1] == ("hold_select", 1)
self.goals.accept(ToggleClose())
assert self.goals.events()[-1] == ("toggle_close", True, 1)
def test_rename_event(self):
self.goals.accept(Rename("New"))
assert self.goals.events()[-1] == ("rename", "New", 1)
def test_delete_events(self):
self.goals.accept_all(Add("Sheep"), Select(2), Delete())
assert self.goals.events()[-3] == ("delete", 2)
assert self.goals.events()[-2] == ("select", 1)
assert self.goals.events()[-1] == ("hold_select", 1)
def test_link_events(self):
self.goals.accept_all(
Add("Next"), Add("More"), Select(2), HoldSelect(), Select(3), ToggleLink()
)
assert self.goals.events()[-1] == ("link", 2, 3, EdgeType.BLOCKER)
self.goals.accept(ToggleLink())
assert self.goals.events()[-1] == ("unlink", 2, 3, EdgeType.BLOCKER)
def test_change_link_type_events(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
open_(2, "Lower", blockers=[3], select=previous),
open_(3, "Upper", [], select=selected),
)
self.goals.accept(ToggleLink(edge_type=EdgeType.PARENT))
assert self.goals.events()[-4] == ("link", 2, 3, EdgeType.PARENT)
assert self.goals.events()[-3] == ("unlink", 2, 3, EdgeType.BLOCKER)
assert self.goals.events()[-2] == ("link", 1, 3, EdgeType.BLOCKER)
assert self.goals.events()[-1] == ("unlink", 1, 3, EdgeType.PARENT)
def test_no_messages_at_start(self):
assert self.messages == []
def test_no_message_on_good_add(self):
self.goals = self.build(open_(1, "Root", select=selected))
self.goals.accept(Add("Success"))
assert self.messages == []
def test_message_on_wrong_add(self):
self.goals = self.build(clos_(1, "Root", select=selected))
self.goals.accept(Add("Failed"))
assert len(self.messages) == 1
def test_no_message_on_good_insert(self):
self.goals = self.build(
open_(1, "Root", [2], select=previous), open_(2, "Top", select=selected)
)
self.goals.accept(Insert("Success"))
assert self.messages == []
def test_message_on_insert_without_two_goals(self):
self.goals = self.build(open_(1, "Root", select=selected))
self.goals.accept(Insert("Failed"))
assert len(self.messages) == 1
def test_message_on_circular_insert(self):
self.goals = self.build(
open_(1, "Root", [2], select=selected), open_(2, "Top", [], select=previous)
)
self.goals.accept(Insert("Failed"))
assert len(self.messages) == 1
def test_no_message_on_valid_closing(self):
self.goals = self.build(
open_(1, "Root", [2]), open_(2, "Top", [], select=selected)
)
self.goals.accept(ToggleClose())
assert self.messages == []
def test_message_on_closing_blocked_goal(self):
self.goals = self.build(open_(1, "Root", [2], select=selected), open_(2, "Top"))
self.goals.accept(ToggleClose())
assert len(self.messages) == 1
def test_no_message_on_valid_reopening(self):
self.goals = self.build(clos_(1, "Root", [2], select=selected), clos_(2, "Top"))
self.goals.accept(ToggleClose())
assert self.messages == []
def test_message_on_reopening_blocked_goal(self):
self.goals = self.build(
clos_(1, "Root", [2]), clos_(2, "Top", [], select=selected)
)
self.goals.accept(ToggleClose())
assert len(self.messages) == 1
def test_no_message_on_delete_non_root_goal(self):
self.goals = self.build(
clos_(1, "Root", [2]), clos_(2, "Top", [], select=selected)
)
self.goals.accept(Delete())
assert self.messages == []
def test_message_on_delete_root_goal(self):
self.goals = self.build(clos_(1, "Root", [2], select=selected), clos_(2, "Top"))
self.goals.accept(Delete())
assert len(self.messages) == 1
def test_no_message_on_allowed_link(self):
self.goals = self.build(
open_(1, "Root", [2], select=previous),
open_(2, "Middle", [3]),
open_(3, "Top", [], select=selected),
)
self.goals.accept(ToggleLink())
assert self.messages == []
def test_message_on_link_to_self(self):
self.goals = self.build(
open_(1, "Root", [2]),
open_(2, "Middle", [3]),
open_(3, "Top", [], select=selected),
)
self.goals.accept(ToggleLink())
assert len(self.messages) == 1
def test_no_message_when_remove_not_last_link(self):
self.goals = self.build(
open_(1, "Root", [2, 3], select=previous),
open_(2, "Middle", blockers=[3]),
open_(3, "Top", [], select=selected),
)
self.goals.accept(ToggleLink())
assert self.messages == []
def test_message_when_remove_last_link(self):
self.goals = self.build(
open_(1, "Root", [2]),
open_(2, "Middle", [3], select=previous),
open_(3, "Top", [], select=selected),
)
self.goals.accept(ToggleLink(edge_type=EdgeType.PARENT))
assert len(self.messages) == 1
def test_message_when_closed_goal_is_blocked_by_open_one(self):
self.goals = self.build(
open_(1, "Root", [2, 3]),
clos_(2, "Middle", [], select=previous),
open_(3, "Top", [], select=selected),
)
self.goals.accept(ToggleLink())
assert len(self.messages) == 1
# -*- coding: utf-8 -*-
import json
import time
from datetime import datetime as dt
from . import test_common
class TestOomusicPlaylist(test_common.TestOomusicCommon):
def test_00_create_interact(self):
"""
Test creation and basic interaction
"""
self.FolderScanObj.with_context(test_mode=True)._scan_folder(self.Folder.id)
playlist = self.PlaylistObj.create({"name": "crotte"})
# _onchange_album_id
album1 = self.AlbumObj.search([("name", "=", "Album1")])
playlist.album_id = album1
playlist._onchange_album_id()
self.assertEqual(playlist.album_id, self.AlbumObj)
self.assertEqual(
playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name"),
[u"Song1", u"Song2"],
)
playlist.action_purge()
# _onchange_artist_id
artist1 = self.ArtistObj.search([("name", "=", "Artist1")])
playlist.artist_id = artist1
playlist._onchange_artist_id()
self.assertEqual(playlist.artist_id, self.ArtistObj)
self.assertEqual(
playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name"),
[u"Song3", u"Song4", u"Song1", u"Song2"],
)
playlist.action_purge()
# action_current
playlist.action_current()
playlist.invalidate_cache()
playlist_search = self.PlaylistObj.search([("current", "=", True)])
self.assertEqual(playlist_search, playlist)
self.cleanUp()
def test_10_create_interact(self):
"""
Test creation and basic interaction of playlist lines
"""
self.FolderScanObj.with_context(test_mode=True)._scan_folder(self.Folder.id)
tracks = self.TrackObj.search([], limit=2)
playlist = self.PlaylistObj.search([("current", "=", True)], limit=1)
# Create
playlist_line = self.PlaylistLineObj.create(
{"playlist_id": playlist.id, "track_id": tracks[0].id}
)
# Write
playlist_line.write({"track_id": tracks[1].id})
# Unlink
playlist_line.unlink()
self.cleanUp()
def test_20_player_interaction(self):
"""
Test player interaction: play, next...
"""
self.FolderScanObj.with_context(test_mode=True)._scan_folder(self.Folder.id)
playlist1 = self.PlaylistObj.search([("current", "=", True)], limit=1)
playlist2 = self.PlaylistObj.create({"name": "crotte"})
artist1 = self.ArtistObj.search([("name", "=", "Artist1")])
artist2 = self.ArtistObj.search([("name", "=", "Artist2")])
playlist1.artist_id = artist1
playlist1.audio_mode = "raw"
playlist1._onchange_artist_id()
playlist2.artist_id = artist2
playlist2._onchange_artist_id()
# oomusic_set_current
playlist1.playlist_line_ids[0].playing = True
playlist2.playlist_line_ids[0].oomusic_set_current()
self.assertEqual(playlist1.current, False)
self.assertEqual(playlist2.current, True)
self.assertEqual(playlist1.playlist_line_ids[0].playing, False)
self.assertEqual(playlist2.playlist_line_ids[0].playing, True)
# oomusic_play_skip
playlist2.playlist_line_ids[0].oomusic_play_skip(play=True)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.play_count, 1)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.play_skip_ratio, 1)
playlist2.playlist_line_ids[0].track_id.last_play_skip_ratio = dt.now().replace(year=2016)
playlist2.playlist_line_ids[0].oomusic_play_skip(play=True)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.play_count, 2)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.play_skip_ratio, 2)
playlist2.playlist_line_ids[0].track_id.last_play_skip_ratio = dt.now().replace(year=2016)
playlist2.playlist_line_ids[0].oomusic_play_skip(play=False)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.skip_count, 1)
self.assertEqual(playlist2.playlist_line_ids[0].track_id.play_skip_ratio, 2)
# oomusic_play
res = json.loads(playlist1.playlist_line_ids[0].with_context(test_mode=True).oomusic_play())
track = playlist1.playlist_line_ids[0].track_id
self.assertEqual(res["track_id"], track.id)
self.assertEqual(res["title"], "{} - {}".format(track.artist_id.name, track.name))
self.assertEqual(res["duration"], track.duration)
self.assertEqual(res["image"], "TEST")
src = res["src"][0].split("?")
src[1] = src[1].split("&")
src[1].sort()
self.assertEqual(src[0], "/oomusic/trans/{}.mp3".format(track.id))
self.assertEqual(src[1], [u"mode=raw", u"seek=0"])
src = res["src"][1].split("?")
src[1] = src[1].split("&")
src[1].sort()
self.assertEqual(src[0], "/oomusic/trans/{}.opus".format(track.id))
self.assertEqual(src[1], [u"mode=standard", u"seek=0"])
src = res["src"][2].split("?")
src[1] = src[1].split("&")
src[1].sort()
self.assertEqual(src[0], "/oomusic/trans/{}.ogg".format(track.id))
self.assertEqual(src[1], [u"mode=standard", u"seek=0"])
src = res["src"][3].split("?")
src[1] = src[1].split("&")
src[1].sort()
self.assertEqual(src[0], u"/oomusic/trans/{}.mp3".format(track.id))
self.assertEqual(src[1], [u"mode=standard", u"seek=0"])
# oomusic_next
res = json.loads(playlist1.playlist_line_ids[0].with_context(test_mode=True).oomusic_next())
track = playlist1.playlist_line_ids[1].track_id
self.assertEqual(res["track_id"], track.id)
res = json.loads(
playlist1.playlist_line_ids[-1].with_context(test_mode=True).oomusic_next()
)
track = playlist1.playlist_line_ids[0].track_id
self.assertEqual(res["track_id"], track.id)
# oomusic_previous
res = json.loads(
playlist1.playlist_line_ids[0].with_context(test_mode=True).oomusic_previous()
)
track = playlist1.playlist_line_ids[-1].track_id
self.assertEqual(res["track_id"], track.id)
res = json.loads(
playlist1.playlist_line_ids[1].with_context(test_mode=True).oomusic_previous()
)
track = playlist1.playlist_line_ids[0].track_id
self.assertEqual(res["track_id"], track.id)
# oomusic_last_track
res = json.loads(
playlist1.playlist_line_ids[0].with_context(test_mode=True).oomusic_last_track()
)
track = playlist2.playlist_line_ids[0].track_id
self.assertEqual(res["track_id"], track.id)
self.cleanUp()
def test_30_smart_playlist(self):
"""
Test smart playlists
"""
self.FolderScanObj.with_context(test_mode=True)._scan_folder(self.Folder.id)
playlist = self.PlaylistObj.create({"name": "crotte"})
album1 = self.AlbumObj.search([("name", "=", "Album1")])
album1.track_ids[0].oomusic_star()
album1.track_ids[1].rating = "5"
# rnd
playlist.smart_playlist = "rnd"
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song1", "Song2", "Song3", "Song4", "Song5", "Song6"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# Prepare the 'played' part
playlist.album_id = album1
playlist._onchange_album_id()
playlist.playlist_line_ids[0].with_context(test_mode=True).oomusic_set_current()
playlist.playlist_line_ids[0].oomusic_play_skip(play=True)
playlist.playlist_line_ids[0].track_id.last_play_skip_ratio = dt.now().replace(year=2016)
playlist.playlist_line_ids[0].with_context(test_mode=True).oomusic_set_current()
playlist.playlist_line_ids[0].oomusic_play_skip(play=True)
playlist.playlist_line_ids[0].track_id.last_play_skip_ratio = dt.now().replace(year=2016)
time.sleep(2)
playlist.playlist_line_ids[1].with_context(test_mode=True).oomusic_set_current()
playlist.playlist_line_ids[1].oomusic_play_skip(play=True)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# played
playlist.smart_playlist = "played"
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song1", "Song2"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# not_played
playlist.smart_playlist = "not_played"
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song3", "Song4", "Song5", "Song6"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# most_played
playlist.smart_playlist = "most_played"
playlist.smart_playlist_qty = 1
playlist.action_add_to_playlist()
self.assertEqual(
playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name"), ["Song1"]
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# last_listened
playlist.smart_playlist = "last_listened"
playlist.smart_playlist_qty = 1
playlist.action_add_to_playlist()
self.assertEqual(
playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name"), ["Song2"]
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# recent
playlist.smart_playlist = "recent"
playlist.smart_playlist_qty = 20
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song1", "Song2", "Song3", "Song4", "Song5", "Song6"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# favorite
playlist.smart_playlist = "favorite"
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song1"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# best_rated
playlist.smart_playlist = "best_rated"
playlist.smart_playlist_qty = 1
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song2"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
# worst_rated
playlist.smart_playlist = "worst_rated"
playlist.smart_playlist_qty = 1
playlist.action_add_to_playlist()
self.assertEqual(
set(playlist.mapped("playlist_line_ids").mapped("track_id").mapped("name")),
set(["Song1"]),
)
playlist.invalidate_cache()
playlist.action_purge()
playlist.invalidate_cache()
self.cleanUp()
def test_40_dynamic_playlist(self):
"""
Test dynamic playlists
"""
self.FolderScanObj.with_context(test_mode=True)._scan_folder(self.Folder.id)
playlist = self.PlaylistObj.create(
{"name": "crotte", "smart_playlist": "rnd", "smart_playlist_qty": 3, "dynamic": True}
)
playlist.action_add_to_playlist()
# There should be 3 tracks
self.assertEqual(len(playlist.playlist_line_ids), 3)
# A track is automatically added when playing
playlist.playlist_line_ids[0].with_context(test_mode=True).oomusic_set_current()
self.assertEqual(len(playlist.playlist_line_ids), 4)
# When playing the last track, it should be moved at second position
last_track = playlist.playlist_line_ids[3].track_id
playlist.playlist_line_ids[3].with_context(test_mode=True).oomusic_set_current()
playlist.invalidate_cache()
self.assertEqual(last_track, playlist.playlist_line_ids[1].track_id)
# Source repository: IBM/blackbox-adversarial-reprogramming
import tensorflow as tf
def func(train_loss, iNum, var_noises):
image_size = 299
batchsize = 10
q_batch = 1
losses = []
glist = []
    ## Set parameters
    beta = 0.1  # smoothing radius of the random perturbations
    d = image_size*image_size*3  # input dimensionality
    b_constant = d  # scaling constant of the estimator
    ## gradient-free (zeroth-order) gradient estimation
for i in range(iNum+1):
loss = train_loss[i* batchsize : i*batchsize + batchsize]
losses.append(loss)
for i in range(0,len(losses)-1):
v = tf.expand_dims(var_noises[i], axis=0)
l = tf.expand_dims(losses[i+1] - losses[0], 1)
mul = tf.matmul(l,v)
grad = b_constant * mul / beta
glist.append(grad)
glist = tf.stack(glist, axis=0)
print("glist: ",glist.shape)
avg_grad = tf.reduce_sum(glist, 0) / q_batch
print(avg_grad.shape)
estimate_grad = tf.reduce_sum(avg_grad, axis=0) / batchsize
    return tf.reshape(estimate_grad, shape=[image_size, image_size, 3])
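
# Illustrative call (shapes assumed): with batchsize=10 and q_batch=1,
# train_loss stacks the clean-batch losses followed by one perturbed batch,
# and var_noises holds the matching perturbation directions.
# grad_image = func(train_loss, iNum=1, var_noises=var_noises)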
'''
Code for "Three-dimensional imaging through scattering media based on confocal diffuse tomography"
<NAME> and <NAME>
See README file in this directory for instructions on how to setup and run the code
'''
import h5py
import time
import numpy as np
from numpy.fft import ifftn, fftn
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as TorchF
from utils import fk, compl_mul, conj
class CDTReconstruction():
# class to define scattering parameters and perform
# reconstruction using confocal diffuse tomography
def __init__(self, scene, mu_s=None, zd=None, pause=5, device=torch.device('cuda:0')):
self.device = device
self.scene = scene
self.pause = pause
# set hyper parameters
if scene == 'letter_s':
self.snr = 1e4 # SNR parameter for Wiener deconvolution
self.scan_size = 0.6 # size of scanned area
self.size_calibration = 1.06 # calibrated scanned area scaling for reconstruction
self.exposure_time = 60 / 32**2 # per pixel exposure time, seconds
elif scene == 'mannequin':
self.snr = 2e4
self.scan_size = 0.7
self.size_calibration = 0.87
self.exposure_time = 720 / 32**2
elif scene == 'letters_ut':
self.snr = 1e4
self.scan_size = 0.7
self.size_calibration = 1.0
self.exposure_time = 600 / 32**2
elif scene == 'letter_t':
self.snr = 2.5e3
self.scan_size = 0.7
self.size_calibration = 1.0
self.exposure_time = 3600 / 32**2
elif scene == 'cones':
self.snr = 2e4
self.scan_size = 0.7
self.size_calibration = 1.0
self.exposure_time = 400 / 32**2
elif scene == 'resolution_50':
self.snr = 1.5e4
self.scan_size = 0.7
self.size_calibration = 1.02
self.exposure_time = 80 / 32**2
elif scene == 'resolution_70':
self.snr = 1.5e4
self.scan_size = 0.7
self.size_calibration = 1.04
self.exposure_time = 80 / 32**2
elif 'letter_u' in scene:
self.snr = 5e3
self.scan_size = 0.7
self.size_calibration = 1.0
self.exposure_time = 60 / 32**2
else:
raise ValueError('Unexpected input to scene parameter.')
# physical parameters
# found by minimizing model fit error to calibration data
self.c0 = 3e8
self.n = 1.12
self.c = self.c0/self.n
self.mu_a = 0.53
self.mu_s = 262
self.ze = 0.0036
# volume dimensions
self.Nx = 32
self.Ny = 32
self.Nz = 128
self.xmin = -self.size_calibration * self.scan_size / 2
self.xmax = self.size_calibration * self.scan_size / 2
self.ymin = -self.size_calibration * self.scan_size / 2
self.ymax = self.size_calibration * self.scan_size / 2
self.zmin = 0
self.zmax = 2 # maximum path length in hidden volume (meters)
self.x = np.linspace(self.xmin, self.xmax, self.Nx)
self.y = np.linspace(self.ymin, self.ymax, self.Ny)
self.z = np.linspace(self.zmin, self.zmax, self.Nz)
self.X, self.Z, self.Y = np.meshgrid(self.x, self.z, self.y)
# laser position
self.xl = 0
self.yl = 0
self.zl = 0
# diffuser positioning
self.xd = np.linspace(2*self.xmin, 2*self.xmax, 2*self.Nx)[None, :, None]
self.yd = np.linspace(2*self.ymin, 2*self.ymax, 2*self.Ny)[None, None, :]
self.t = np.linspace(0, 2*self.zmax, 2*self.Nz) / self.c
self.t = self.t[:, None, None]
self.zd = 0.0254 # thickness of diffuser
# allow optional override of these parameters
if zd:
self.zd = zd
if mu_s:
self.mu_s = mu_s
# set diffusion kernel
self.diffusion_fpsf = []
self.setDiffusionKernel(self.c, self.t, self.xl, self.yl, self.zl,
self.xd, self.yd, self.zd, self.ze,
self.mu_s, self.mu_a)
def setDiffusionKernel(self, v, t, xl, yl, zl, xd, yd, zd, ze, mu_s, mu_a):
'''
Returns the diffusion model for a slab with finite thickness given by
<NAME>, <NAME>, and <NAME>,
"Time resolved reflectance and transmittance for the noninvasive
measurement of tissue optical properties,"
Appl. Opt. 28, 2331-2336 (1989)
'''
        t[0, :] = 1  # guard the t=0 bin against division by zero below
d = zd - zl
z0 = 1 / mu_s
D = 1 / (3 * (mu_a + mu_s))
rho = np.sqrt((xd-xl)**2 + (yd - yl)**2)
# Photon migration through a turbid slab described by a model
# based on diffusion approximation.
# https://www.osapublishing.org/ao/abstract.cfm?uri=ao-36-19-4587
n_dipoles = 20
ii = np.arange(-n_dipoles, n_dipoles+1)[None, None, :]
z1 = d * (1 - 2 * ii) - 4*ii*ze - z0
z2 = d * (1 - 2 * ii) - (4*ii - 2)*ze + z0
dipole_term = z1 * np.exp(-(z1**2) / (4*D*v*t)) - \
z2 * np.exp(-(z2**2) / (4*D*v*t))
dipole_term = np.sum(dipole_term, axis=-1)[..., None] # sum over dipoles
diff_kernel = (4*np.pi*D*v)**(-3/2) * t**(-5/2) \
* np.exp(-mu_a * v * t - rho**2 / (4*D*v*t)) \
* dipole_term
psf = diff_kernel
diffusion_psf = psf / np.sum(psf)
diffusion_psf = np.roll(diffusion_psf, -xd.shape[1]//2, axis=1)
diffusion_psf = np.roll(diffusion_psf, -yd.shape[2]//2, axis=2)
        # light traverses the diffuser twice (in and out), so convolve the
        # single-pass kernel with itself via the frequency domain
        diffusion_psf = fftn(diffusion_psf) * fftn(diffusion_psf)
diffusion_psf = abs(ifftn(diffusion_psf))
# convert to pytorch and take fft
self.diffusion_fpsf = torch.from_numpy(diffusion_psf.astype(np.float32)).to(self.device)[None, None, :, :, :]
self.diffusion_fpsf = self.diffusion_fpsf.rfft(3, onesided=False)
return
def AT(self, x):
# wrapper function for f--k migration
return fk(x, 2*self.xmax, 2*self.zmax)
def M(self, x):
# trimming function
return x[:, :, :self.Nz, :self.Nx, :self.Ny]
def MT(self, x):
# padding function
return TorchF.pad(x, (0, self.Ny, 0, self.Nx, 0, self.Nz))
def run(self):
# run confocal diffuse tomography reconstruction
with h5py.File('./data/' + self.scene + '.mat', 'r') as f:
meas = np.array(f['meas']).transpose(2, 1, 0)
f.close()
# trim scene to 1 meter along the z-dimension
# and downsample to ~50 ps time binning from 16 ps
b = meas[:417, :, :]
downsampled = np.zeros((self.Nz, 32, 32))
for i in range(meas.shape[1]):
for j in range(meas.shape[2]):
x = np.linspace(0, 1, self.Nz)
xp = np.linspace(0, 1, 417)
yp = b[:, i, j].squeeze()
downsampled[:, i, j] = np.interp(x, xp, yp)
b = downsampled
b /= np.max(b) # normalize to 0 to 1
# initialize pytorch arrays
b = torch.from_numpy(b).to(self.device)[None, None, :, :, :].float()
x = torch.zeros(b.size()[0], 1, 2*self.Nz, 2*self.Nx, 2*self.Ny).to(self.device)
        # construct inverse psf for Wiener filtering: H* / (|H|^2 + 1/SNR)
tmp = compl_mul(self.diffusion_fpsf, conj(self.diffusion_fpsf))
tmp = tmp + 1/self.snr
invpsf = compl_mul(conj(self.diffusion_fpsf), 1/tmp)
# measure inversion runtime
if self.device.type == 'cpu':
start = time.time()
else:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
# pad measurements
x = self.MT(b)
# perform f-k migration on measurements
x_fk = self.AT(x)
# perform deconvolution
x_deconv = compl_mul(x.rfft(3, onesided=False), invpsf).ifft(3)[:, :, :, :, :, 0]
# confocal inverse filter
x = self.AT(x_deconv)
# measure elapsed time
if self.device.type == 'cpu':
stop = time.time()
print('Elapsed time: %.02f ms' % (1000 * (stop - start)))
else:
end.record()
torch.cuda.synchronize()
print('Elapsed time: %.02f ms' % (start.elapsed_time(end)))
# plot results
x_npy = x.cpu().data.numpy().squeeze()[:self.Nz, :self.Nx, :self.Ny]
b_npy = b.cpu().data.numpy().squeeze()
x_deconv_npy = x_deconv.cpu().data.numpy().squeeze()[:self.Nz, :self.Nx, :self.Ny]
x_fk_npy = x_fk.cpu().data.numpy().squeeze()[:self.Nz, :self.Nx, :self.Ny]
# trim any amplified noise at the very end of the volume
x_npy[-15:, :, :] = 0
if self.pause > 0:
plt.suptitle('Measurements and reconstruction')
plt.subplot(231)
plt.imshow(np.max(b_npy, axis=0), cmap='gray', extent=[self.xmin, self.xmax, self.ymin, self.ymax])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.subplot(232)
plt.imshow(np.max(b_npy, axis=1), aspect=(self.xmax-self.xmin)/(self.zmax/3e8*1e9), cmap='gray',
extent=[self.xmin, self.xmax, self.zmax/3e8*1e9, self.zmin])
plt.xlabel('x (m)')
plt.ylabel('t (ns)')
plt.subplot(233)
plt.imshow(np.max(b_npy, axis=2), aspect=(self.ymax-self.ymin)/(self.zmax/3e8*1e9), cmap='gray',
extent=[self.ymin, self.ymax, self.zmax/3e8*1e9, self.zmin])
plt.xlabel('y (m)')
plt.ylabel('t (ns)')
plt.subplot(234)
plt.imshow(np.max(x_npy, axis=0), cmap='gray', extent=[self.xmin, self.xmax, self.ymin, self.ymax])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.subplot(235)
plt.imshow(np.max(x_npy, axis=1), aspect=(self.xmax-self.xmin)/(self.zmax/2), cmap='gray',
extent=[self.xmin, self.xmax, self.zmax/2, self.zmin])
plt.xlabel('x (m)')
plt.ylabel('z (m)')
plt.subplot(236)
plt.imshow(np.max(x_npy, axis=2), aspect=(self.ymax-self.ymin)/(self.zmax/2), cmap='gray',
extent=[self.ymin, self.ymax, self.zmax/2, self.zmin])
plt.xlabel('y (m)')
plt.ylabel('z (m)')
plt.tight_layout()
plt.pause(self.pause)
# return measurements, deconvolved meas, reconstruction
return b_npy, x_fk_npy, x_deconv_npy, x_npy
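
# Minimal usage sketch (assumes a CUDA device and './data/letter_s.mat' on disk):
#
#   cdt = CDTReconstruction('letter_s', pause=0)
#   meas, x_fk, x_deconv, x = cdt.run()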
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from matplotlib import style
from collections import Counter
style.use('fivethirtyeight')
import pandas as pd
import random
benign_class = 2
malignant_class = 4
def k_nearest_neighbors(data,predict,k=3):
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups!')
distances = []
for group in data:
for features in data[group]:
#euclidean_distance = sqrt( (point1[0] - point2[0])**2 + (point1[1] - point2[1])**2 ) for 2D
euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict))
distances.append([euclidean_distance, group])
    votes = [i[1] for i in sorted(distances)[:k]]  # labels of the k nearest points
    votes_result = (Counter(votes).most_common(1))[0][0]  # most common label among those k votes
## print(votes)
## print(votes_result)
    result_label_votes = Counter(votes).most_common(1)[0][1]  # votes cast for the winning label
    total_votes = k
    confidence = result_label_votes/total_votes
    return votes_result, confidence
dataset = {'k':[[1,2],[2,3],[3,1]],'r':[[6,5],[7,7],[8,6]]}
new_features = [5,7]
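# Illustrative check on the toy data above: all three nearest neighbours of
# [5,7] belong to the 'r' group, so the vote is unanimous.
# result, confidence = k_nearest_neighbors(dataset, new_features, k=3)
# print(result, confidence)  # -> 'r' 1.0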
'''
trials = 1
accuracies = []
for i in range(trials):
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?',-99999,inplace=True)
df.drop(['id'],1,inplace=True)
#feature_data = df.drop(['class'],1).astype(float).values.tolist()
#target_data = df['class'].astype(float).values.tolist()
full_data = df.astype(float).values.tolist()
#Shuffling before creating test train set from keeping it out of any bias
random.shuffle(full_data)
## creating dictionary with unique value of target column 2,4 - benign class, malignant class
train_set = {}
test_set = {}
for dicn in df['class'].unique():
train_set[dicn] = []
test_set[dicn] = []
# creating dictionary with unique value of target column
##train_set = {benign_class:[],malignant_class:[]}
##test_set = {benign_class:[], malignant_class:[]} # creating dictionary of two classes
#creating test train set
test_size = 0.5
train_data = full_data[:-int(test_size*len(full_data))] #first 80% of the data
test_data = full_data[-int(test_size*len(full_data)):] #first 20% of the data
#creating test train set
# storing test and train data in target key dictionary
for row in train_data:
# when last element of the row is target label
label = row[-1] # last element
row_without_label = row[:-1] # all element untill without and untill last element
train_set[label].append(row_without_label)
for row in test_data:
# when last element of the row is target name or label
label = row[-1] # last element
row_without_label = row[:-1] # all element untill without and untill last element
test_set[label].append(row_without_label)
# storing test and train data in target key dictionary
ses_con = []
for x in range(17):
k = 2*x+1
print("k=%s"%k)
correct = 0
total = 0
confidences = []
for train_label in test_set:
for row in test_set[train_label]:
result,confidence = k_nearest_neighbors(train_set,row,k)
confidences.append(confidence)
#if confidence < 0.6:
# print(result,confidence)
if train_label == result:
correct += 1
else:
print(confidence)
total += 1
print("Accuracy",correct/total)
avg_confidence = sum(confidences)/total
print("avg confidence = %s"%avg_confidence)
accuracies.append(correct/total)
ses_con.append([correct/total,k])
votes = [i for i in sorted(ses_con,reverse=True)[:]] #top k numbers distances
print(test_size)
print(votes)
model_accuracy = sum(accuracies)/len(accuracies)
print("Session Model range %s - %s"%(min(accuracies),max(accuracies)))
print("Session Model accuracy %s"%(model_accuracy))
# split = 20%,40% k = 7, accuracy = 1, 96%
'''
# prediction
df = pd.read_csv('samples.txt')
#df.replace('?',-99999,inplace=True)
df.drop(['id'],1,inplace=True)
predict_data = df.astype(float).values.tolist()
#k_nearest_neighbors(data,predict,k=3)
df = pd.read_csv('breast-cancer-wisconsin.data')
df.replace('?',-99999,inplace=True)
df.drop(['id'],1,inplace=True)
full_data = df.astype(float).values.tolist()
random.shuffle(full_data)
## creating dictionary with unique value of target column 2,4 - benign class, malignant class
full_data_set = {}
for dicn in df['class'].unique():
full_data_set[dicn] = []
## creating dictionary with unique value of target column 2,4 - benign class, malignant class
for row in full_data:
    # the last element of the row is the target label
    label = row[-1]
    row_without_label = row[:-1]  # every element up to, but not including, the last
    full_data_set[label].append(row_without_label)
ses_con = []
for x in range(4):
    k = 2*x + 1
    print("k=%s" % k)
    print("Total samples = %s" % (len(predict_data)))
    confidences = []
    for row in predict_data:
        result, confidence = k_nearest_neighbors(full_data_set, row, k)
        if result == benign_class:       # benign_class (2) is assumed defined earlier in the script
            con = "benign"
        elif result == malignant_class:  # malignant_class (4) is assumed defined earlier in the script
            con = "malignant"
        print("%s , %s" % (con, confidence))
        confidences.append(confidence)
    avg_confidence = sum(confidences)/len(confidences)
    print("Average prediction confidence = %s" % (avg_confidence))
    ses_con.append([avg_confidence, k])
votes = [i[1] for i in sorted(ses_con, reverse=True)[:]]  # k values ordered by average confidence, best first
print(votes)
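## Hedged sketch: ses_con was sorted best-first above, so (assumption: a higher
## average confidence is preferred) the best k could be read off directly:
##best_k = votes[0]
##print("best k = %s" % best_k)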
#votes_result = Counter(votes).most_common(1)[0][0]  # would pick the most frequent k among the top-ranked runs
# prediction
##[[plt.scatter(ii[0],ii[1], s=100, color=i) for ii in dataset[i]] for i in dataset]
##plt.scatter(new_features[0],new_features[1], color = result)
##plt.title('KNN')
##plt.show()
| StarcoderdataPython |
4829177 | import copy
import subprocess
import re
import requests
def fetch_gist(url):
"""
Get the gist url from a media content url
"""
# Get the content from the url
content = requests.get(url).content.decode()
# Find the gist url
    match = re.findall(r"<script src(.*?)></script>", content)[0]
gist_url = re.findall('"(.*?)"', match)[0]
return f'<script src="{gist_url}"></script>'
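# Hedged usage sketch (hypothetical URL; needs network access, and assumes the
# media page embeds exactly one gist script tag as parsed above):
#gist_script = fetch_gist("https://medium.com/media/<hash>")
#print(gist_script)  # e.g. '<script src="https://gist.github.com/...js"></script>'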
if __name__ == "__main__":
post_url = input("Enter post url: ")
# Title of post
title = "-".join(post_url.split("/")[-1].split("-")[:-1])
date = input("Enter date (as 2018-10-05): ")
# Read in the template
with open("medium-to-markdown.js", "r") as f:
template = f.readlines()
# Copy the template
template_mod = copy.deepcopy(template)
# Update the js script with the url
template_mod[2] = f'mediumToMarkdown.convertFromUrl("{post_url}")'
# Write the new file
with open("medium-to-markdown_mod.js", "w") as f:
f.writelines(template_mod)
# Directory for saving post
# File is automatically correctly named
post_file_name = f"{date}-{title}.md"
try:
# Run javascript function
content = subprocess.Popen(
["node", "medium-to-markdown_mod.js"], stdout=subprocess.PIPE
)
# Extract html as string
content = content.stdout.read().decode()
        # Replace noscript image duplication
        new_content = re.sub(r"\n\n<noscript>(.*?)</noscript>\n\n", "\n", content)
        # Upgrade image quality
        new_content = re.sub(r"/max/[0-9]{1,3}", "/max/2000", new_content)
        # Replace source location
        new_content = re.sub(
            r"source=post_page---------------------------", "", new_content
        )
        # Remove personal blurb
        new_content = re.sub(r"\[(.*?) min read", "", new_content)
        # Replace <pre> around code blocks with markdown fences
        new_content = re.sub(r"<pre(.*?)>", "```\n", new_content)
        new_content = re.sub(r"</pre(.*?)>", "\n```", new_content)
        # Strip <span> tags within code blocks
        new_content = re.sub(r"<span(.*?)>", "\n", new_content)
        new_content = re.sub(r"</span(.*?)>", "\n", new_content)
        new_content = re.sub(r"freeze/", "", new_content)
# Identify all iframes (GitHub gists)
        iframes = re.findall(r"<iframe(.*?)></iframe>", new_content)
# Process each iframe
for iframe in iframes:
# Find the url in the frame
url = re.findall('src="(.*?)"', iframe)[0]
# Only use those urls with towardsdatascience
if "towardsdatascience" in url:
# Create a replacement script
replacement = fetch_gist(url)
old_iframe = f"<iframe{iframe}></iframe>"
# Substitute the old iframe with the new replacement
new_content = re.sub(old_iframe, replacement, new_content)
        captions = re.findall(r"!\[\]\((.*?)\)\n(.*?)\n\n", new_content)
for caption in captions:
original = f"\n{caption[1]}\n\n"
replacement = f"\n*{caption[1]}*\n\n"
new_content = new_content.replace(original, replacement)
new_content = (
f"""---
published: true
title: "{title.replace('-', ' ').title()}"
date: {date}
categories:
-
-
---
"""
+ new_content
)
# Save the modified post
with open(post_file_name, "w") as fout:
fout.write(new_content)
print(f"Post saved as markdown to {post_file_name}")
# Report errors otherwise
except Exception as e:
print(f"Error somewhere along the way: {e}")
| StarcoderdataPython |
4805380 | <reponame>leschzinerlab/myami-3.2-freeHand
# The Leginon software is Copyright 2004
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
# $Source: /ami/sw/cvsroot/pyleginon/leginon.gui.wx/TargetFinder.py,v $
# $Revision: 1.19 $
# $Name: not supported by cvs2svn $
# $Date: 2008-02-11 23:48:05 $
# $Author: acheng $
# $State: Exp $
# $Locker: $
import wx
import leginon.gui.wx.Node
import leginon.gui.wx.Settings
import leginon.gui.wx.ToolBar
import leginon.gui.wx.ImagePanelTools
hide_incomplete = False
class Panel(leginon.gui.wx.Node.Panel):
def __init__(self, *args, **kwargs):
leginon.gui.wx.Node.Panel.__init__(self, *args, **kwargs)
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_SETTINGS,
'settings',
shortHelpString='Settings')
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_SUBMIT,
'play',
shortHelpString='Submit Targets')
self.Bind(leginon.gui.wx.Events.EVT_SUBMIT_TARGETS, self.onSubmitTargets)
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_SUBMIT_QUEUE,
'send_queue_out',
shortHelpString='Submit Queued Targets')
self.Bind(leginon.gui.wx.Events.EVT_TARGETS_SUBMITTED, self.onTargetsSubmitted)
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SETTINGS, False)
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT, False)
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT_QUEUE, False)
self.initialize()
self.SetSizer(self.szmain)
self.SetAutoLayout(True)
self.SetupScrolling()
def onNodeInitialized(self):
leginon.gui.wx.Node.Panel.onNodeInitialized(self)
self.toolbar.Bind(wx.EVT_TOOL, self.onSettingsTool,
id=leginon.gui.wx.ToolBar.ID_SETTINGS)
self.toolbar.Bind(wx.EVT_TOOL, self.onSubmitTool,
id=leginon.gui.wx.ToolBar.ID_SUBMIT)
self.toolbar.Bind(wx.EVT_TOOL, self.onSubmitQueueTool,
id=leginon.gui.wx.ToolBar.ID_SUBMIT_QUEUE)
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SETTINGS, True)
self.Bind(leginon.gui.wx.ImagePanelTools.EVT_SETTINGS, self.onImageSettings)
queue = self.node.settings['queue']
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT_QUEUE, queue)
self.imagepanel.imagevector = self.node.getTargetImageVector()
self.imagepanel.beamradius = self.node.getTargetBeamRadius()
def onSetImage(self, evt):
super(Panel,self).onSetImage(evt)
try:
self.imagepanel.imagevector = self.node.getTargetImageVector()
self.imagepanel.beamradius = self.node.getTargetBeamRadius()
except AttributeError:
# This function is called on initialization and self.node would be None
pass
def onImageSettings(self, evt):
pass
def targetsSubmitted(self):
evt = leginon.gui.wx.Events.TargetsSubmittedEvent()
self.GetEventHandler().AddPendingEvent(evt)
def initialize(self):
pass
def getTargetPositions(self, typename):
return self.imagepanel.getTargetPositions(typename)
def getTargets(self, typename):
return self.imagepanel.getTargets(typename)
def onSettingsTool(self, evt):
dialog = SettingsDialog(self,show_basic=True)
dialog.ShowModal()
dialog.Destroy()
def onSubmitTool(self, evt):
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT, False)
self.node.submitTargets()
def onSubmitQueueTool(self, evt):
self.node.publishQueue()
def onSubmitTargets(self, evt):
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT, True)
def onTargetsSubmitted(self, evt):
self.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT, False)
def submitTargets(self):
evt = leginon.gui.wx.Events.SubmitTargetsEvent()
self.GetEventHandler().AddPendingEvent(evt)
class SettingsDialog(leginon.gui.wx.Settings.Dialog):
def initialize(self):
return ScrolledSettings(self,self.scrsize,False,self.show_basic)
class ScrolledSettings(leginon.gui.wx.Settings.ScrolledDialog):
def initialize(self):
leginon.gui.wx.Settings.ScrolledDialog.initialize(self)
sb = wx.StaticBox(self, -1, 'General Target Finder Settings ')
sbsz = wx.StaticBoxSizer(sb, wx.VERTICAL)
if self.show_basic:
sz = self.addBasicSettings()
else:
sz = self.addSettings()
sbsz.Add(sz, 0, wx.ALIGN_CENTER|wx.EXPAND|wx.ALL, 5)
return [sbsz]
def addBasicSettings(self):
self.widgets['user check'] = wx.CheckBox(self, -1,
'Allow for user verification of selected targets')
self.widgets['queue'] = wx.CheckBox(self, -1,
'Queue up targets')
self.Bind(wx.EVT_CHECKBOX, self.onQueueCheckbox, self.widgets['queue'])
sz = wx.GridBagSizer(5, 5)
sz.Add(self.widgets['user check'], (0, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
#sz.Add(self.widgets['wait for done'], (1, 0), (1, 1),
# wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['queue'], (1, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
return sz
def addSettings(self):
#self.widgets['wait for done'] = wx.CheckBox(self, -1,
# 'Wait for another node to process targets before marking them done')
self.widgets['user check'] = wx.CheckBox(self, -1,
'Allow for user verification of selected targets')
self.widgets['queue'] = wx.CheckBox(self, -1,
'Queue up targets')
self.widgets['queue drift'] = wx.CheckBox(self, -1, 'Declare drift when queue submitted')
self.widgets['sort target'] = wx.CheckBox(self, -1, 'Sort targets by shortest path')
self.widgets['allow append'] = wx.CheckBox(self, -1, 'Allow target finding on old images')
self.Bind(wx.EVT_CHECKBOX, self.onQueueCheckbox, self.widgets['queue'])
sz = wx.GridBagSizer(5, 5)
sz.Add(self.widgets['user check'], (0, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
#sz.Add(self.widgets['wait for done'], (1, 0), (1, 1),
# wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['queue'], (1, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['queue drift'], (2, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
sz.Add(self.widgets['sort target'], (3, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
if not hide_incomplete:
sz.Add(self.widgets['allow append'], (4, 0), (1, 1),
wx.ALIGN_CENTER_VERTICAL)
return sz
def onQueueCheckbox(self, evt):
state = evt.IsChecked()
parent = self.panel
parent.toolbar.EnableTool(leginon.gui.wx.ToolBar.ID_SUBMIT_QUEUE, state)
evt.Skip()
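# Hedged note: the widget keys used above ('user check', 'queue', 'queue drift',
# 'sort target', 'allow append') are assumed to mirror entries in the node's
# settings dict, e.g. self.node.settings['queue'] as read in onNodeInitialized.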
if __name__ == '__main__':
class App(wx.App):
def OnInit(self):
frame = wx.Frame(None, -1, 'Target Finder Test')
panel = Panel(frame)
frame.Fit()
self.SetTopWindow(frame)
frame.Show()
return True
app = App(0)
app.MainLoop()
| StarcoderdataPython |
1676152 | import requests
from lxml import html
from lxml.etree import tostring
class UserParser:
def set_database(self,database):
self.database = database
def set_user_id(self, id):
self.Session = requests.session()
self.trip_advisor = 'https://www.tripadvisor.ca'
self.id = id
self.brief = self.get_user_brief()
self.name = ''
self.user_link = ''
        self.level_of_contributor = 0
self.n_contributions = 0
self.n_cities_visited = 0
self.n_helpful_votes = 0
self.from_city = ''
self.get_attributes()
def find_variable(self, text, variable):
a = text.find(variable) + len(variable)
b = text.find('"', text.find(variable) + len(variable))
return text[a: b]
def open_user_page(self, hotel_link):
try:
response_hotel_page = self.Session.get(hotel_link)
tree = html.fromstring(response_hotel_page.text)
review_ids = list(set(tree.xpath(
'//*[contains(concat( " ", @class, " " ), concat( " ", "review-container", " " ))]/@data-reviewid')))
data_val = {'reviews': ','.join(review_ids)}
headers = {'Referer': hotel_link}
return self.Session.post(
'https://www.tripadvisor.ca/OverlayWidgetAjax?Mode=EXPANDED_HOTEL_REVIEWS_RESP&metaReferer=',
data=data_val,
headers=headers)
except Exception as e:
return None
def get_user_brief(self):
return 'https://www.tripadvisor.ca/MemberOverlay?Mode=owa&uid=' + self.id + '&c=&src=597541025&fus=false&partner=false&LsoId=&metaReferer=Restaurant_Review'
def get_attributes(self):
brief_page = self.Session.get(self.brief)
tree = html.fromstring(brief_page.text)
self.name = list(set(tree.xpath('//a/h3/text()')))[0]
self.user_link = self.trip_advisor + list(set(tree.xpath('//a/@href')))[0]
        tmp_level_of_contributor = list(set(tree.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "badgeinfo", " " ))]/span/text()')))
tmp_ns = list(set(tree.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "badgeTextReviewEnhancements", " " ))]/text()')))
tmp_from_city = list(set(tree.xpath(
'//*[contains(concat( " ", @class, " " ), concat( " ", "memberdescriptionReviewEnhancements", " " ))]/li/text()')))
for _ in tmp_from_city:
if 'from' in _:
tmp = _
tmp = tmp[tmp.find('from') + 5:]
self.from_city = tmp
        if len(tmp_level_of_contributor) == 1:
            self.level_of_contributor = tmp_level_of_contributor[0]
        else:
            self.level_of_contributor = 0
for _ in tmp_ns:
if 'Cities visited' in _:
tmp = _
tmp = tmp[0:tmp.find(' ')]
self.n_cities_visited = int(tmp)
elif 'Contributions' in _:
tmp = _
tmp = tmp[0:tmp.find(' ')]
self.n_contributions = int(tmp)
elif 'Helpful votes' in _:
tmp = _
tmp = tmp[0:tmp.find(' ')]
self.n_helpful_votes = int(tmp)
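# Hedged usage sketch (hypothetical member uid; needs network access and the
# TripAdvisor member-overlay endpoint used above):
#parser = UserParser()
#parser.set_user_id('0123456789ABCDEF')
#print(parser.name, parser.level_of_contributor, parser.from_city)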
| StarcoderdataPython |
3326181 | import pytest
from plenum.test.bls.helper import change_bls_key, check_bls_key
from plenum.test.conftest import pool_txn_stewards_data, stewards_and_wallets
@pytest.fixture(scope="module")
def update_bls_keys(looper, tconf, nodeSet, stewards_and_wallets):
node = nodeSet[0]
steward_client, steward_wallet = stewards_and_wallets[0]
new_blspk = change_bls_key(looper, nodeSet, node,
steward_client, steward_wallet)
check_bls_key(new_blspk, node, nodeSet)
def test_node_schedules_upgrade_after_bls_keys_update(update_bls_keys,
upgradeScheduled):
# Upgrade should work even after an update to the pool ledger with a
# transaction that does not contain `SERVICES` field
pass
| StarcoderdataPython |
87750 | #!/usr/bin/env python
"""
"""
from plasTeX import Command, Environment
class center(Environment):
blockType = True
class centering(center):
blockType = True
class flushleft(Environment):
blockType = True
class raggedright(flushleft):
blockType = True
class flushright(Environment):
blockType = True
class raggedleft(flushright):
blockType = True
class raggedbottom(Environment):
pass
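# Hedged sketch: further alignment macros would follow the same pattern, e.g. a
# hypothetical one-argument command (not part of this module):
#
# class centerline(Command):
#     args = 'self'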
| StarcoderdataPython |
165557 | # ==============================================================================
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
"""Openvino Tensorflow BiasAdd operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
np.random.seed(8)
class TestBiasAddOperations(NgraphTest):
def test_BiasAdd1(self):
input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4)
input_data = np.reshape(input_data, (2, 2, 2, 2))
input_var = tf.compat.v1.placeholder(tf.float32, shape=(2, 2, 2, 2))
bias_data = (100., -100.)
bias_var = tf.compat.v1.placeholder(tf.float32, shape=(2))
out = tf.nn.bias_add(input_var, bias_var, 'NHWC')
def run_test(sess):
return sess.run(
out, feed_dict={
input_var: input_data,
bias_var: bias_data
})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
def test_BiasAdd2(self):
input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4)
input_data = np.reshape(input_data, (2, 2, 2, 2))
input_var = tf.compat.v1.placeholder(tf.float32, shape=(2, 2, 2, 2))
bias_data = (100., -100.)
bias_var = tf.compat.v1.placeholder(tf.float32, shape=(2))
out = tf.nn.bias_add(input_var, bias_var, 'NCHW')
def run_test(sess):
return sess.run(
out, feed_dict={
input_var: input_data,
bias_var: bias_data
})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
def test_BiasAdd3(self):
input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4, 3, 5, 1,
2, 0, 4, 0, 1)
input_data = np.reshape(input_data, (2, 3, 2, 2))
input_var = tf.compat.v1.placeholder(tf.float32, shape=(2, 3, 2, 2))
bias_data = (100., -100., 50) # channels = 3
bias_var = tf.compat.v1.placeholder(tf.float32, shape=(3))
out = tf.nn.bias_add(input_var, bias_var, 'NCHW')
def run_test(sess):
return sess.run(
out, feed_dict={
input_var: input_data,
bias_var: bias_data
})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
def test_BiasAdd4(self):
input_data = (0, 1, 0, 1, 2, 1, 1, 0, 3, 1, 1, 0, 4, 4, 5, 4, 3, 5, 1,
2, 0, 4, 0, 1)
input_data = np.reshape(input_data, (2, 2, 2, 3))
input_var = tf.compat.v1.placeholder(tf.float32, shape=(2, 2, 2, 3))
bias_data = (100., -100., 50) # channels = 3
bias_var = tf.compat.v1.placeholder(tf.float32, shape=(3))
out = tf.nn.bias_add(input_var, bias_var, 'NHWC')
def run_test(sess):
return sess.run(
out, feed_dict={
input_var: input_data,
bias_var: bias_data
})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
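    # Hedged sketch of the same pattern for a 3-D input (assumption: bias_add
    # broadcasts the bias over the last axis when data_format is 'NHWC'):
    #
    # def test_BiasAdd_3d(self):
    #     input_var = tf.compat.v1.placeholder(tf.float32, shape=(2, 4, 3))
    #     bias_var = tf.compat.v1.placeholder(tf.float32, shape=(3,))
    #     out = tf.nn.bias_add(input_var, bias_var, 'NHWC')
    #     # ...then compare self.with_ngraph / self.without_ngraph as above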
| StarcoderdataPython |