id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
4978864
|
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .SearchKeywordResult import SearchKeywordResult
class CreateSearchKeyword(BaseSchema):
# Catalog swagger.json
app_id = fields.Str(required=False)
result = fields.Nested(SearchKeywordResult, required=False)
is_active = fields.Boolean(required=False)
_custom_json = fields.Dict(required=False)
words = fields.List(fields.Str(required=False), required=False)
|
StarcoderdataPython
|
9714295
|
<filename>lab3/lab3.py
import argparse
import logging
import os
import random
import sys
import time
import json
#from typing_extensions import Required
import numpy as np
import copy
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import SubsetRandomSampler
import wandb
from dataloader import femnist_dataloaders
# from fedml_api.model.cv.resnet import resnet56
from fedml_api.standalone.fedavg.fedavg_api import FedAvgAPI
from fedml_api.standalone.fedavg.my_model_trainer_classification import MyModelTrainer
from model import Net_femnist
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
# Training settings
parser.add_argument('--wandb_name', type=str, required=True,
help='Name of log file')
parser.add_argument('--dataset', type=str, default='cifar10', metavar='N',
help='dataset used for training')
parser.add_argument('--data_dir', type=str, default='./cifar10',
help='data directory')
    parser.add_argument('--batch_size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
parser.add_argument('--client_optimizer', type=str, default='adam',
help='SGD with momentum; adam')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=5, metavar='EP',
help='how many epochs will be trained locally')
parser.add_argument('--client_num_in_total', type=int, default=10, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--client_num_per_round', type=int, default=10, metavar='NN',
help='number of workers')
parser.add_argument('--comm_round', type=int, default=10,
help='how many round of communications we shoud use')
parser.add_argument('--frequency_of_the_test', type=int, default=5,
help='the frequency of the algorithms')
parser.add_argument('--gpu', type=int, default=0,
help='gpu')
parser.add_argument('--ci', type=int, default=0,
help='CI')
parser.add_argument('--seed', type=int, default=0,
help='seed')
return parser
if __name__ == "__main__":
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
parser = add_args(argparse.ArgumentParser(description='FedAvg-standalone'))
# parser.add_argument("-f")
args = parser.parse_args()
logger.info(args)
device = torch.device("cuda:" + str(args.gpu) if torch.cuda.is_available() else "cpu")
logger.info(device)
wandb.init(
project="fedml",
name=args.wandb_name,
config=args
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
dataset = femnist_dataloaders(root="./femnist",clients=args.client_num_in_total, batch_size=args.batch_size)
model = Net_femnist()
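    # Build a throwaway optimizer on a copy of the model just so a StepLR scheduler can be handed to FedAvgAPI.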
dummy_opt = torch.optim.SGD(copy.deepcopy(model).parameters(), lr=args.lr)
dummy_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=dummy_opt,
step_size = args.comm_round/5,
gamma = 0.5)
model_trainer = MyModelTrainer(model)
fedavgAPI = FedAvgAPI(dataset, device, args, model_trainer, threading=3, scheduler=dummy_scheduler)
#fedavgAPI = FedAvgAPI(dataset, device, args, model_trainer, scheduler=dummy_scheduler)
fedavgAPI.train()
|
StarcoderdataPython
|
1809325
|
import sys
import matplotlib.pyplot as plt
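# Parse one whitespace-separated line of timing results into ints (helper; not used by the __main__ block below).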
def fetch_results(fresults):
results = fresults.readline().split(' ')
results.pop()
return [int(elem) for elem in results]
if __name__ == '__main__':
# Name and results of timing functions
names = []
values = []
# Fetch the results
with open(sys.argv[1]) as f:
for line in f:
results = line.split(' ')
# Get sorter name
names.append(results.pop(0))
# Remove EOL character
results.pop()
# Plot the results
intresults = [int(elem) for elem in results]
xaxis = list(range(len(intresults)))
val, = plt.plot(xaxis, intresults)
values.append(val)
# Add a legend
plt.legend(values, names, loc='upper left')
plt.xlabel('Number of elements to sort')
plt.ylabel('Execution time (ms)')
plt.show()
|
StarcoderdataPython
|
6669631
|
<reponame>cjshearer/project-athena<filename>src/scripts/cody_scripts/train_ensemble_model.py
# based on ../zhymir_scripts/train_model.py
import os
import keras
import numpy as np
from utils.file import dump_to_json
import matplotlib.pyplot as plt
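# Fit a model on the given data and optionally save the model and its training history (not used by the __main__ block below).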
def train_model(data, labels, model_p, save, filename, save_history, h_filename):
model_history = model_p.fit(data, labels, batch_size=10)
if save:
model_p.save(filename)
if save_history:
dump_to_json(model_history.history, h_filename)
if __name__ == '__main__':
train_data = np.load('../zhymir_scripts/train_test/train_data.npy')
train_labels = []
with np.load('../zhymir_scripts/train_test/train_labels.npz') as data:
train_labels = data['arr_0']
model_root = '../../../Task2/models'
history_root = '../../../Task2/data'
filepath = os.path.join(model_root, 'cody_model.h5')
history_filename = os.path.join(history_root, 'cody_model_history')
batch_size = 10
num_classifiers = 16
model = keras.models.Sequential([
keras.layers.Dense(units=100, input_shape=(num_classifiers, 10), activation='relu', name='D1'),
keras.layers.Flatten(),
keras.layers.Dense(10, name='output_layer', activation='softmax')
])
metrics = ['accuracy']
model.compile('adam', 'categorical_crossentropy', metrics=metrics)
history = model.fit(train_data, train_labels, epochs=20, batch_size=batch_size, validation_split=0.1, verbose=0)
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
model.save(filepath)
dump_to_json(history.history, history_filename)
|
StarcoderdataPython
|
5167536
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.utils.datastructures import MultiValueDictKeyError
from store.models import Ticket
@login_required
def create_ticket_view(request):
# Get actual user
actual_user = User.objects.filter(username__iexact=request.user.username)[0]
# if this is a POST request we need to process the form data
if request.method == "POST":
ticket_title = request.POST.get("ticket_title", None)
ticket_description = request.POST.get("ticket_description", None)
# Check if a file has been sent
try:
ticket_picture = request.FILES["ticket_picture"]
except MultiValueDictKeyError:
ticket_picture = None
if ticket_title is not None and ticket_description is not None:
# Redirect to a success page.
new_ticket = Ticket(
user=actual_user,
title=ticket_title,
description=ticket_description,
image=ticket_picture,
)
new_ticket.save()
context = {"ticket": new_ticket}
return render(request, "success_ticket.html", context)
return render(request, "create_ticket.html")
@login_required
def tickets_view(request):
"""
    List every ticket in the store
:param request: webpage request
:return: the ticket store
"""
every_ticket = list(Ticket.objects.all())
context = {"tickets": every_ticket}
return render(request, "tickets.html", context)
@login_required
def delete_ticket(request):
"""
Delete a ticket
:param request: ticket.id the ticket id
:return:
"""
# Get actual user
actual_user = User.objects.filter(username__iexact=request.user.username)[0]
ticket_id = request.GET.get("ticket_id", None)
ticket_to_delete = Ticket.objects.filter(id=ticket_id).first()
# Check if the actual user is the ticket's owner, only the ticket's owner
# can delete it.
    if ticket_to_delete is not None and actual_user == ticket_to_delete.user:
        ticket_to_delete.delete()
return redirect("/")
|
StarcoderdataPython
|
1628382
|
<reponame>cjgreencorner/Python-Scripts
#!/usr/bin/env python
##########################################
# Age Calculator #
##########################################
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Finished"
import datetime
def birthyear(year, age): # Calculate birthyear
return year - age
def fifty(year, age): # To calculate the year when the person becomes 50
return year - age + 50
def main():
currentDateTime = datetime.datetime.now()
date = currentDateTime.date()
year = int(date.strftime("%Y"))
age = int(input("Age: ")) # Ask for the person's age
print("You were born in: ", birthyear(year, age))
print("You are 50 in ", fifty(year, age))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4890297
|
# coding=utf-8
import re
from enum import Enum
import shutil
import numpy as np
from colorama import Fore
import torch
import os
#from Putil.base import logger as plog
#logger = plog.PutilLogConfig('util').logger()
#logger.setLevel(plog.DEBUG)
from Putil.demo.deep_learning.base import horovod
import Putil.base.save_fold_base as psfb
##@brief Indicates which mode this run is in
# @note This is different from Stage: a single run may go through several Stages,
# e.g. TrainEvaluate means the Evaluate stage inside RunStage.Train
class RunStage(Enum):
Train=0
Evaluate=1
Test=2
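# Helpers that scan os.environ for variable names containing base_name and collect their values.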
def find_repeatable_environ(base_name):
temp = set([k if re.search(base_name, k) is not None else None for k in os.environ.keys()])
temp.remove(None)
return temp
def get_relatived_environ(base_name):
return {property_type.replace(base_name, ''): os.environ[property_type] for property_type in find_repeatable_environ(base_name)}
def complete_environ(source_dict, target_dict, default_content):
    # Fill target_dict with the property types that exist in source_dict but are missing from target_dict
[None if property_type in target_dict.keys() else target_dict.update({property_type: default_content}) \
for property_type, name in source_dict.items()]
pass
def empty_tensor_factory(framework, **kwargs):
def generate_empty_tensor_factory_func():
if framework == 'torch':
# tensor operation
def torch_generate_empty_tensor():
return torch.Tensor([])
return torch_generate_empty_tensor
else:
            raise NotImplementedError('empty_tensor_factory in framework: {} is Not Implemented'.format(framework))
pass
return generate_empty_tensor_factory_func
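# The helpers below encode strings as torch tensors (and back) so they can be shared between Horovod ranks via broadcast_object.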
def string_to_torch_tensor(_str, code='utf-16'):
return torch.from_numpy(np.frombuffer(_str.encode(code), dtype=get_np_dtype_from_code(code)))
def get_np_dtype_from_code(code):
return np.int16 if code == 'utf-16' else np.int8 if code == 'utf-8' else None
def get_code_from_np_dtype(np_dtype):
return ('utf-16', np.uint16) if np_dtype == np.int16 else ('utf-8', np.uint8) if np_dtype == np.int8 else None
def torch_tensor_to_string(tensor, code='utf-16'):
n = tensor.numpy()
return n.astype(get_code_from_np_dtype(n.dtype)[1]).tobytes().decode(get_code_from_np_dtype(n.dtype)[0])
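# Rank 0 decides the save directory (creating a new one, or reusing the weight file's directory) and broadcasts it to the other ranks.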
def make_sure_the_save_dir(name, run_stage, save_dir, weight_path, weight_epoch, debug, framework):
hvd = horovod.horovod(framework)
if run_stage == RunStage.Train:
        if (weight_path == '' or weight_epoch is None) and hvd.rank() == 0:
bsf = psfb.BaseSaveFold(
use_date=True if not debug else False, \
use_git=True if not debug else False, \
should_be_new=True if not debug else False, \
                base_name='{}{}'.format(name if name != '' else 'Unnamed', '-debug' if debug else ''))
bsf.mkdir(save_dir)
save_dir = bsf.FullPath
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
elif hvd.rank() == 0 and weight_path is not None and weight_epoch is not None:
save_dir = os.path.dirname(weight_path)
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
elif hvd.rank() != 0:
code = 'utf-16'
save_dir_tensor = string_to_torch_tensor(save_dir, code)
save_dir_tensor = hvd.broadcast_object(save_dir_tensor, 0, 'save_dir')
save_dir = torch_tensor_to_string(save_dir_tensor, code)
else:
raise RuntimeError('this should not happend')
print(Fore.GREEN + 'rank {} final get save dir: {}'.format(hvd.rank(), save_dir) + Fore.RESET)
return save_dir
pass
def generate_train_time_dir_name(train_time):
return 'train_time-{}'.format(train_time)
def subdir_base_on_train_time(root_dir, train_time, prefix):
'''
    @brief Build the subdirectory name from the root directory and the corresponding train_time
'''
return os.path.join(root_dir, '{}{}'.format('' if prefix == '' else '{}-'.format(prefix), generate_train_time_dir_name(train_time)))
def train_time_matched(train_time, subdir):
res = re.search(generate_train_time_dir_name(train_time), subdir)
return res is not None, res
#def get_train_time_from_subdir(subdir):
# return
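# Check whether a file name is a tensorboard event file and, if so, extract the train_time from its trailing '-<number>' suffix.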
def _tensorboard_event_and_the_train_time(file_name):
_split_by_point = file_name.split('.')
is_event = _split_by_point[0] == 'events' and _split_by_point[1] == 'out' and _split_by_point[2] == 'tfevents'
train_time = int(file_name.split('-')[-1]) if is_event else None
return is_event, train_time
def _get_trained_result(path, train_time):
'''
    @brief Collect the results of the train_time-th training run under the given path, for clean_train_result and similar functions to process
'''
files = []
dirs = []
#
#dirs.append(os.path.join(path, subdir_base_on_train_time(path, train_time)))
#
_contents = os.listdir(path)
for _content in _contents:
matched, res = train_time_matched(train_time, _content)
dirs.append(os.path.join(path, _content)) if matched else None
pass
return dirs, files
def clean_train_result(path, train_time):
dirs, files = _get_trained_result(path, train_time)
#sync = hvd.broadcast_object(torch.BoolTensor([True]), 0, 'sync_before_checking_remove_file')
_remove = input(Fore.RED + 'remove the files: {} dir: {} (y/n):'.format(files, dirs))
[os.remove(_file) for _file in files] if _remove in ['y', 'Y'] else None
[shutil.rmtree(_dir) for _dir in dirs] if _remove in ['Y', 'y'] else None
pass
def fix_one_env_param(param):
if isinstance(param, bool):
return param
elif isinstance(param, str):
if param in ['False', 'false']:
return False
        elif param in ['True', 'true']:
return True
elif param in ['None', 'none']:
return None
elif param in ['Train', 'train', 'Evaluate', 'evaluate', 'Test', 'test']:
if param in ['Train', 'train']:
return RunStage.Train
elif param in ['Evaluate', 'evaluate']:
return RunStage.Evaluate
else:
return RunStage.Test
pass
elif param in ['Torch', 'torch']:
return 'torch'
elif param in ['tf', 'tensorflow']:
return 'tf'
else:
return param
pass
elif isinstance(param, None.__class__):
return param
else:
raise NotImplementedError('fix param with type {} is not implemented'.format(param.__class__.__name__))
pass
def fix_env_param(param):
check_multi_param = param.split('.')
if len(check_multi_param) != 1:
temp_params = []
for param in check_multi_param:
temp_params.append(fix_one_env_param(param))
pass
return temp_params
else:
return fix_one_env_param(param)
pass
def print_env_param(param, env_name):
print(Fore.GREEN + 'param: {}:{} | type: {}'.format(env_name, param, param.__class__.__name__) + Fore.RESET)
def make_sure_the_train_time(run_stage, save_dir, framework):
hvd = horovod.horovod(framework)
if run_stage == RunStage.Train:
#if hvd.rank() == 0 and args.weight_epoch is None and args.weight_epoch is None:
# print(Fore.GREEN + 'get the untrained train time is 0' + Fore.RESET)
# args.train_time = 0
# train_time_tensor = torch.IntTensor([args.train_time])
# train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
#elif hvd.rank() == 0 and (args.weight_path != '') and (args.weight_epoch is not None):
if hvd.rank() == 0:# and (args.weight_path != '') and (args.weight_epoch is not None):
item_list = os.listdir(save_dir)
max_time = 0
for _item in item_list:
if os.path.isdir(os.path.join(save_dir, _item)):
name_part = _item.split('-')
if name_part[-2] == 'train_time':
max_time = max(max_time, int(name_part[-1]))
train_time = max_time + 1
print(Fore.GREEN + 'get the trained train time is {}'.format(train_time) + Fore.RESET)
train_time_tensor = torch.IntTensor([train_time])
train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
train_time = train_time
elif hvd.rank() != 0:
print(Fore.GREEN + 'wait for the root rank share the train_time' + Fore.RESET)
train_time_tensor = torch.IntTensor([-1])
train_time_tensor = hvd.broadcast_object(train_time_tensor, 0, 'train_time')
train_time = train_time_tensor.item()
else:
raise RuntimeError('this should not happend')
pass
print(Fore.GREEN + 'rank {} final get train time: {}'.format(hvd.rank(), train_time) + Fore.RESET)
return train_time
pass
pass
|
StarcoderdataPython
|
4957989
|
import setuptools
def readme():
with open('README.md') as f:
README = f.read()
return README
setuptools.setup(
name="playment",
version="1.0.5",
description="A Python package to interact with Playment's APIs.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/crowdflux/playment-sdk-python.git",
author="Playment",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=setuptools.find_packages(),
include_package_data=True,
keywords=["playment"],
)
|
StarcoderdataPython
|
5129191
|
from django.urls import path
# Import the views module whose URL routes need to be configured
from . import views
urlpatterns = [
    # Function-view route syntax:
    # path('URL regex pattern', function_view_name)
    # The user registration URL is http://127.0.0.1/users/register/
    # path('users/register/',views.register)
    # RegisterView is a class-based view, but path() needs a view function, so as_view() is used
path('users/register/',views.RegisterView.as_view())
]
|
StarcoderdataPython
|
310793
|
<filename>api/test/test_cli/test_nivo_record_helper/test_misc.py<gh_stars>1-10
import os
from csv import DictReader
from datetime import date
from uuid import uuid4, UUID
import pytest
import responses
from requests import HTTPError
from sqlalchemy.engine import Engine
from sqlalchemy.exc import IntegrityError
from nivo_api.cli import get_last_nivo_date, check_nivo_doesnt_exist, download_nivo
from nivo_api.cli.nivo_record_helper import (
ArchiveNivoCss,
NivoCsv,
create_new_unknown_nivo_sensor_station,
NivoDate,
)
from nivo_api.core.db.connection import connection_scope
from nivo_api.core.db.models.sql.nivo import SensorStationTable, NivoRecordTable
from nivo_api.settings import Config
from test.pytest_fixtures import database
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
class TestGetLastNivoDate:
@responses.activate
def test_wrong_date(self):
"""
it should raise in case of wrongly formated date format.
"""
responses.add(
responses.GET, Config.METEO_FRANCE_LAST_NIVO_JS_URL, body="jour=20190;"
)
with pytest.raises(ValueError) as e:
get_last_nivo_date()
@responses.activate
def test_wrong_file(self):
"""
it should raise if the regex doesn't match
"""
responses.add(
responses.GET,
Config.METEO_FRANCE_LAST_NIVO_JS_URL,
body="THIS WILL NOT MATCH",
)
with pytest.raises(AttributeError) as e:
get_last_nivo_date()
assert str(e.value) == "'NoneType' object has no attribute 'group'"
@responses.activate
def test_file_fetch_error(self):
responses.add(responses.GET, Config.METEO_FRANCE_LAST_NIVO_JS_URL, status=302)
with pytest.raises(AssertionError) as e:
get_last_nivo_date()
assert str(e.value) == "Impossible to fetch last nivo data from meteofrance url"
@responses.activate
def test_date_ok(self):
responses.add(
responses.GET, Config.METEO_FRANCE_LAST_NIVO_JS_URL, body="jour=20190101;"
)
res = get_last_nivo_date()
assert NivoDate(False, date(2019, 1, 1)) == res
class TestCheckNivoDoesntExist:
def test_check_nivo_doesnt_exist(self, database):
with connection_scope(database.engine) as con:
r = check_nivo_doesnt_exist(con, date(2019, 1, 1))
assert r is True
def _inject_test_data(self, engine: Engine):
with connection_scope(engine) as con:
nss_id = uuid4()
con.execute(
SensorStationTable.insert().values(
{
"nss_id": nss_id,
"nss_name": "test",
"nss_meteofrance_id": 1,
"the_geom": "SRID=4326;POINT(1 1 1)",
}
)
)
con.execute(
NivoRecordTable.insert().values(
{"nr_date": date(2019, 1, 1), "nr_nivo_sensor": nss_id}
)
)
def test_check_nivo_exist(self, database):
self._inject_test_data(database.engine)
with connection_scope(database.engine) as con:
r = check_nivo_doesnt_exist(con, date(2019, 1, 1))
assert r is False
class TestDownloadNivo:
@responses.activate
def test_archive_download(self, database):
url = f"{Config.METEO_FRANCE_NIVO_BASE_URL}/Archive/nivo.201701.csv.gz"
with open(os.path.join(CURRENT_DIR, "test_data/nivo.201701.csv.gz"), "rb") as f:
responses.add(
responses.GET, url, body=f.read(), content_type="application/x-gzip"
)
with connection_scope(database.engine) as con:
r = download_nivo(
NivoDate(is_archive=True, nivo_date=date(2017, 1, 1)), con
)
assert isinstance(r, ArchiveNivoCss)
assert r.nivo_date == date(2017, 1, 1)
@responses.activate
def test_recent_nivo_download(self, database):
url = f"{Config.METEO_FRANCE_NIVO_BASE_URL}/nivo.20190812.csv"
with open(os.path.join(CURRENT_DIR, "test_data/nivo.20190812.csv")) as f:
responses.add(responses.GET, url, body=f.read(), content_type="text/plain")
with connection_scope(database.engine) as con:
r = download_nivo(
NivoDate(is_archive=False, nivo_date=date(2019, 8, 12)), con
)
assert isinstance(r, NivoCsv)
assert r.nivo_date == date(2019, 8, 12)
@responses.activate
def test_download_fail(self, database):
url = f"{Config.METEO_FRANCE_NIVO_BASE_URL}/nivo.20190812.csv"
responses.add(responses.GET, url, status=503)
with connection_scope(database.engine) as con:
with pytest.raises(HTTPError):
download_nivo(
NivoDate(is_archive=False, nivo_date=date(2019, 8, 12)), con
)
class TestImportNivo:
# if this function fail we don't care. It just normalize and find the pk of the station.
def test_import_nivo(self, database):
with open(os.path.join(CURRENT_DIR, "test_data/nivo.20190812.csv")) as f:
with connection_scope(database.engine) as con:
nivo_csv = DictReader(f, delimiter=";")
n = NivoCsv(
NivoDate(is_archive=False, nivo_date=date(2019, 8, 12)), con
)
n.nivo_csv = nivo_csv
class TestCreateNewUnknownNivoSensorStation:
def test_create_new_unknown_nivo_sensor_station(self, database):
with connection_scope(database.engine) as con:
r = create_new_unknown_nivo_sensor_station(10, con)
assert isinstance(r.nss_id, UUID)
def test_create_new_sensor_station_fail(self, database):
"""
It should fail when two unknown station have the same name. non-idempotency is assumed (tech debt FTW)
"""
with connection_scope(database.engine) as con:
with pytest.raises(IntegrityError):
r = create_new_unknown_nivo_sensor_station(10, con)
assert isinstance(r.nss_id, UUID)
create_new_unknown_nivo_sensor_station(10, con)
|
StarcoderdataPython
|
9722721
|
from django.db import models
import datetime
from Accounts.models import Examinee, Examiner
# Create your models here.
# from AnswerManagement.models import ExamineeCustomAnswer
class Exam(models.Model):
examiner = models.ForeignKey(Examiner, on_delete=models.SET_NULL, null=True, default=1)
exam_code = models.IntegerField(unique=True)
exam_title = models.CharField(max_length=200)
exam_marks = models.IntegerField(null=False, default=100)
exam_date_time = models.DateTimeField()
exam_duration = models.IntegerField(null=False, default=60)
exam_question = models.FileField(upload_to='exam/questions', null=False, blank=False,
default='exam/questions/sample.pdf')
def getParticipant(self):
participant = len(AttemptedExam.objects.filter(exam_id=self.id))
return participant
def getSubmission(self):
from AnswerManagement.models import ExamineeAnswer
submissions = len(ExamineeAnswer.objects.filter(exam_id=self.id))
return submissions
def isStarted(self):
timezone = self.exam_date_time.tzinfo
now = datetime.datetime.now(timezone) + datetime.timedelta(hours=6)
if self.exam_date_time <= now:
return True
return False
def isRunning(self):
timezone = self.exam_date_time.tzinfo
mins = self.exam_duration
mins_added = datetime.timedelta(minutes=mins)
datetime_start = self.exam_date_time
future_date_and_time = datetime_start + mins_added
now = datetime.datetime.now(timezone) + datetime.timedelta(hours=6)
if now < future_date_and_time and not now < datetime_start:
# print("Running")
return True
return False
def isUpcoming(self):
timezone = self.exam_date_time.tzinfo
mins = self.exam_duration
mins_added = datetime.timedelta(minutes=mins)
datetime_start = self.exam_date_time
future_date_and_time = datetime_start + mins_added
now = datetime.datetime.now(timezone) + datetime.timedelta(hours=6)
if now < datetime_start:
# print("Running")
return True
return False
def __str__(self):
return self.exam_title
class AttemptedExam(models.Model):
exam = models.ForeignKey(Exam, on_delete=models.CASCADE, null=True)
examinee = models.ForeignKey(Examinee, on_delete=models.CASCADE)
submit = models.BooleanField(default=False, null=False, blank=False)
def hasReturnAttachment(self):
from ResultManagement.models import Result
attachment = Result.objects.filter(examinee=self.examinee, exam=self.exam)
# print("Function:", attachment[0])
        if attachment:
            attachment = attachment[0].attachment
            if attachment:
                return True
            else:
                return False
        return False
@property
def ReturnAttachment(self):
from ResultManagement.models import Result
attachment = Result.objects.filter(examinee=self.examinee, exam=self.exam)
        if self.hasReturnAttachment():
            # print("URL", attachment)
            return attachment[0].attachment.url
def __str__(self):
return str("Exam: " + self.exam.exam_title + ",Examinee: " + self.examinee.user.username)
class Question(models.Model):
marks = models.IntegerField(null=False, default=100)
time_limit = models.IntegerField(null=False, default=60)
exam = models.ForeignKey(Exam, on_delete=models.CASCADE)
def __str__(self):
return str(self.id) + " " + self.exam.exam_title
class MCQQuestion(models.Model):
question_text = models.CharField(max_length=200)
option1 = models.CharField(max_length=200)
option2 = models.CharField(max_length=200)
option3 = models.CharField(max_length=200)
option4 = models.CharField(max_length=200)
ques_marks = models.IntegerField(blank=False, null=False, default=1)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
def __str__(self):
return str(self.id) + "." + str(self.question_text + "(" + self.question.exam.exam_title + ")")
class CustomQuestion(models.Model):
question_text = models.CharField(max_length=200)
ques_marks = models.IntegerField(blank=False, null=False, default=1)
question = models.ForeignKey(Question, on_delete=models.CASCADE)
def __str__(self):
return str(self.id) + "." + str(self.question_text + "(" + self.question.exam.exam_title + ")")
|
StarcoderdataPython
|
9685095
|
#!/usr/bin/python3
# Python3 script to produce torrent files from existing files and folders
# This script currently only works for archive.org releases.
# It calls 3 helper scripts.
# Import everything we will need
import os
import shutil
import logging
from internetarchive import get_item
import rcc_hardlinks
import rcc_cleanup
# import rcc_torrent
|
StarcoderdataPython
|
1762067
|
<filename>runtime/build.py
#!/usr/bin/env python
import sys
try:
from RuntimeBuilder import *
from Sim import *
except ImportError, e:
print "Couldn't find project-utils modules."
sys.exit(1)
MAXFILES = ['PacketPusher.max']
sources = ['packetpusher.c']
target = 'packetpusher'
includes = []
b = MaxRuntimeBuilder(maxfiles=MAXFILES)
s = MaxCompilerSim(dfeModel="ISCA")
e = Executor(logPrefix="[%s] " % (target))
def build():
compile()
link()
def compile():
b.slicCompile()
b.compile(sources)
def link():
b.link(sources, target)
def clean():
b.clean()
def start_sim():
s.start(netConfig=[{ 'NAME' : 'QSFP_BOT_10G_PORT1', 'TAP': '172.17.2.1', 'NETMASK' : '255.255.255.224' }])
def stop_sim():
s.stop()
def restart_sim():
s.start()
def run_sim(pcap):
build()
start_sim()
e.execCommand([ "./" + target, pcap])
e.wait()
# stop_sim()
def maxdebug():
s.maxdebug(MAXFILES)
if __name__ == '__main__':
fabricate.main()
|
StarcoderdataPython
|
3204331
|
import timeboard as tb
import datetime
import pytest
import pandas as pd
class TestVersion(object):
def test_version(self):
version = tb.read_from('VERSION.txt')
assert version == tb.__version__
class TestTBConstructor(object):
def test_tb_constructor_trivial(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1])
assert clnd._timeline.labels.eq([1]*12).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_empty_layout(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[],
)
assert clnd._timeline.labels.isnull().all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_empty_layout_with_default_label(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[],
default_label=100)
assert clnd._timeline.labels.eq([100]*12).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_trivial_with_amendments(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'11 Jan 2017': 2,
'12 Jan 2017': 3})
assert clnd._timeline.labels.eq([1]*10 + [2,3]).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_amendments_outside(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'31 Dec 2016': 2,
'12 Jan 2017': 3})
assert clnd._timeline.labels.eq([1]*11 + [3]).all()
assert clnd.start_time == datetime.datetime(2017, 1, 1, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 1, 12, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 1, 13, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
def test_tb_constructor_bad_layout(self):
with pytest.raises(TypeError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=1)
def test_tb_constructor_duplicate_amendments(self):
with pytest.raises(KeyError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments={'02 Jan 2017 12:00': 2,
'02 Jan 2017 15:15': 3})
def test_tb_constructor_bad_amendments(self):
with pytest.raises(TypeError):
tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[1],
amendments=[0])
def test_tb_constructor_trivial_selector(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
sdl = clnd.default_schedule
selector = clnd.default_selector
assert selector(clnd._timeline[1])
assert [selector(x) for x in clnd._timeline.labels] == [False, True] * 6
assert (sdl.on_duty_index == [1, 3, 5, 7, 9, 11]).all()
assert (sdl.off_duty_index == [0, 2, 4, 6, 8, 10]).all()
def test_tb_constructor_trivial_custom_selector(self):
def custom_selector(x):
return x>1
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2],
default_selector=custom_selector)
sdl = clnd.default_schedule
selector = clnd.default_selector
assert not selector(clnd._timeline[1])
assert [selector(x) for x in clnd._timeline.labels] == [False, False,
False, True] * 3
assert (sdl.on_duty_index == [3, 7, 11]).all()
assert (sdl.off_duty_index == [0, 1, 2, 4, 5, 6, 8, 9, 10]).all()
class TestTBConstructorWithOrgs(object):
def test_tb_constructor_week5x8(self):
week5x8 = tb.Organizer(marker='W', structure=[[1, 1, 1, 1, 1, 0, 0]])
amendments = pd.Series(index=pd.date_range(start='01 Jan 2017',
end='10 Jan 2017',
freq='D'),
data=0).to_dict()
clnd = tb.Timeboard(base_unit_freq='D',
start='28 Dec 2016', end='02 Apr 2017',
layout=week5x8,
amendments=amendments)
assert clnd.start_time == datetime.datetime(2016, 12, 28, 0, 0, 0)
assert clnd.end_time > datetime.datetime(2017, 4, 2, 23, 59, 59)
assert clnd.end_time < datetime.datetime(2017, 4, 3, 0, 0, 0)
assert clnd.base_unit_freq == 'D'
assert clnd('28 Dec 2016').is_on_duty()
assert clnd('30 Dec 2016').is_on_duty()
assert clnd('31 Dec 2016').is_off_duty()
assert clnd('01 Jan 2017').is_off_duty()
assert clnd('10 Jan 2017').is_off_duty()
assert clnd('11 Jan 2017').is_on_duty()
assert clnd('27 Mar 2017').is_on_duty()
assert clnd('31 Mar 2017').is_on_duty()
assert clnd('01 Apr 2017').is_off_duty()
assert clnd('02 Apr 2017').is_off_duty()
class TestTimeboardSchedules(object):
def test_tb_add_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
#layout=[0, 1, 0, 0, 2, 0])
layout=['O', 'A', 'O', 'O', 'B', 'O'])
assert len(clnd.schedules) == 1
assert 'on_duty' in clnd.schedules
clnd.add_schedule(name='sdl1', selector=lambda x: x == 'B')
clnd.add_schedule(name='sdl2', selector=lambda x: x == 'C')
assert len(clnd.schedules) == 3
assert 'sdl1' in clnd.schedules
sdl1 = clnd.schedules['sdl1']
assert sdl1.name == 'sdl1'
assert not sdl1.is_on_duty(1)
assert sdl1.is_on_duty(4)
assert 'sdl2' in clnd.schedules
sdl2 = clnd.schedules['sdl2']
assert sdl2.name == 'sdl2'
assert not sdl2.is_on_duty(1)
assert not sdl2.is_on_duty(4)
assert clnd.default_schedule.name == 'on_duty'
assert clnd.default_schedule.is_on_duty(1)
assert clnd.default_schedule.is_on_duty(4)
def test_tb_drop_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
clnd.add_schedule(name='sdl', selector=lambda x: x > 1)
assert len(clnd.schedules) == 2
sdl = clnd.schedules['sdl']
clnd.drop_schedule(sdl)
assert len(clnd.schedules) == 1
with pytest.raises(KeyError):
clnd.schedules['sdl']
# object itself continues to exists while referenced
assert not sdl.is_on_duty(1)
def test_tb_schedule_names(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
clnd.add_schedule(name=1, selector=lambda x: x > 1)
assert len(clnd.schedules) == 2
assert clnd.schedules['1'].name == '1'
with pytest.raises(KeyError):
clnd.add_schedule(name='1', selector=lambda x: x > 2)
def test_tb_bad_schedule(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
with pytest.raises((ValueError, AttributeError)):
clnd.add_schedule(name='sdl', selector='selector')
with pytest.raises(TypeError):
clnd.add_schedule(name='sdl', selector=lambda x,y: x+y)
class TestTimeboardWorktime(object):
def test_tb_default_worktime_source(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0])
assert clnd.worktime_source == 'duration'
def test_tb_set_worktime_source(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0],
worktime_source='labels')
assert clnd.worktime_source == 'labels'
def test_tb_bad_worktime_source(self):
with pytest.raises(ValueError):
tb.Timeboard(base_unit_freq='D',
start='31 Dec 2016', end='12 Jan 2017',
layout=[0, 1, 0, 0, 2, 0],
worktime_source='bad_source')
#TODO: test timeboards with multiplied freqs
class TestTimeboardToDataFrame(object):
def test_timeboard_to_dataframe(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
clnd.add_schedule('my_schedule', lambda x: True)
df = clnd.to_dataframe()
assert len(df) == 12
# we are not hardcoding the list of columns here;
# however, there must be at least 5 columns: two showing the start
# and the end times of workshifts, one for the labels,
# and two for the schedules
assert len(list(df.columns)) >=5
assert 'my_schedule' in list(df.columns)
def test_timeboard_to_dataframe_selected_ws(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
df = clnd.to_dataframe(1, 5)
assert len(df) == 5
def test_timeboard_to_dataframe_reversed_ws(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
# This is ok. This way an empty df for a void interval is created.
df = clnd.to_dataframe(5, 1)
assert df.empty
def test_timeboard_to_dataframe_bad_locations(self):
clnd = tb.Timeboard(base_unit_freq='D',
start='01 Jan 2017', end='12 Jan 2017',
layout=[0, 1, 0, 2])
with pytest.raises(AssertionError):
clnd.to_dataframe(1, 12)
with pytest.raises(AssertionError):
clnd.to_dataframe(12, 1)
with pytest.raises(AssertionError):
clnd.to_dataframe(-1, 5)
with pytest.raises(AssertionError):
clnd.to_dataframe(5, -1)
|
StarcoderdataPython
|
1726913
|
import re
import time
from ceryle.util import StopWatch
def test_stopwatch():
sw = StopWatch()
sw.start()
str0 = sw.str_last_lap()
time.sleep(0.5)
total1, lap1 = sw.elapse()
str1 = sw.str_last_lap()
time.sleep(0.3)
total2, lap2 = sw.elapse()
str2 = sw.str_last_lap()
assert 0.5 <= total1 and total1 < 0.8
assert 0.5 <= lap1 and lap1 < 0.6
assert 0.8 <= total2 and total2 < 1.0
assert 0.3 <= lap2 and lap2 < 0.4
print(str0, str1, str2)
    assert re.match(r'^00:00\.000 \(00:00\.000\)$', str0) is not None
assert re.match(r'^00:00\.5[\d]{2} \(00:00\.5[\d]{2}\)$', str1) is not None
assert re.match(r'^00:00\.3[\d]{2} \(00:00\.8[\d]{2}\)$', str2) is not None
|
StarcoderdataPython
|
62574
|
<filename>ch1/2.py
import random
secret = random.randint(1, 99)
guess = 0
tries = 0
print("嘿,我是海盗王,我有个秘密")
print("秘密是1到99间的一个数,给你6次机会猜哦")
while guess != secret and tries <6:
guess = int(input("输入你猜的数:"))
if guess < secret:
print("太小了,笨蛋")
elif guess > secret:
print("太大了,呆子")
tries = tries +1
if guess == secret:
print("你真厉害,猜对了!")
else:
print("你没机会了,下次好运!")
print("我的秘密是:", secret)
|
StarcoderdataPython
|
160971
|
from collections.abc import Collection
from inspect import getmembers
from itertools import starmap
from typing import Any
from graphql import print_schema
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import as_declarative
from apischema import Undefined, deserialize, serialize
from apischema.graphql import graphql_schema
from apischema.json_schema import deserialization_schema
from apischema.objects import ObjectField, set_object_fields
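# Map a SQLAlchemy Column onto an apischema ObjectField, deriving the required flag and default from the column definition.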
def column_field(name: str, column: Column) -> ObjectField:
required = False
default: Any = ...
if column.default is not None:
default = column.default
elif column.server_default is not None:
default = Undefined
elif column.nullable:
default = None
else:
required = True
col_type = column.type.python_type
if column.nullable:
col_type = col_type | None
return ObjectField(column.name or name, col_type, required, default=default)
# Very basic SQLAlchemy support
@as_declarative()
class Base:
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
columns = getmembers(cls, lambda m: isinstance(m, Column))
if not columns:
return
set_object_fields(cls, starmap(column_field, columns))
class Foo(Base):
__tablename__ = "foo"
bar = Column(Integer, primary_key=True)
baz = Column(String)
foo = deserialize(Foo, {"bar": 0})
assert isinstance(foo, Foo)
assert foo.bar == 0
assert serialize(Foo, foo) == {"bar": 0, "baz": None}
assert deserialization_schema(Foo) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"type": "object",
"properties": {
"bar": {"type": "integer"},
"baz": {"type": ["string", "null"], "default": None},
},
"required": ["bar"],
"additionalProperties": False,
}
def foos() -> Collection[Foo] | None:
...
schema = graphql_schema(query=[foos])
schema_str = """\
type Query {
foos: [Foo!]
}
type Foo {
bar: Int!
baz: String
}"""
assert print_schema(schema) == schema_str
|
StarcoderdataPython
|
4818302
|
name = 'adeline'
|
StarcoderdataPython
|
5011883
|
<filename>compare_variant_file_to_reference.py
#
# Copyright (c) 2018 <NAME>
# This code is licensed under MIT license (see LICENSE for details).
#
import argparse
from Bio import SeqIO
import sys
import vcf
if __name__ == "__main__":
# Parse the command line arguments.
parser = argparse.ArgumentParser(description = "Check the REF column of a variant file.")
parser.add_argument('--vcf', type = argparse.FileType('r'), help = "Variant file")
parser.add_argument('--reference', type = argparse.FileType('r'), help = "Reference FASTA")
parser.add_argument('--chr', type = str, required = False, default = None, help = "Filter by chromosome")
args = parser.parse_args()
# Take the first sequence.
fasta_record = next(SeqIO.parse(args.reference, format = "fasta"))
print("Using sequence with ID '%s' as the reference." % fasta_record.id, file = sys.stderr)
reference_seq = fasta_record.seq
vcf_reader = vcf.Reader(args.vcf)
for row in vcf_reader:
if not (args.chr is None or row.CHROM == args.chr):
continue
pos = row.POS
actual = str(row.REF).upper()
expected = str(reference_seq[pos - 1 : pos - 1 + len(actual)]).upper()
if actual != expected:
print("Mismatch at VCF position %d: expected '%s', got '%s'" % (pos, expected, actual), file = sys.stderr)
|
StarcoderdataPython
|
270072
|
from django.contrib import admin
from .models import *
admin.site.register(Doctor)
admin.site.register(Patient)
admin.site.register(Pharmacist)
admin.site.register(Pathologist)
admin.site.register(Appointment)
admin.site.register(Medecine)
# ----------------------------------------------------
# pharmacy registrations
admin.site.register(specificproducts)
admin.site.register(allproducts)
# admin.site.register(Pharmacist)
admin.site.register(Order)
admin.site.register(Cart)
admin.site.register(WalkinOrder)
admin.site.register(WalkinCart)
# pharmacy Registration over
# ----------------------------------------------------
#----------------------------------------------------
#pathology registrations
admin.site.register(labtest)
admin.site.register(BookTest)
admin.site.register(Tests)
admin.site.register(AnonyTests)
admin.site.register(AddTests)
#pathology Registration over
#----------------------------------------------------
|
StarcoderdataPython
|
8178495
|
<gh_stars>1-10
ami_filters = {
'id': 'image-id',
'name': 'name',
'architecture': 'architecture',
'platform': 'platform',
'owner': 'owner-id',
'public': 'is-public',
'state': 'state',
}
ami_distributions = {
'ubuntu': 'ubuntu/images/hvm-ssd/ubuntu-*-*{version}*-amd64-server-*',
'windows': 'Windows_Server-*{version}*-English-*-Base-20*.*.*',
'amazon': 'amzn-ami-hvm-20*.*.*-x86_64-*',
}
distrib_amis = {
'ubuntu': 'ami-f90a4880',
'windows': 'ami-b5530b5e',
'redhat': 'ami-c86c3f23',
}
def _extract_amis(self, filters=None, regions=[], return_first=False):
    # Copy the caller's filters so a shared default list is never mutated between calls.
    filters = list(filters) if filters else []
    filters.append({'Name': 'state', 'Values': ['available', 'pending']})
# Just supported x64 OS
filters.append({'Name': 'architecture', 'Values': ['x86_64']})
filters.append({'Name': 'hypervisor', 'Values': ['xen']})
filters.append({'Name': 'virtualization-type', 'Values': ['hvm']})
filters.append({'Name': 'image-type', 'Values': ['machine']})
filters.append({'Name': 'root-device-type', 'Values': ['ebs']})
curRegion = self.region
regions = self.parse_regions(regions)
results = list()
for region in regions:
self.change_region(region['RegionName'])
amis = self.client.describe_images(Filters=filters)['Images']
amis = self.inject_client_vars(amis)
if return_first and amis:
self.change_region(curRegion)
return amis[0]
results.extend(amis)
self.change_region(curRegion)
return results
def get_amis_by_distribution(self, distrib, version='*', latest=False, regions=[]):
'''
Get one or more Images filtering by distribution
Args:
distrib (str): Distribution of the image (i.e.: ubuntu)
version (str): Version of the system
latest (bool): True if only returns the newest item.
Return:
Image (lst): List with the images requested.
'''
self.validate_filters(distrib, self.ami_distributions.keys())
filters = [
{'Name': 'name', 'Values': [self.ami_distributions[distrib].format(version=version)]},
{'Name': 'is-public', 'Values': ['true']}
]
results = self._extract_amis(filters=filters, regions=regions)
results = sorted(results, key=lambda k: k['Name'])
if latest and results:
return [results[-1]]
return results
def get_ami_by(self, filters, regions=[]):
'''
Get an ami for one or more regions that matches with filter
Args:
filter_key (str): Name of the filter
filter_value (str): Value of the filter
regions (lst): Regions where to look for this element
Return:
Image (dict): Image requested
'''
return self.get_amis_by(filters=filters,
regions=regions,
return_first=True)
def get_amis_by(self, filters, regions=[], return_first=False):
'''
Get list of amis for one or more regions that matches with filter
Args:
filter_key (str): Name of the filter
filter_value (str): Value of the filter
regions (lst): Regions where to look for this element
return_first (bool): True if return first result
Return:
Images (lst): List of requested images
'''
formatted_filters = self.validate_filters(filters, self.ami_filters)
return self._extract_amis(filters=formatted_filters, regions=regions, return_first=return_first)
def get_amis(self, regions=[]):
'''
Get all images
Args:
regions (lst): Regions where to look for this element
Returns:
Images (lst): List of all images
'''
return self._extract_amis(regions=regions)
|
StarcoderdataPython
|
8079769
|
<filename>services/consuming_services_apis/consuming_services_apis/views.py
from pyramid.view import view_config
@view_config(route_name='home', renderer='templates/index.pt')
def my_view(_):
return {}
|
StarcoderdataPython
|
6674364
|
a = 1; b = 2 # a note about b
|
StarcoderdataPython
|
1984709
|
<filename>auto_label.py
import sys
import argparse
from yolo_autolabel import YOLO, detect_video
from PIL import Image
import os
import glob
from xml.dom.minidom import Document
import numpy as np
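# Build a Pascal VOC-style annotation XML document for one image from the detected boxes and class indices.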
def make_xml(boxes, classes, imagesize, name, class_name):
    doc = Document()  # create the DOM document object
    DOCUMENT = doc.createElement('annotation')  # create the root <annotation> element
doc.appendChild(DOCUMENT)
#folder
folder = doc.createElement('folder')
DOCUMENT.appendChild(folder)
folder_txt = doc.createTextNode('0')
folder.appendChild(folder_txt)
filename = doc.createElement('filename')
DOCUMENT.appendChild(filename)
filename_txt = doc.createTextNode(name+'.jpg')
filename.appendChild(filename_txt)
path = doc.createElement('path')
DOCUMENT.appendChild(path)
path_txt = doc.createTextNode(name + '.jpg')
path.appendChild(path_txt)
#source
source = doc.createElement('source')
DOCUMENT.appendChild(source)
database = doc.createElement('database')
source.appendChild(database)
database_txt = doc.createTextNode('Unknown')
database.appendChild(database_txt)
#size
size = doc.createElement('size')
DOCUMENT.appendChild(size)
width = doc.createElement('width')
size.appendChild(width)
width_txt = doc.createTextNode(str(imagesize[0]))
width.appendChild(width_txt)
heigth = doc.createElement('height')
size.appendChild(heigth)
heigth_txt = doc.createTextNode(str(imagesize[1]))
heigth.appendChild(heigth_txt)
depth = doc.createElement('depth')
size.appendChild(depth)
depth_txt = doc.createTextNode('3')
depth.appendChild(depth_txt)
#segmented
segmented = doc.createElement('segmented')
DOCUMENT.appendChild(segmented)
segmented_txt = doc.createTextNode('0')
segmented.appendChild(segmented_txt)
for i, c in reversed(list(enumerate(classes))):
object = doc.createElement('object')
DOCUMENT.appendChild(object)
#name
classname = doc.createElement('name')
object.appendChild(classname)
classname_txt = doc.createTextNode(class_name[c])
classname.appendChild(classname_txt)
#pose
pose = doc.createElement('pose')
object.appendChild(pose)
pose_txt = doc.createTextNode('Unspecified')
pose.appendChild(pose_txt)
# truncated
truncated = doc.createElement('truncated')
object.appendChild(truncated)
truncated_txt = doc.createTextNode('0')
truncated.appendChild(truncated_txt)
# difficult
difficult = doc.createElement('difficult')
object.appendChild(difficult)
difficult_txt = doc.createTextNode('0')
difficult.appendChild(difficult_txt)
bndbox = doc.createElement('bndbox')
object.appendChild(bndbox)
box = boxes[i]
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(imagesize[1], np.floor(bottom + 0.5).astype('int32'))
right = min(imagesize[0], np.floor(right + 0.5).astype('int32'))
xmin = doc.createElement('xmin')
bndbox.appendChild(xmin)
xmin_txt = doc.createTextNode(str(left))
xmin.appendChild(xmin_txt)
ymin = doc.createElement('ymin')
bndbox.appendChild(ymin)
ymin_txt = doc.createTextNode(str(top))
ymin.appendChild(ymin_txt)
xmax = doc.createElement('xmax')
bndbox.appendChild(xmax)
xmax_txt = doc.createTextNode(str(right))
xmax.appendChild(xmax_txt)
ymax = doc.createElement('ymax')
bndbox.appendChild(ymax)
ymax_txt = doc.createTextNode(str(bottom))
ymax.appendChild(ymax_txt)
return doc
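# Run YOLO detection on every .jpg matched by 'path' and write a VOC-style XML annotation for each image into 'outdir'.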
def detect_img_write_xml(yolo):
path = "/home/lzb/remotedata/yolov3/VOCdevkit/VOC2007/SegmentationObject/*.jpg"
outdir = "/home/lzb/remotedata/yolov3/VOCdevkit/VOC2007/SegmentationClass"
for jpgfile in glob.glob(path):
name = jpgfile.split('/')[-1].split('.')[0]
img = Image.open(jpgfile)
boxes, classes, imagesize, class_name = yolo.detect_image(img)
doc = make_xml(boxes, classes, imagesize, name, class_name)
with open(outdir+'/'+name+'.xml', 'w') as f:
doc.writexml(f,indent = '\t',newl = '\n', addindent = '\t',encoding='utf-8')
yolo.close_session()
FLAGS = None
if __name__ == '__main__':
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--model', type=str,
help='path to model weight file, default ' + YOLO.get_defaults("model_path")
)
parser.add_argument(
'--anchors', type=str,
help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
)
parser.add_argument(
'--classes', type=str,
help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
)
parser.add_argument(
'--gpu_num', type=int,
help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
)
parser.add_argument(
'--image', default=False, action="store_true",
help='Image detection mode, will ignore all positional arguments'
)
'''
Command line positional arguments -- for video detection mode
'''
parser.add_argument(
"--input", nargs='?', type=str,required=False,default='./path2your_video',
help = "Video input path"
)
parser.add_argument(
"--output", nargs='?', type=str, default="",
help = "[Optional] Video output path"
)
FLAGS = parser.parse_args()
if FLAGS.image:
"""
Image detection mode, disregard any remaining command line arguments
"""
print("Image detection mode")
if "input" in FLAGS:
print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
detect_img_write_xml(YOLO(**vars(FLAGS)))
print('Start write info to xml...')
elif "input" in FLAGS:
detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
else:
print("Must specify at least video_input_path. See usage with --help.")
|
StarcoderdataPython
|
7567
|
from pydantic import BaseModel
from tracardi.domain.entity import Entity
from tracardi.domain.scheduler_config import SchedulerConfig
from tracardi.domain.resource import ResourceCredentials
from tracardi.service.storage.driver import storage
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
class Configuration(BaseModel):
source: Entity
event_type: str
properties: str = "{}"
postpone: str
def validate(config: dict) -> Configuration:
return Configuration(**config)
class SchedulerPlugin(ActionRunner):
@staticmethod
async def build(**kwargs) -> 'SchedulerPlugin':
config = validate(kwargs)
resource = await storage.driver.resource.load(config.source.id)
plugin = SchedulerPlugin(config, resource.credentials)
return plugin
def __init__(self, config: Configuration, credentials: ResourceCredentials):
self.config = config
self.credentials = credentials.get_credentials(
self,
output=SchedulerConfig) # type: SchedulerConfig
async def run(self, payload):
run_in_background = True
if not run_in_background:
return Result(port="response", value=None)
else:
return Result(port="response", value=None)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module='tracardi.process_engine.action.v1.pro.scheduler.plugin',
className='SchedulerPlugin',
inputs=["payload"],
outputs=['response', 'error'],
version='0.6.2',
license="MIT",
author="<NAME>",
init= {
"source": {
"id": ""
},
"event_type": "",
"properties": "{}",
"postpone": "+1m"
}
),
metadata=MetaData(
name='Schedule event',
desc='This plugin schedules events',
icon='calendar',
group=["Time"],
tags=["Pro", "Scheduler"],
pro=True,
)
)
|
StarcoderdataPython
|
313450
|
<reponame>yeyeto2788/foldercompare
"""Test the foldercompare.py module."""
import filecmp
import os
import shutil
import unittest
import foldercompare
class TestRecursiveDircmpReport(unittest.TestCase):
"""Test the _recursive_dircmp function."""
def setUp(self):
"""Create two folders for testing."""
self.folder1 = os.path.join('tests', 'results1')
self.folder2 = os.path.join('tests', 'results2')
os.mkdir(self.folder1)
os.mkdir(self.folder2)
@unittest.skip('Fails half the time -- enough to avoid same_files feature')
def test_dircmp_diff_files_accuracy(self):
"""Different files are identified as such using filecmp.dircmp()"""
file1 = os.path.join(self.folder1, 'hello_world.txt')
with open(file1, 'w') as file:
file.write('foo')
file2 = os.path.join(self.folder2, 'hello_world.txt')
with open(file2, 'w') as file:
file.write('bar')
comparison = filecmp.dircmp(self.folder1, self.folder2)
self.assertTrue(comparison.diff_files == ['hello_world.txt'])
self.assertTrue(comparison.same_files == [])
def test_file_in_both(self):
"""Classifies two identical files as the same."""
file1 = os.path.join(self.folder1, 'hello_world.txt')
with open(file1, 'w') as file:
file.write('hello world')
file2 = os.path.join(self.folder2, 'hello_world.txt')
with open(file2, 'w') as file:
file.write('hello world')
report = foldercompare._recursive_dircmp(self.folder1, self.folder2)
expected = {'both': ['./hello_world.txt'], 'right': [], 'left': []}
self.assertEqual(report, expected)
def test_file_only_in_left(self):
"""Classifies file only in one directory."""
file1 = os.path.join(self.folder1, 'hello_world.txt')
with open(file1, 'w') as file:
file.write('hello world')
report = foldercompare._recursive_dircmp(self.folder1, self.folder2)
expected = {'left': ['./hello_world.txt'], 'right': [], 'both': []}
self.assertEqual(report, expected)
def test_subdirectory_only_in_left(self):
"""Classifies subdirectory with file only in left folder."""
subdir1 = os.path.join(self.folder1, 'subdir')
os.mkdir(subdir1)
file1 = os.path.join(subdir1, 'hello_world.txt')
with open(file1, 'w') as file:
file.write('hello world')
report = foldercompare._recursive_dircmp(self.folder1, self.folder2)
expected = {'left': ['./subdir'], 'right': [], 'both': []}
self.assertEqual(report, expected)
def test_subdir_file_only_in_left(self):
"""Classifies file only in one subdirectory."""
subdir1 = os.path.join(self.folder1, 'subdir')
os.mkdir(subdir1)
subdir2 = os.path.join(self.folder2, 'subdir')
os.mkdir(subdir2)
file1 = os.path.join(subdir1, 'hello_world.txt')
with open(file1, 'w') as file:
file.write('hello world')
report = foldercompare._recursive_dircmp(self.folder1, self.folder2)
expected = {'left': ['./subdir/hello_world.txt'], 'right': [], 'both': []}
self.assertEqual(report, expected)
def tearDown(self):
"""Delete test folders after each run."""
shutil.rmtree(self.folder1)
shutil.rmtree(self.folder2)
class TestCompareRegression(unittest.TestCase):
"""Test the compare function with known inputs and expected outputs."""
def setUp(self):
"""Set test input data and expected control outputs."""
self.folder1 = os.path.join('.', 'tests', 'control_data_1', 'Data Folder')
self.folder2 = os.path.join('.', 'tests', 'control_data_2', 'Data Folder')
self.resultfile = os.path.join('tests', 'results')
self.controlresults = os.path.join('tests', 'control_results')
def test_create_txt(self):
"""Can create a single TXT file, identical to the control."""
foldercompare.compare(self.folder1, self.folder2,
self.resultfile, output_txt=True)
result = filecmp.cmp(self.resultfile + '.txt',
self.controlresults + '.txt',
shallow=False)
self.assertTrue(result)
def test_create_csv(self):
"""Can create a single CSV file, identical to the control."""
foldercompare.compare(self.folder1, self.folder2,
self.resultfile, output_csv=True)
result = filecmp.cmp(self.resultfile + '.csv',
self.controlresults + '.csv',
shallow=False)
self.assertTrue(result)
def test_create_both(self):
"""Can create both files at once, identical to the control."""
foldercompare.compare(self.folder1, self.folder2, self.resultfile,
output_txt=True, output_csv=True)
result = filecmp.cmp(self.resultfile + '.txt',
self.controlresults + '.txt',
shallow=False)
self.assertTrue(result)
result = filecmp.cmp(self.resultfile + '.csv',
self.controlresults + '.csv',
shallow=False)
self.assertTrue(result)
def tearDown(self):
"""Delete test files after each run."""
try:
os.remove(self.resultfile + '.txt')
except FileNotFoundError:
pass
try:
os.remove(self.resultfile + '.csv')
except FileNotFoundError:
pass
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
9750186
|
<filename>QuickDraw.py
import random
import math
print("")
print("'WELCOME TO QUICKDRAW'")
print("")
Hand = []
DiscardPile = []
CardsDeck = [
"Ace of Spades",
"Ace of Hearts",
"Ace of Clubs",
"Ace of Diamonds",
"Deuce of Spades",
"Deuce of Hearts",
"Deuce of Clubs",
"Deuce of Diamonds",
"Three of Spades",
"Three of Hearts",
"Three of Clubs",
"Three of Diamonds",
"Four of Spades",
"Four of Hearts",
"Four of Clubs",
"Four of Diamonds",
"Five of Spades",
"Five of Hearts",
"Five of Clubs",
"Five of Diamonds",
"Six of Spades",
"Six of Hearts",
"Six of Clubs",
"Six of Diamonds",
"Seven of Spades",
"Seven of Hearts",
"Seven of Clubs",
"Seven of Diamonds",
"Eight of Spades",
"Eight of Hearts",
"Eight of Clubs",
"Eight of Diamonds",
"Nine of Spades",
"Nine of Hearts",
"Nine of Clubs",
"Nine of Diamonds",
"Ten of Spades",
"Ten of Hearts",
"Ten of Clubs",
"Ten of Diamonds",
"Jack of Spades",
"Jack of Hearts",
"Jack of Clubs",
"Jack of Diamonds",
"Queen of Spades",
"Queen of Hearts",
"Queen of Clubs",
"Queen of Diamonds",
"King of Spades",
"King of Hearts",
"King of Clubs",
"King of Diamond",
]
print("Cards and Their Points")
print("")
print("Ace = 1 Point. Deuce = 2 Points. Numbered Cards = Points By Number. Jack = 11 Points. Queen = 12 Points. King = 13 Points.")
print("")
print("How to Win:")
print("")
print("Two Ways To Win!! 1st Winner is Declared by Traditional Poker Rules. 2nd Winner is Declared by Most Points Held. Winners split the earnings.")
print("")
print("How to Play:")
print("")
print("Begin with 5 Cards. You may pay a chip for a new deal, twice. You may pay a chip for up to 6 New Cards, drawn one at a time, taking turns. Each turn you may pay any number of chips for an equal number of cards from which you choose only one. The last round is the 'Draw' round. Bet chips to make your opponent fold. Fold to protect yourself from losing chips. ")
print("")
Shuffled = CardsDeck
print("Please shuffle the deck...")
#user input
#insert delay
print("")
print("'Thank you!'")
#print("'Referee Check!'") # for testing
#Referee = print(Shuffled) # for testing
print("")
print("Let's Deal, Shall We?")
#user input
#insert delay
print("")
TriesLeft = 3
def Shuffle():
random.shuffle(Shuffled)
Shuffle()
def Deal():
NewList = Shuffled
global TriesLeft
TriesLeft = TriesLeft
global Hand
Hand = Hand
global DiscardPile
DiscardPile= DiscardPile
Hand.append(NewList[0])
Hand.append(NewList[1])
Hand.append(NewList[2])
Hand.append(NewList[3])
Hand.append(NewList[4])
NewList.pop(0)
NewList.pop(0)
NewList.pop(0)
NewList.pop(0)
NewList.pop(0)
Shuffle()
#print(len(Hand))
print("")
print(Hand)
global Like
Like = False
print(f"Tries Left = {TriesLeft -1}")
TriesLeft -=1
if TriesLeft >=1:
print("Do You Like This Hand?")
#user input
print("Look at your Hand")
Deal()
if TriesLeft >= 0:
Like = True
print("")
def Reconstitute():
DiscardPile.append(Hand[0:5])
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
if TriesLeft >=0:
Reconstitute()
Deal()
#print(len(Hand))
print("Deal Again")
Reconstitute()
Deal()
elif TriesLeft < 0:
print("")
DiscardPile.append(Hand[0:5])
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
Hand.pop(0)
print("Deal Again")
print(Hand)
else:
print("This is Your Hand")
print("")
print("Choose your way to win. Points or Poker.")
print("")
print("Draw!!!")
print("")
Thumb = Hand[0]
Pointer = Hand[1]
Middle = Hand[2]
Ring = Hand[3]
Pinky = Hand[4]
YourFive = [Thumb, Pointer, Middle, Ring, Pinky]
DuplicateCheck = set(YourFive)
print("Testing Card Positions")
print(YourFive)
print("")
print("Reference: Script Line 118")
print("")
#insert user input ThumbDiscard
DiscardThumb = Hand.pop(0)
if DiscardThumb:
Shuffled.pop(0), Hand.insert(0, Shuffled[0])
print("'Testing Thumb Discard'")
print(Hand)
print("")
#insert user input PointerDiscard
DiscardPointer = Hand.pop(1)
if DiscardPointer:
Shuffled.pop(0), Hand.insert(1, Shuffled[0])
print("'Testing for Duplicates in Pointer'")
print(Hand)
#insert user input MiddleDiscard
DiscardMiddle = Hand.pop(2)
if DiscardMiddle:
Shuffled.pop(0), Hand.insert(2, Shuffled[0])
#insert user input RingDiscard
DiscardRing = Hand.pop(3)
if DiscardRing:
Shuffled.pop(0), Hand.insert(3, Shuffled[0])
#insert user input PinkyDiscard
DiscardPinky = Hand.pop(4)
if DiscardPinky:
Shuffled.pop(0), Hand.insert(4, Shuffled[0])
print("")
print("Testing The Rest")
print(Hand)
print("")
print("Testing Remaining Deck")
print(Shuffled)
print("")
print("Testing Discard Pile")
print(DiscardPile)
|
StarcoderdataPython
|
351815
|
<gh_stars>0
# Draw a square
import turtle as t
for i in range(4):
t.forward(100)
t.left(90)
|
StarcoderdataPython
|
1938122
|
<filename>tests/test_marmiton.py
from recipe_scrapers.marmiton import Marmiton
from tests import ScraperTest
class TestMarmitonScraper(ScraperTest):
scraper_class = Marmiton
def test_host(self):
self.assertEqual("marmiton.org", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.marmiton.org/recettes/recette_ratatouille_23223.aspx",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Ratatouille")
def test_total_time(self):
self.assertEqual(80, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("4 personnes", self.harvester_class.yields())
def test_ingredients(self):
self.assertCountEqual(
[
"350 g d'aubergine",
"350 g de courgette",
"350 g de poivron de couleur rouge et vert",
"350 g d'oignon",
"500 g de tomate bien mûres",
"3 gousses d'ail",
"6 cuillères à soupe d'huile d'olive",
"1 brin de thym",
"1 feuille de laurier",
"poivre",
"sel",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"Coupez les tomates pelées en quartiers,\n"
"les aubergines et les courgettes en rondelles.\n"
"Emincez les poivrons en lamelles\n"
"et l'oignon en rouelles.\n"
"Chauffez 2 cuillères à soupe d'huile dans une poêle\n"
"et faites-y fondre les oignons et les poivrons.\n"
"Lorsqu'ils sont tendres, ajoutez les tomates, l'ail haché, le thym et le laurier.\n"
"Salez, poivrez et laissez mijoter doucement à couvert durant 45 minutes.\n"
"Pendant ce temps, préparez les aubergines et les courgettes. "
"Faites les cuire séparemment ou non dans l'huile d'olive pendant 15 minutes.\n"
"Vérifiez la cuisson des légumes pour qu'ils ne soient plus fermes. "
"Ajoutez les alors au mélange de tomates et prolongez la cuisson sur tout petit feu pendant 10 min.\n"
"Salez et poivrez si besoin.",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(4.8, self.harvester_class.ratings())
|
StarcoderdataPython
|
3310559
|
from app.tests.v1 import utils
test_utils = utils.Utils()
def test_user_register(client):
''' Test user registration '''
response = client.post('api/v1/auth/user/register', json=test_utils.USER)
json_data = response.get_json()
assert response.status_code == 201
assert json_data['status'] == 201
assert isinstance(json_data['data'], list)
assert json_data['data'][0]['message'] == 'User registered successfully'
def test_user_register_without_email(client):
''' Test user registration without email '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': '',
'phone_number': test_utils.USER['phone_number'],
'is_admin': False,
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Please provide your email'
def test_user_register_existing_email(client):
''' Test user registration with existing email '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': test_utils.USER['email'],
'phone_number': test_utils.USER['phone_number'],
'is_admin': False,
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
client.post('api/v1/auth/user/register', json=test_utils.USER)
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 409
assert json_data['status'] == 409
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'User already exists'
def test_user_register_taken_othername(client):
''' Test user registration with taken othername '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': '<EMAIL>',
'phone_number': test_utils.USER['phone_number'],
'is_admin': False,
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
client.post('api/v1/auth/user/register', json=test_utils.USER)
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 409
assert json_data['status'] == 409
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'The othername you chose is taken'
def test_user_register_invalid_email(client):
''' Test user registration with invalid email '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': 'emailaddress',
'phone_number': test_utils.USER['phone_number'],
'is_admin': False,
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'email is invalid'
def test_user_register_invalid_number(client):
''' Test user registration with invalid number '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': test_utils.USER['email'],
# phone number should be 12 digits
'phone_number': '234435',
'is_admin': False,
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'phone_number is invalid'
def test_user_register_invalid_boolean_is_admin(client):
''' Test user registration with invalid type of is_admin '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': test_utils.USER['email'],
'phone_number': test_utils.USER['phone_number'],
'is_admin': 'False',
'is_politician': test_utils.USER['is_politician'],
'password': test_utils.USER['password'],
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'is_admin needs to be boolean'
def test_user_register_invalid_boolean_is_politician(client):
''' Test user registration with invalid type of is_politician '''
data = {
'firstname': test_utils.USER['firstname'],
'lastname': test_utils.USER['lastname'],
'othername': test_utils.USER['othername'],
'email': test_utils.USER['email'],
'phone_number': test_utils.USER['phone_number'],
'is_admin': test_utils.USER['is_admin'],
'is_politician': 'False',
'password': test_utils.USER['password'],
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'is_politician needs to be boolean'
def test_user_register_with_empty_payload(client):
''' Test user registration with empty payload '''
data = {
}
response = client.post('api/v1/auth/user/register', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
def test_user_register_with_no_payload(client):
''' Test user registration with no payload '''
response = client.post('api/v1/auth/user/register')
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
def test_admin_register(client):
''' Test admin registration '''
response = client.post('api/v1/auth/user/register', json=test_utils.ADMIN)
json_data = response.get_json()
assert response.status_code == 201
assert json_data['status'] == 201
assert isinstance(json_data['data'], list)
assert json_data['data'][0]['message'] == 'User registered successfully'
def test_politician_register(client):
''' Test politician registration '''
response = client.post('api/v1/auth/user/register', json=test_utils.POLITICIAN)
json_data = response.get_json()
assert response.status_code == 201
assert json_data['status'] == 201
assert isinstance(json_data['data'], list)
assert json_data['data'][0]['message'] == 'User registered successfully'
def test_user_login(client):
''' Test user login '''
test_utils.register_user(client, 'user')
data = {
'email': test_utils.USER['email'],
'password': test_utils.USER['password']
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 200
assert json_data['status'] == 200
assert isinstance(json_data['data'], list)
assert json_data['data'][0]['message'] == 'Successfull log in'
assert json_data['data'][0]['auth_token'] is not None
def test_user_login_invalid_credentials(client):
''' Test user login with invalid credentials '''
test_utils.register_user(client, 'user')
data = {
'email': test_utils.USER['email'],
'password': '<PASSWORD>'
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 401
assert json_data['status'] == 401
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Invalid credentials'
def test_user_login_with_empty_data(client):
''' Test user login with empty data '''
test_utils.register_user(client, 'user')
data = {
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Provide email and password as json.'
def test_user_login_without_data(client):
''' Test user login without payload '''
test_utils.register_user(client, 'user')
response = client.post('api/v1/auth/user/login')
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Provide email and password as json.'
def test_user_login_without_email(client):
''' Test user login without email '''
test_utils.register_user(client, 'user')
data = {
'email': '',
'password': test_utils.USER['password']
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Please provide your email'
def test_user_login_without_password(client):
''' Test user login without password '''
test_utils.register_user(client, 'user')
data = {
'email': test_utils.USER['email'],
'password': ''
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'Please provide your password'
def test_user_login_with_invalid_email(client):
''' Test user login with invalid email '''
test_utils.register_user(client, 'user')
data = {
'email': ' erw ',
'password': test_utils.USER['password']
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'email is invalid'
def test_user_login_with_invalid_password(client):
''' Test user login with invalid password '''
test_utils.register_user(client, 'user')
data = {
'email': test_utils.USER['email'],
'password': ' '
}
response = client.post('api/v1/auth/user/login', json=data)
json_data = response.get_json()
assert response.status_code == 400
assert json_data['status'] == 400
assert isinstance(json_data['error'], str)
assert json_data['error'] == 'password is invalid'
|
StarcoderdataPython
|
3420218
|
import pandas as pd
# Relative path
df = pd.read_excel("result_data.xlsx")
print(df)
print(df.info())
# Show all rows
# pd.set_option('display.max_rows', None)
print(df.isnull())
print(df.dropna())
print(df.dropna(how="any"))
print(df.fillna(0))
print(df.fillna({'read_num': 10}))
print(df)
print(df.drop_duplicates())
print(df.drop_duplicates(subset='read_num'))
print(df.drop_duplicates(subset='plantform', keep='last'))
print(df.dtypes)
print(df['read_num'].dtypes)
print(df['fans_num'].astype('float64'))
df1 = pd.read_excel("demo.xlsx")
print(df1)
df1.columns = ['编号', '序号', '姓名', '消费金额']
print(df1)
print(df1.set_index('编号'))
|
StarcoderdataPython
|
8146742
|
<filename>services/core-api/app/api/mines/reports/resources/mine_report_category.py
import uuid
from flask_restplus import Resource, reqparse, fields, inputs
from app.extensions import api, db
from app.api.utils.resources_mixins import UserMixin
from app.api.utils.access_decorators import requires_any_of, VIEW_ALL, MINESPACE_PROPONENT
from app.api.mines.reports.models.mine_report_category import MineReportCategory
from app.api.utils.custom_reqparser import CustomReqparser
from app.api.mines.response_models import MINE_REPORT_DEFINITION_CATEGORIES
class MineReportCategoryListResource(Resource, UserMixin):
@api.marshal_with(MINE_REPORT_DEFINITION_CATEGORIES, envelope='records', code=200, as_list=True)
@api.doc(description='returns the report categories for possible reports.')
@requires_any_of([VIEW_ALL, MINESPACE_PROPONENT])
def get(self):
return MineReportCategory.get_all()
|
StarcoderdataPython
|
3233244
|
# -*- coding: utf-8 -*-
import os
import xlsxwriter
def write_stats(filename, data, **kwargs):
"""
Write statistics to an Excel file.
This function writes to an Excel file FILENAME the statistics
provided in each of the dictionaries contained in DATA. The first
2 arguments must be the inputs as described below followed by
keyword arguments in the format of OPTION = VALUE. Each statistic
will be labeled according to the name under which it is stored in
the DATA data structure, e.g. data.bias will be labeled as "bias".
INPUTS:
filename : name for statistics Excel file
data : a dictionary containing the statistics
data['stat'] : statistics, e.g. data.bias for Bias.
OUTPUTS:
None.
LIST OF OPTIONS:
A title description for each dictionary (TITLE) can optionally be
provided, as well as an option to overwrite the Excel file if it
already exists.
title = title : title descriptor data set, e.g. 'Expt. 01.0'
overwrite = boolean : true/false flag to overwrite Excel file
Author: <NAME>
Symplectic, LLC
www.thesymplectic.com
<EMAIL>
Created on Dec 10, 2016
"""
option = get_write_stats_options(**kwargs)
# Check for existence of file
if os.path.isfile(filename):
if option["overwrite"]:
os.remove(filename)
else:
raise ValueError("File already exists: " + filename)
# Write title information to file
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
# Write descriptive title
if len(option["title"]) > 0:
worksheet.write(1, 0, option["title"])
else:
worksheet.write(1, 0, "Skill Metrics")
# Determine number of elements in the dictionary lists and write
# appropriate header
worksheet.write(3, 0, "Skill Metric")
ncell = len(list(data.items())[0]) - 1
for i in range(ncell):
worksheet.write(3, i + 1, "Case " + str(i + 1))
# Write data of all the fields
row = 4
col = 0
for key, value in data.items():
worksheet.write(row, col, key)
for i, v in enumerate([value]):
worksheet.write(row, col + 1 + i, v)
row += 1
workbook.close()
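# Illustrative usage sketch (not part of the original module; the statistic
# names and file name below are assumptions for demonstration only):
#
#   stats = {'bias': 0.12, 'rmsd': 1.05, 'crmsd': 0.98}
#   write_stats('skill_metrics.xlsx', stats, title='Expt. 01.0', overwrite=True)
#
# This would produce a worksheet with the title in the second row, a
# "Skill Metric" / "Case 1" header row, and one row per statistic.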
def get_write_stats_options(**kwargs):
"""
Get optional arguments for write_stats function.
Retrieves the keywords supplied to the WRITE_STATS function (**KWARGS),
and returns the values in an OPTION dictionary. Default values are
assigned to selected optional arguments. The function will terminate
with an error if an unrecognized optional argument is supplied.
INPUTS:
**kwargs : keyword argument list
OUTPUTS:
option : data structure containing option values.
option['title'] : title descriptor for data set.
option['overwrite'] : boolean to overwrite Excel file.
LIST OF OPTIONS:
A title description for each dataset TITLE can optionally be
provided, as well as an option to overwrite the Excel file if it
already exists.
title = title : title descriptor for each data set in data, e.g.
'Expt. 01.0'
overwrite = boolean : true/false flag to overwrite Excel file
Author: <NAME>
Acorn Science & Innovation
<EMAIL>
Created on Dec 10, 2016
@author: rochfordp
"""
from . import check_on_off
nargin = len(kwargs)
# Set default parameters
option = {}
option["title"] = ""
option["overwrite"] = False
if nargin == 0:
# No options requested, so return with only defaults
return option
# Load custom options, storing values in option data structure
# Check for valid keys and values in dictionary
for optname, optvalue in kwargs.items():
optname = optname.lower()
if optname not in option:
raise ValueError("Unrecognized option: " + optname)
# Replace option value with that from arguments
option[optname] = optvalue
# Check values for specific options
if optname == "overwrite":
option["overwrite"] = check_on_off(option["overwrite"])
return option
|
StarcoderdataPython
|
12845869
|
<filename>bot.py
import urllib
from pyrogram import Client, filters
from pyrogram.types import (InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultArticle, InputTextMessageContent)
from config import Config
bot = Client(
'shareurl-generator',
bot_token = Config.BOT_TOKEN,
api_id = Config.API_ID,
api_hash = Config.API_HASH
)
@bot.on_message(filters.command(['start']))
def start(client, message):
rep = f"**Hi {message.from_user.username}**\n\n**Am a bot to convert __text into Shareable telegram link__.**\nWorks on both **in pm and in Inline😊**\n\nClick __/help__ if needed.."
message.reply_text(
text=rep,
quote=False,
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')],[InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")], [InlineKeyboardButton('Share Me', url='https://t.me/share/url?url=%2A%2AHello%20Plox%20%F0%9F%91%8B%2A%2A%0A%0A__I%20just%20found%20a%20Bot%20to%20convert__%20%2A%2AText%20as%20a%20Shareable%20Text%20Link%2A%2A%20__format%20%F0%9F%A4%A9.%20Hope%20it%20would%20be%20very%20helpful%20for%20u%20too...%F0%9F%A4%97%F0%9F%A4%97__%0A%0A%2A%2ABot%20Link%3A%20%40ShareUrlBot%20%F0%9F%A5%B0%2A%2A')]]))
@bot.on_message(filters.command(['help']))
def help(client, message):
message.reply_text("**Nothing Complicated..🤓**\n\n**For PM:**\n__Send your desired text to this bot to get your link.__\n\n**For Inline Method:**\n__Type__ `@ShareUrlBot your text`\n__in any chats keyboard and hit the inline result.__", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url='https://github.com/ashkar2001/shareurlbotv1')]]))
@bot.on_message(filters.command(['about']))
def about(client, message):
message.reply_text(f"""**• Bot Info •**
**My Name** :- `Share Url Generator`
**Creator** :- @B_woy
**Language** :- `Python3`
**Library** :- `Pyrogram 1.2.8`
**Server** :- `Heroku.com`
**Build Status** :- `V 0.2`
**• User Info •**
**Name** :- `{message.from_user.first_name} {message.from_user.last_name}`
**ID** :- `{message.from_user.id}`
**Username** :- @{message.from_user.username}
**DC ID** :- `{message.from_user.dc_id}`""", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('SOURCE', url = 'https://github.com/ashkar2001/shareurlbotv1')]]))
@bot.on_message(filters.text)
def shareurl(client, message):
query = message.text
url = urllib.parse.quote(query)
rpl = f"https://t.me/share/url?url={url}"
rslt = f"""**Click to CopY ⬇️⬇️** \n\n```{rpl}```"""
message.reply_text(text=rslt, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This Link ⬆️⬆️', url=f'{rpl}')]]))
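# Example of the conversion performed above (illustrative only):
#   urllib.parse.quote('hello world')  ->  'hello%20world'
# so the generated link would be 'https://t.me/share/url?url=hello%20world'.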
@bot.on_inline_query()
def inline(client, message):
query = message.query.lower()
if query == "":
result= [InlineQueryResultArticle(title = "Help !!",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("Search Here", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")]]),
description ="How t0 usE meH !!",
thumb_url="https://telegra.ph/file/99d8f16a777c2ee2781c1.jpg",
input_message_content = InputTextMessageContent(message_text ="**Nothing Complicated..**🤓\n\nType `@ShareUrlBot your text` \nin any chats keyboard and hit the inline result.\n\nNote: __U can also use Me in PM!__"))
]
message.answer(result)
return
else:
url = urllib.parse.quote(query)
rpl = f"https://t.me/share/url?url={url}"
rslt = f"""**Click to CopY⬇️⬇️** \n\n```{rpl}```"""
result = [InlineQueryResultArticle(title = f'{query}',
description =f'{rpl}',
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('Click to Try on This linK ⬆️⬆️', url=f'{rpl}')], [InlineKeyboardButton("Search Again", switch_inline_query_current_chat=""),InlineKeyboardButton("Go Inline", switch_inline_query="")]]),
input_message_content = InputTextMessageContent(message_text = rslt))
]
message.answer(result)
bot.run()
|
StarcoderdataPython
|
259193
|
<filename>lib/gobbet/wordlist.py
from collections import Counter, defaultdict
def pairwise(a):
return zip(a, a[1::])
class Wordlist(Counter):
def bigrams(self):
bigrams = defaultdict(set)
for word in self.keys():
for a,b in pairwise(word):
bigrams[a+b].add(word)
return bigrams
def filter_popularity(self, threshold=3):
return Wordlist({x: count for x, count in self.items() if count >= threshold})
def filter_length(self, threshold=3):
return Wordlist({x: count for x, count in self.items() if len(x) >= threshold})
def filter_unicodes(self, codepoint_ranges):
def _included_letter(l):
return any(ord(l) in r for r in codepoint_ranges)
def _included(word):
return all(_included_letter(l) for l in word)
return Wordlist({x: count for x, count in self.items() if _included(x)})
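# Illustrative usage sketch (not part of the original module):
#
#   wl = Wordlist({'hello': 5, 'help': 2, 'hi': 9})
#   wl.filter_popularity(threshold=3)  # -> Wordlist({'hello': 5, 'hi': 9})
#   wl.filter_length(threshold=3)      # -> Wordlist({'hello': 5, 'help': 2})
#   wl.bigrams()['he']                 # -> {'hello', 'help'}
#   wl.filter_unicodes([range(0x00, 0x80)])  # keeps only pure-ASCII words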
|
StarcoderdataPython
|
6500326
|
<gh_stars>0
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle direct i/o reading and writing controls as markdown."""
import copy
import logging
import pathlib
import re
from typing import Any, Dict, List, Optional, Tuple, Union
import frontmatter
import trestle.oscal.catalog as cat
import trestle.oscal.ssp as ossp
from trestle.core import const
from trestle.core import generators as gens
from trestle.core.err import TrestleError
from trestle.core.markdown.markdown_api import MarkdownAPI
from trestle.core.markdown.md_writer import MDWriter
from trestle.core.utils import spaces_and_caps_to_snake
from trestle.oscal import common
from trestle.oscal import profile as prof
logger = logging.getLogger(__name__)
class ControlIOWriter():
"""Class write controls as markdown."""
def __init__(self):
"""Initialize the class."""
self._md_file: Optional[MDWriter] = None
# Start of section to write controls to markdown
@staticmethod
def _wrap_label(label: str):
l_side = '\['
r_side = '\]'
wrapped = '' if label == '' else f'{l_side}{label}{r_side}'
return wrapped
@staticmethod
def _get_label(part: common.Part) -> str:
# get the label from the props of a part
if part.props is not None:
for prop in part.props:
if prop.name == 'label':
return prop.value.strip()
return ''
def _get_part(self, part: common.Part, item_type: str, skip_id: Optional[str]) -> List[Union[str, List[str]]]:
"""
Find parts with the specified item type, within the given part.
For a part in a control find the parts in it that match the item_type
Return list of string formatted labels and associated descriptive prose
"""
items = []
if part.name in ['statement', item_type]:
# the options here are to force the label to be the part.id or the part.label
# the label may be of the form (a) while the part.id is ac-1_smt.a.1.a
# here we choose the latter and extract the final element
label = part.id.split('.')[-1]
wrapped_label = self._wrap_label(label)
pad = '' if wrapped_label == '' or not part.prose else ' '
prose = '' if part.prose is None else part.prose
# top level prose has already been written out, if present
# use presence of . in id to tell if this is top level prose
if part.id != skip_id:
items.append(f'{wrapped_label}{pad}{prose}')
if part.parts:
sub_list = []
for prt in part.parts:
sub_list.extend(self._get_part(prt, item_type, skip_id))
sub_list.append('')
items.append(sub_list)
return items
def _add_part_and_its_items(self, control: cat.Control, name: str, item_type: str) -> None:
"""For a given control add its one statement and its items to the md file after replacing params."""
items = []
if control.parts:
for part in control.parts:
if part.name == name:
# If the part has prose write it as a raw line and not list element
skip_id = part.id
if part.prose:
# need to avoid split lines in statement items
self._md_file.new_line(part.prose.replace('\n', ' '))
items.append(self._get_part(part, item_type, skip_id))
# unwrap the list if it is many levels deep
while not isinstance(items, str) and len(items) == 1:
items = items[0]
self._md_file.new_paragraph()
self._md_file.new_list(items)
def _add_yaml_header(self, yaml_header: Optional[Dict]) -> None:
if yaml_header:
self._md_file.add_yaml_header(yaml_header)
@staticmethod
def _gap_join(a_str: str, b_str: str) -> str:
a_clean = a_str.strip()
b_clean = b_str.strip()
if not b_clean:
return a_clean
gap = '\n' if a_clean else ''
return a_clean + gap + b_clean
def _add_control_statement(self, control: cat.Control, group_title: str) -> None:
"""Add the control statement and items to the md file."""
self._md_file.new_paragraph()
title = f'{control.id} - \[{group_title}\] {control.title}'
self._md_file.new_header(level=1, title=title)
self._md_file.new_header(level=2, title='Control Statement')
self._md_file.set_indent_level(-1)
self._add_part_and_its_items(control, 'statement', 'item')
self._md_file.set_indent_level(-1)
def _add_control_objective(self, control: cat.Control) -> None:
if control.parts:
for part in control.parts:
if part.name == 'objective':
self._md_file.new_paragraph()
self._md_file.new_header(level=2, title='Control Objective')
self._md_file.set_indent_level(-1)
self._add_part_and_its_items(control, 'objective', 'objective')
self._md_file.set_indent_level(-1)
return
@staticmethod
def _get_control_section_part(part: common.Part, section: str) -> str:
"""Get the prose for a named section in the control."""
prose = ''
if part.name == section and part.prose is not None:
prose = ControlIOWriter._gap_join(prose, part.prose)
if part.parts:
for sub_part in part.parts:
prose = ControlIOWriter._gap_join(prose, ControlIOWriter._get_control_section_part(sub_part, section))
return prose
@staticmethod
def _get_control_section(control: cat.Control, section: str) -> str:
prose = ''
if control.parts:
for part in control.parts:
prose = ControlIOWriter._gap_join(prose, ControlIOWriter._get_control_section_part(part, section))
return prose
@staticmethod
def _find_section_info(part: common.Part, section_list: List[str]):
"""Find section not in list."""
if part.prose and part.name not in section_list:
return part.id, part.name
if part.parts:
for part in part.parts:
id_, name = ControlIOWriter._find_section_info(part, section_list)
if id_:
return id_, name
return '', ''
@staticmethod
def _find_section(control: cat.Control, section_list: List[str]) -> Tuple[str, str]:
"""Find next section not in list."""
if control.parts:
for part in control.parts:
id_, name = ControlIOWriter._find_section_info(part, section_list)
if id_:
return id_, name
return '', ''
@staticmethod
def _get_section(control: cat.Control, section_list: List[str]) -> Tuple[str, str, str]:
"""Get sections that are not in the list."""
id_, name = ControlIOWriter._find_section(control, section_list)
if id_:
return id_, name, ControlIOWriter._get_control_section(control, name)
return '', '', ''
def _add_sections(self, control: cat.Control) -> None:
"""Add the extra control sections after the main ones."""
skip_section_list = ['statement', 'item', 'objective']
while True:
name, id_, prose = self._get_section(control, skip_section_list)
if not name:
return
if prose:
skip_section_list.append(id_)
if self._sections and id_ in self._sections:
id_ = self._sections[id_]
self._md_file.new_header(level=2, title=f'Control {id_}')
self._md_file.new_line(prose)
self._md_file.new_paragraph()
def _insert_existing_text(self, part_label: str, existing_text: Dict[str, List[str]]) -> None:
"""Insert text captured in the previous markdown and reinsert to avoid overwrite."""
if part_label in existing_text:
self._md_file.new_paragraph()
for line in existing_text[part_label]:
self._md_file.new_line(line)
def _add_response(self, control: cat.Control, existing_text: Dict[str, List[str]]) -> None:
"""Add the response request text for all parts to the markdown along with the header."""
self._md_file.new_hr()
self._md_file.new_paragraph()
self._md_file.new_header(level=2, title=f'{const.SSP_MD_IMPLEMENTATION_QUESTION}')
# if the control has no parts written out then enter implementation in the top level entry
# but if it does have parts written out, leave top level blank and provide details in the parts
# Note that parts corresponding to sections don't get written out here so a check is needed
did_write_part = False
if control.parts:
for part in control.parts:
if part.parts:
if part.name == 'statement':
for prt in part.parts:
if prt.name != 'item':
continue
if not did_write_part:
self._md_file.new_line(const.SSP_MD_LEAVE_BLANK_TEXT)
# insert extra line to make mdformat happy
self._md_file._add_line_raw('')
did_write_part = True
self._md_file.new_hr()
part_label = self._get_label(prt)
# if no label guess the label from the sub-part id
if not part_label:
part_label = prt.id.split('.')[-1]
self._md_file.new_header(level=2, title=f'Implementation {part_label}')
# don't write out the prompt for text if there is some already there
if part_label not in existing_text:
self._md_file.new_line(f'{const.SSP_ADD_IMPLEMENTATION_FOR_ITEM_TEXT} {prt.id}')
self._insert_existing_text(part_label, existing_text)
self._md_file.new_paragraph()
if not did_write_part:
self._md_file.new_line(f'{const.SSP_ADD_IMPLEMENTATION_FOR_CONTROL_TEXT} {control.id}')
self._md_file.new_hr()
@staticmethod
def _get_adds(control_id: str, profile: prof.Profile) -> List[Tuple[str, str]]:
adds = []
if profile and profile.modify and profile.modify.alters:
for alter in profile.modify.alters:
if alter.control_id == control_id and alter.adds:
for add in alter.adds:
if add.parts:
for part in add.parts:
if part.prose:
adds.append((part.name, part.prose))
return adds
def _add_additional_content(self, control: cat.Control, profile: prof.Profile) -> None:
adds = ControlIOWriter._get_adds(control.id, profile)
has_content = len(adds) > 0
self._md_file.new_header(level=1, title='Editable Content')
self._md_file.new_line('<!-- Make additions and edits below -->')
self._md_file.new_line(
'<!-- The above represents the contents of the control as received by the profile, prior to additions. -->' # noqa E501
)
self._md_file.new_line(
'<!-- If the profile makes additions to the control, they will appear below. -->' # noqa E501
)
self._md_file.new_line(
'<!-- The above may not be edited but you may edit the content below, and/or introduce new additions to be made by the profile. -->' # noqa E501
)
self._md_file.new_line(
'<!-- The content here will then replace what is in the profile for this control, after running profile-assemble. -->' # noqa E501
)
if has_content:
self._md_file.new_line(
'<!-- The added parts in the profile for this control are below. You may edit them and/or add new ones. -->' # noqa E501
)
else:
self._md_file.new_line(
'<!-- The current profile has no added parts for this control, but you may add new ones here. -->'
)
self._md_file.new_line('<!-- Each addition must have a heading of the form ## Control my_addition_name -->')
self._md_file.new_line(
'<!-- See https://ibm.github.io/compliance-trestle/tutorials/ssp_profile_catalog_authoring/ssp_profile_catalog_authoring for guidance. -->' # noqa E501
)
# next is to make mdformat happy
self._md_file._add_line_raw('')
for add in adds:
name, prose = add
self._md_file.new_header(level=2, title=f'Control {name}')
self._md_file.new_paraline(prose)
@staticmethod
def get_part_prose(control: cat.Control, part_name: str) -> str:
"""Get the prose for a named part."""
prose = ''
if control.parts:
for part in control.parts:
prose += ControlIOWriter._get_control_section_part(part, part_name)
return prose.strip()
@staticmethod
def merge_dicts_deep(dest: Dict[Any, Any], src: Dict[Any, Any]) -> None:
"""
Merge dict src into dest in a deep manner and handle lists.
All contents of dest are retained and new values from src do not change dest.
But any new items in src are added to dest.
This changes dest in place.
"""
for key in src.keys():
if key in dest:
if isinstance(dest[key], dict) and isinstance(src[key], dict):
ControlIOWriter.merge_dicts_deep(dest[key], src[key])
elif isinstance(dest[key], list):
# grow dest list for the key by adding new items from src
if isinstance(src[key], list):
try:
# Simple types (e.g. lists of strings) will get merged neatly
missing = set(src[key]) - set(dest[key])
dest[key].extend(missing)
except TypeError:
# This is a complex type - use simplistic safe behaviour
logger.debug('Ignoring complex types within lists when merging dictionaries.')
else:
if src[key] not in dest[key]:
dest[key].append(src[key])
elif isinstance(src[key], list):
dest[key] = [dest[key]]
dest[key].extend(src[key])
# if the item is in both, leave dest as-is and ignore the src value
else:
# if the item was not already in dest, add it from src
dest[key] = src[key]
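# Illustrative example of the merge semantics described above (not part of
# the original module):
#   dest = {'a': 1, 'tags': ['x']}
#   src = {'a': 2, 'tags': ['x', 'y'], 'b': 3}
#   ControlIOWriter.merge_dicts_deep(dest, src)
#   # dest is now {'a': 1, 'tags': ['x', 'y'], 'b': 3}: existing values win,
#   # while new keys and new list items from src are added.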
def write_control(
self,
dest_path: pathlib.Path,
control: cat.Control,
group_title: str,
yaml_header: Optional[Dict],
sections: Optional[Dict[str, str]],
additional_content: bool,
prompt_responses: bool,
profile: Optional[prof.Profile],
header_dont_merge: bool
) -> None:
"""
Write out the control in markdown format into the specified directory.
Args:
dest_path: Path to the directory where the control will be written
control: The control to write as markdown
group_title: Title of the group containing the control
yaml_header: Optional dict to be written as markdown yaml header
sections: Optional string lookup dict mapping section abbrev. to pretty version for display
additional_content: Should the additional content be printed corresponding to profile adds
prompt_responses: Should the markdown include prompts for implementation detail responses
profile: Profile containing the adds making up additional content
Returns:
None
Notes:
The filename is constructed from the control's id, so only the markdown directory is required.
If a yaml header is present in the file it is merged with the optional provided header.
The header in the file takes precedence over the provided one.
"""
control_file = dest_path / (control.id + '.md')
existing_text, header = ControlIOReader.read_all_implementation_prose_and_header(control_file)
self._md_file = MDWriter(control_file)
self._sections = sections
# Need to merge any existing header info with the new one. Either could be empty.
if header_dont_merge and not header == {}:
merged_header = {}
else:
merged_header = copy.deepcopy(yaml_header) if yaml_header else {}
if header:
ControlIOWriter.merge_dicts_deep(merged_header, header)
self._add_yaml_header(merged_header)
self._add_control_statement(control, group_title)
self._add_control_objective(control)
self._add_sections(control)
if prompt_responses:
self._add_response(control, existing_text)
if additional_content:
self._add_additional_content(control, profile)
self._md_file.write_out()
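# Illustrative call sketch for write_control (not part of the original module;
# the paths, header and control object below are assumptions for demonstration):
#
#   writer = ControlIOWriter()
#   writer.write_control(
#       dest_path=pathlib.Path('md_controls'),   # e.g. ac-1.md is written here
#       control=some_control,                    # a cat.Control obtained elsewhere
#       group_title='Access Control',
#       yaml_header={'owner': 'security-team'},
#       sections=None,
#       additional_content=False,
#       prompt_responses=True,
#       profile=None,
#       header_dont_merge=False,
#   )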
# Start of section to read controls from markdown
class ControlIOReader():
"""Class to read controls from markdown."""
@staticmethod
def _strip_to_make_ncname(label: str) -> str:
"""Strip chars to conform with NCNAME regex."""
orig_label = label
# make sure first char is allowed
while label and label[0] not in const.NCNAME_UTF8_FIRST_CHAR_OPTIONS:
label = label[1:]
new_label = label[:1]
# now check remaining chars
if len(label) > 1:
for ii in range(1, len(label)):
if label[ii] in const.NCNAME_UTF8_OTHER_CHAR_OPTIONS:
new_label += label[ii]
# do final check to confirm it is NCNAME
match = re.search(const.NCNAME_REGEX, new_label)
if not match:
raise TrestleError(f'Unable to convert label {orig_label} to NCNAME format.')
return new_label
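# Illustrative behaviour of the stripping above (exact results depend on the
# NCNAME character tables in trestle.core.const):
#   _strip_to_make_ncname('(a)')          would typically yield  'a'
#   _strip_to_make_ncname('ac-1_smt.(a)') would typically yield  'ac-1_smt.a'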
@staticmethod
def _trim_prose_lines(lines: List[str]) -> List[str]:
"""
Trim empty lines at start and end of list of lines in prose.
Also need to exclude the line requesting implementation prose
"""
ii = 0
n_lines = len(lines)
while ii < n_lines and (lines[ii].strip(' \r\n') == ''
or lines[ii].find(const.SSP_ADD_IMPLEMENTATION_PREFIX) >= 0):
ii += 1
jj = n_lines - 1
while jj >= 0 and lines[jj].strip(' \r\n') == '':
jj -= 1
if jj < ii:
return []
return lines[ii:(jj + 1)]
@staticmethod
def _read_label_prose(ii: int, lines: List[str]) -> Tuple[int, str, List[str]]:
r"""
Return the found label and its corresponding list of prose lines.
ii should point to start of file or directly at a new Part or control
This looks for two types of reference lines:
_______\n## Control label
_______\n# label
If a section is meant to be left blank it goes ahead and reads the comment text
"""
nlines = len(lines)
prose_lines: List[str] = []
item_label = ''
tld_prose_lines = []
if ii == 0:
# read the entire control to validate contents
ii, _ = ControlIOReader._read_control_statement(0, lines, 'dummy_id')
ii, _ = ControlIOReader._read_sections(ii, lines, 'xx', [])
# go back to beginning and seek the implementation question
ii = 0
while ii < nlines and not lines[ii].strip().endswith(const.SSP_MD_IMPLEMENTATION_QUESTION):
ii += 1
# skip over the question
ii += 1
while -1 < ii < nlines:
# start of new part
if lines[ii].startswith('## Implementation'):
split = lines[ii].strip().split()
if len(split) < 3:
raise TrestleError('Implementation line must include label')
item_label = split[-1]
ii += 1
if ii < nlines and lines[ii] and ControlIOReader._indent(lines[ii]) <= 0:
msg = f'Implementation line for control appears broken by newline: {lines[ii]}'
raise TrestleError(msg)
# collect until next hrule
while ii < nlines:
if lines[ii].startswith(const.SSP_MD_HRULE_LINE) or lines[ii].startswith('## Implementation'):
return ii, item_label, ControlIOReader._trim_prose_lines(prose_lines)
prose_lines.append(lines[ii].strip())
ii += 1
elif lines[ii].startswith('# ') or lines[ii].startswith('## '):
raise TrestleError(f'Improper heading level in control statement: {lines[ii]}')
else:
tld_prose = lines[ii].strip()
if tld_prose and not tld_prose.startswith(const.SSP_ADD_IMPLEMENTATION_PREFIX):
tld_prose_lines.append(tld_prose)
ii += 1
# if we did not find normal labelled prose regard any found prose as top_level_description
if not item_label and tld_prose_lines:
return nlines, 'top_level_description', tld_prose_lines
return -1, item_label, prose_lines
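# Rough shape of the markdown section walked by _read_label_prose above
# (illustrative; the exact hrule and prompt strings come from
# trestle.core.const and are abbreviated here):
#   <hrule>
#   ## <implementation question header>
#   ## Implementation a
#   <prose captured under part label 'a'>
#   <hrule>
#   ## Implementation b
#   <prose captured under part label 'b'>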
@staticmethod
def _load_control_lines(control_file: pathlib.Path) -> List[str]:
lines: List[str] = []
try:
content = control_file.open('r', encoding=const.FILE_ENCODING).read()
except UnicodeDecodeError as e:
logger.error('utf-8 decoding failed.')
logger.error(f'See: {const.WEBSITE_ROOT}/errors/#utf-8-encoding-only')
logger.debug(f'Underlying exception {e}')
raise TrestleError('Unable to load file due to utf-8 encoding issues.')
try:
fm = frontmatter.loads(content)
except Exception as e:
logger.error(f'Error parsing yaml header from file {control_file}')
logger.error('This is most likely due to an incorrect yaml structure.')
logger.debug(f'Underlying error: {str(e)}')
raise TrestleError(f'Failure parsing yaml header on file {control_file}')
raw_lines = fm.content.split('\n')
# Any fully blank lines will be retained but as empty strings
lines = [line.strip('\r\n').rstrip() for line in raw_lines]
clean_lines = []
# need to keep indentation and empty lines
for line in lines:
if line.startswith('<!--') or line.startswith('__________________'):
continue
clean_lines.append(line)
return clean_lines
@staticmethod
def _read_id_group_id_title(line: str) -> Tuple[int, str, str]:
"""Process the line and find the control id, group id and control title."""
if line.count('-') < 2:
raise TrestleError(f'Markdown control title format error: {line}')
control_id = line.split()[1]
first_dash = line.find('-')
title_line = line[first_dash + 1:]
group_start = title_line.find('\[')
group_end = title_line.find('\]')
if group_start < 0 or group_end < 0 or group_start > group_end:
raise TrestleError(f'unable to read group and title for control {control_id}')
group_id = title_line[group_start + 2:group_end].strip()
control_title = title_line[group_end + 2:].strip()
return control_id, group_id, control_title
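# Illustrative example of the title line parsed above (matching the format
# written by ControlIOWriter._add_control_statement):
#   '# ac-1 - \[Access Control\] Policy and Procedures'
#   -> control_id='ac-1', group_id='Access Control',
#      control_title='Policy and Procedures'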
@staticmethod
def _indent(line: str) -> int:
"""Measure indent of non-empty line."""
if not line:
raise TrestleError('Empty line queried for indent.')
if line[0] not in [' ', '-']:
return -1
for ii in range(len(line)):
if line[ii] == '-':
return ii
# if line is indented it must start with -
if line[ii] != ' ':
break
raise TrestleError(f'List elements must start with -: {line}')
@staticmethod
def _get_next_line(ii: int, lines: List[str]) -> Tuple[int, str]:
while ii < len(lines):
line = lines[ii]
if line:
return ii, line
ii += 1
return -1, ''
@staticmethod
def _get_next_indent(ii: int, lines: List[str]) -> Tuple[int, int, str]:
"""Seek to next content line. ii remains at line read."""
while 0 <= ii < len(lines):
line = lines[ii]
if line:
if line[0] == '#':
return ii, -1, line
indent = ControlIOReader._indent(line)
if indent >= 0:
# extract text after -
start = indent + 1
while start < len(line) and line[start] != ' ':
start += 1
if start >= len(line):
raise TrestleError(f'Invalid line {line}')
return ii, indent, line[start:]
return ii, indent, line
ii += 1
return ii, -1, ''
@staticmethod
def _read_part_id_prose(line: str) -> Tuple[str, str]:
"""Extract the part id letter or number and prose from line."""
start = line.find('\\[')
end = line.find('\\]')
prose = line.strip() if start < 0 else line[end + 2:].strip()
id_ = '' if start < 0 or end < 0 else line[start + 2:end]
return id_, prose
@staticmethod
def _read_parts(indent: int, ii: int, lines: List[str], parent_id: str,
parts: List[common.Part]) -> Tuple[int, List[common.Part]]:
"""If indentation level goes up or down, create new list or close current one."""
while True:
ii, new_indent, line = ControlIOReader._get_next_indent(ii, lines)
if new_indent < 0:
# we are done reading control statement
return ii, parts
if new_indent == indent:
# create new item part and add to current list of parts
id_text, prose = ControlIOReader._read_part_id_prose(line)
id_ = ControlIOReader._strip_to_make_ncname(parent_id + '.' + id_text)
name = 'objective' if id_.find('_obj') > 0 else 'item'
part = common.Part(name=name, id=id_, prose=prose)
parts.append(part)
ii += 1
elif new_indent > indent:
# add new list of parts to last part and continue
if len(parts) == 0:
raise TrestleError(f'Improper indentation structure: {line}')
ii, new_parts = ControlIOReader._read_parts(new_indent, ii, lines, parts[-1].id, [])
if new_parts:
parts[-1].parts = new_parts
else:
# return list of sub-parts
return ii, parts
@staticmethod
def _read_control_statement(ii: int, lines: List[str], control_id: str) -> Tuple[int, common.Part]:
"""Search for the Control statement and read until next ## Control."""
while 0 <= ii < len(lines) and not lines[ii].startswith('## Control '):
ii += 1
if ii >= len(lines):
raise TrestleError(f'Control statement not found for control {control_id}')
ii += 1
ii, line = ControlIOReader._get_next_line(ii, lines)
if ii < 0:
# This means no statement and control withdrawn (this happens in NIST catalog)
return ii, None
if line and line[0] == ' ' and line.lstrip()[0] != '-':
# prose that appears indented but has no - : treat it as the normal statement prose
line = line.lstrip()
indent = -1
ii += 1
else:
ii, indent, line = ControlIOReader._get_next_indent(ii, lines)
statement_part = common.Part(name='statement', id=f'{control_id}_smt')
# first line is either statement prose or start of statement parts
if indent < 0:
statement_part.prose = line
ii += 1
# we have absorbed possible statement prose.
# now just read parts recursively
# if there was no statement prose, this will re-read the line just read
# as the start of the statement's parts
ii, parts = ControlIOReader._read_parts(0, ii, lines, statement_part.id, [])
statement_part.parts = parts if parts else None
return ii, statement_part
@staticmethod
def _read_control_objective(ii: int, lines: List[str], control_id: str) -> Tuple[int, Optional[common.Part]]:
ii_orig = ii
while 0 <= ii < len(lines) and not lines[ii].startswith('## Control Objective'):
ii += 1
if ii >= len(lines):
return ii_orig, None
ii += 1
ii, line = ControlIOReader._get_next_line(ii, lines)
if ii < 0:
raise TrestleError(f'Unable to parse objective from control markdown {control_id}')
if line and line[0] == ' ' and line.lstrip()[0] != '-':
# prose that appears indented but has no - : treat it as the normal objective prose
line = line.lstrip()
indent = -1
ii += 1
else:
ii, indent, line = ControlIOReader._get_next_indent(ii, lines)
objective_part = common.Part(name='objective', id=f'{control_id}_obj')
# first line is either objective prose or start of objective parts
if indent < 0:
objective_part.prose = line
ii += 1
# we have absorbed possible objective prose.
# now just read parts recursively
# if there was no objective prose, this will re-read the line just read
# as the start of the objective's parts
ii, parts = ControlIOReader._read_parts(0, ii, lines, objective_part.id, [])
objective_part.parts = parts if parts else None
return ii, objective_part
@staticmethod
def _read_sections(ii: int, lines: List[str], control_id: str,
control_parts: List[common.Part]) -> Tuple[int, List[common.Part]]:
"""Read all sections following the section separated by ## Control."""
new_parts = []
prefix = '## Control '
while 0 <= ii < len(lines):
line = lines[ii]
if line.startswith('## What is the solution') or line.startswith('# Editable Content'):
ii += 1
continue
if not line:
ii += 1
continue
if line and not line.startswith(prefix):
# the control has no sections to read, so exit the loop
break
label = line[len(prefix):].lstrip()
prose = ''
ii += 1
while 0 <= ii < len(lines) and not lines[ii].startswith(prefix) and not lines[ii].startswith(
'# Editable Content'):
prose = '\n'.join([prose, lines[ii]])
ii += 1
if prose:
id_ = ControlIOReader._strip_to_make_ncname(control_id + '_smt.' + label)
label = ControlIOReader._strip_to_make_ncname(label)
new_parts.append(common.Part(id=id_, name=label, prose=prose.strip('\n')))
if new_parts:
if control_parts:
control_parts.extend(new_parts)
else:
control_parts = new_parts
if not control_parts:
control_parts = None
return ii, control_parts
@staticmethod
def read_all_implementation_prose_and_header(
control_file: pathlib.Path
) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
"""
Find all labels and associated prose in this control.
Args:
control_file: path to the control markdown file
Returns:
Dictionary of part labels and corresponding prose read from the markdown file.
"""
if not control_file.exists():
return {}, {}
md_api = MarkdownAPI()
header, _ = md_api.processor.process_markdown(control_file)
lines = ControlIOReader._load_control_lines(control_file)
ii = 0
# keep moving down through the file picking up labels and prose
responses: Dict[str, List[str]] = {}
while True:
ii, part_label, prose_lines = ControlIOReader._read_label_prose(ii, lines)
while prose_lines and not prose_lines[0].strip(' \r\n'):
del prose_lines[0]
while prose_lines and not prose_lines[-1].strip(' \r\n'):
del prose_lines[-1]
if part_label and prose_lines:
responses[part_label] = prose_lines
if ii < 0:
break
return responses, header
@staticmethod
def read_implementations(control_file: pathlib.Path,
component: ossp.SystemComponent) -> List[ossp.ImplementedRequirement]:
"""Get implementation requirements associated with given control and link to the one component we created."""
control_id = control_file.stem
imp_reqs: List[ossp.ImplementedRequirement] = []
responses, _ = ControlIOReader.read_all_implementation_prose_and_header(control_file)
for response in responses.items():
label = response[0]
prose_lines = response[1]
# create a new by-component to hold this statement
by_comp: ossp.ByComponent = gens.generate_sample_model(ossp.ByComponent)
# link it to the one dummy component uuid
by_comp.component_uuid = component.uuid
# add the response prose to the description
by_comp.description = '\n'.join(prose_lines)
# create a statement to hold the by-component and assign the statement id
statement: ossp.Statement = gens.generate_sample_model(ossp.Statement)
statement.statement_id = ControlIOReader._strip_to_make_ncname(f'{control_id}_smt.{label}')
statement.by_components = [by_comp]
# create a new implemented requirement linked to the control id to hold the statement
imp_req: ossp.ImplementedRequirement = gens.generate_sample_model(ossp.ImplementedRequirement)
imp_req.control_id = control_id
imp_req.statements = [statement]
imp_reqs.append(imp_req)
return imp_reqs
@staticmethod
def _read_added_part(ii: int, lines: List[str], control_id: str) -> Tuple[int, Optional[common.Part]]:
"""Read a single part indicated by ## Control foo."""
while 0 <= ii < len(lines):
# look for ## Control foo - then read prose
line = lines[ii]
prefix = '## Control '
if line:
if not line.startswith(prefix):
raise TrestleError(f'Unexpected line in Editable Content for control {control_id}: {line}')
part_name_raw = line[len(prefix):]
part_name = spaces_and_caps_to_snake(part_name_raw)
prose_lines = []
ii += 1
have_content = False
while 0 <= ii < len(lines):
line = lines[ii]
if not line.startswith(prefix):
if line:
have_content = True
prose_lines.append(line)
ii += 1
continue
break
if have_content:
prose = '\n'.join(prose_lines)
id_ = f'{control_id}_{part_name}'
part = common.Part(id=id_, name=part_name, prose=prose)
return ii, part
ii += 1
return -1, None
@staticmethod
def read_new_alters(control_path: pathlib.Path) -> List[prof.Alter]:
"""Get parts for the markdown control corresponding to Editable Content - if any."""
control_id = control_path.stem
new_alters: List[prof.Alter] = []
lines = ControlIOReader._load_control_lines(control_path)
ii = 0
while 0 <= ii < len(lines):
line = lines[ii]
if line.startswith('# Editable Content'):
ii += 1
while 0 <= ii < len(lines):
ii, part = ControlIOReader._read_added_part(ii, lines, control_id)
if ii < 0:
break
alter = prof.Alter(
control_id=control_id,
adds=[prof.Add(parts=[part], position='after', by_id=f'{control_id}_smt')]
)
new_alters.append(alter)
else:
ii += 1
return new_alters
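    # Illustrative sketch (an assumption based on the parsing logic above, not taken
    # from an external spec): read_new_alters() expects the control markdown to end
    # with an editable region laid out roughly like
    #
    #   # Editable Content
    #
    #   ## Control my_new_guidance
    #
    #   Prose for the added part goes here; it becomes a common.Part with id
    #   '<control_id>_my_new_guidance', attached via a prof.Alter/prof.Add placed
    #   after the '<control_id>_smt' statement part.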
@staticmethod
def read_control(control_path: pathlib.Path) -> cat.Control:
"""Read the control markdown file."""
control = gens.generate_sample_model(cat.Control)
md_api = MarkdownAPI()
_, control_tree = md_api.processor.process_markdown(control_path)
control_titles = list(control_tree.get_all_headers_for_level(1))
if len(control_titles) == 0:
raise TrestleError(f'Control markdown: {control_path} contains no control title.')
control.id, _, control.title = ControlIOReader._read_id_group_id_title(control_titles[0])
control_headers = list(control_tree.get_all_headers_for_level(2))
if len(control_headers) == 0:
raise TrestleError(f'Control markdown: {control_path} contains no control statements.')
control_statement = control_tree.get_node_for_key(control_headers[0])
rc, statement_part = ControlIOReader._read_control_statement(
0, control_statement.content.raw_text.split('\n'), control.id
)
if rc < 0:
return control
control.parts = [statement_part] if statement_part else None
control_objective = control_tree.get_node_for_key('## Control Objective')
if control_objective is not None:
_, objective_part = ControlIOReader._read_control_objective(
0, control_objective.content.raw_text.split('\n'), control.id
)
if objective_part:
if control.parts:
control.parts.append(objective_part)
else:
control.parts = [objective_part]
for header_key in control_tree.get_all_headers_for_key('## Control', False):
if header_key not in {control_headers[0], '## Control Objective', control_titles[0]}:
section_node = control_tree.get_node_for_key(header_key)
_, control.parts = ControlIOReader._read_sections(
0, section_node.content.raw_text.split('\n'), control.id, control.parts
)
return control
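# Illustrative calls (an assumption, not taken from trestle documentation): given a
# control markdown file such as 'md_controls/ac-1.md', the readers above would
# typically be driven as
#
#   control = ControlIOReader.read_control(pathlib.Path('md_controls/ac-1.md'))
#   alters = ControlIOReader.read_new_alters(pathlib.Path('md_controls/ac-1.md'))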
|
StarcoderdataPython
|
23486
|
# encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@software: basenef
@file: doc_generator.py
@date: 4/13/2019
@desc:
'''
import os
import sys
import time
from getpass import getuser
import matplotlib
import numpy as np
import json
from srfnef import Image, MlemFull
matplotlib.use('Agg')
author = getuser()
def title_block_gen():
timestamp = time.time()
datetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(timestamp)))
title_block = f'''
# NEF AutoDoc {datetime}
- Author: {author}
- Generation time: {datetime}
- Operation system: {sys.platform}
- OS language: {os.environ['LANG']}
- Duration: 0.0 sec
- Total errors: 0
- Total warning: 0
- Description:
'''
return title_block
def _text_gen_as_table(dct: dict = {}):
out_text = ['|key|values|\n|:---|:---|\n']
for key, val in dct.items():
if key == 'data':
out_text.append(f"| {key} | Ignored |\n")
elif not isinstance(val, dict):
if isinstance(val, str) and len(val) > 30:
out_text.append(f"| {key} | Ignored |\n")
else:
out_text.append(f"| {key} | {val} |\n")
else:
out_text.append(f"| {key} | {'Ignored'} |\n")
return out_text
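# Minimal illustration (an assumption, not part of the original module): rendering a
# small dict as a two-column markdown table, with the 'data' entry collapsed to
# "Ignored" as implemented above.
#
#   ''.join(_text_gen_as_table({'n_iter': 10, 'data': [1, 2, 3]}))
#   -> '|key|values|\n|:---|:---|\n| n_iter | 10 |\n| data | Ignored |\n'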
def json_block_gen(dct: dict = {}):
if isinstance(dct, str):
dct = json.loads(dct)
dct['image_config']['size'] = np.round(dct['image_config']['size'], decimals = 3).tolist()
if dct['emap'] is not None:
dct['emap']['size'] = np.round(dct['emap']['size'], decimals = 3).tolist()
json_str = json.dumps(dct, indent = 4)
out_text = "## RECON JSON\n"
out_text += "```javascript\n"
out_text += json_str + '\n'
out_text += "```\n"
return out_text
def image_block_gen(img: Image, path: str):
print('Generating text blocks...')
from matplotlib import pyplot as plt
vmax = np.percentile(img.data, 99.99)
midind = [int(img.shape[i] / 2) for i in range(3)]
plt.figure(figsize = (30, 10))
plt.subplot(231)
plt.imshow(img.data[midind[0], :, :], vmax = vmax)
plt.subplot(232)
plt.imshow(img.data[:, midind[1], :].transpose(), vmax = vmax)
plt.subplot(233)
plt.imshow(img.data[:, :, midind[2]].transpose(), vmax = vmax)
plt.subplot(234)
plt.plot(img.data[midind[0], midind[1], :])
plt.subplot(235)
plt.plot(img.data[midind[0], :, midind[2]])
plt.subplot(236)
plt.plot(img.data[:, midind[1], midind[2]])
timestamp = time.time()
datetime_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(int(timestamp)))
plt.savefig(path + f'/out_img{datetime_str}.png')
    # embed the figure saved above in the generated markdown
    out_text = f'![reconstruction slices](out_img{datetime_str}.png)\n'
return out_text
def statistic_block_gen(dct: dict = {}):
out_text = []
key_set = set()
for name, sub_dct in dct.items():
for key, val in sub_dct.items():
if isinstance(val, str) and len(val) < 30:
key_set.add(key)
    col_names = ['|name ', '|:---']
    for key in key_set:
        col_names[0] += '|' + key
    col_names[0] += '|\n'
    for _ in key_set:
        col_names[1] += '|:---'
    col_names[1] += '|\n'
    out_text += col_names
    for name, sub_dct in dct.items():
        row = '| ' + name + ' '
        for key in key_set:
            if key in sub_dct:
                row += '|' + str(sub_dct[key])
            else:
                row += '|-'
        row += '|\n'
        out_text += [row]
return out_text
def metric_block_gen(mask: np.ndarray, img: Image):
from srfnef import image_metric as metric
dct = {}
# contrast hot
dct.update(
contrast_hot = {str(ind_): float(val_) for ind_, val_ in metric.contrast_hot(mask, img)})
dct.update(
contrast_cold = {str(ind_): float(val_) for ind_, val_ in metric.contrast_cold(mask, img)})
dct.update(contrast_noise_ratio1 = metric.cnr1(mask, img))
dct.update(contrast_noise_ratio2 = metric.cnr2(mask, img))
dct.update(contrast_recovery_coefficiency1 = metric.crc1(mask, img))
dct.update(contrast_recovery_coefficiency2 = metric.crc2(mask, img))
dct.update(standard_error = metric.standard_error(mask, img))
dct.update(normalized_standard_error = metric.nsd(mask, img))
dct.update(standard_deviation = metric.sd(mask, img))
dct.update(background_visibility = metric.bg_visibility(mask, img))
dct.update(noise1 = metric.noise1(mask, img))
dct.update(noise2 = metric.noise2(mask, img))
dct.update(signal_noise_ratio1 = metric.snr1(mask, img))
dct.update(signal_noise_ratio2 = metric.snr2(mask, img))
dct.update(positive_deviation = metric.pos_dev(mask, img))
for ind, val in dct.items():
if not isinstance(val, dict):
dct[ind] = float(val)
json_str = json.dumps(dct, indent = 4)
out_text = "## IMAGE METRIC JSON\n"
out_text += "```javascript\n"
out_text += json_str + '\n'
out_text += "```\n"
return out_text
def doc_gen(mlem_obj: MlemFull, img: Image, path: str, filename: str = None,
mask: np.ndarray = None):
timestamp = time.time()
datetime_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(int(timestamp)))
if filename is None:
filename = 'doc_gen-' + datetime_str + '.md'
out_text = title_block_gen()
out_text += image_block_gen(img, path)
out_text += json_block_gen(mlem_obj.asdict(recurse = True))
if mask is not None:
if isinstance(mask, str):
mask = np.load(mask)
out_text += metric_block_gen(mask, img)
# out_text += statistic_block_gen(dct)
with open(filename, 'w') as fout:
fout.writelines(out_text)
# print('Converting MD to PDF...')
# import pypandoc
# print(filename)
# pypandoc.convert_file(filename, 'pdf', outputfile = filename + '.pdf')
return filename
|
StarcoderdataPython
|
11222629
|
<reponame>CDL-Project-Euler/Solutions
# We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once;
# for example, the 5-digit number, 15234, is 1 through 5 pandigital.
# The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital.
# Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital.
# HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum.
#At most 1 number can be 4 digits, so only look at up to 9876
def are_pandigital(number_list: list, digits = "123456789"):
#Checks through numbers in list to see if they are pandigital
total_length = 0
length_digits = len(digits)
for number in number_list:
total_length += len(str(number))
for digit in str(number):
if digit in digits:
digits = digits.replace(digit, "")
else:
return False
if total_length != length_digits:
return False
return True
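# Quick sanity check (illustrative, not part of the original solution):
# are_pandigital([39, 186, 7254]) is True because "391867254" uses each digit 1-9
# exactly once, while are_pandigital([12, 34]) is False (only four digits in total).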
def problem_32():
product_list = []
for a in range(9876):
        for b in range(a + 1, 9876):
            product = a * b
            if are_pandigital([a, b, product]):
                print(a, b, product)
                if product not in product_list:
                    product_list.append(product)
    return sum(product_list)
if __name__ == "__main__":
print(problem_32())
|
StarcoderdataPython
|
8040542
|
# -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
# Local application / specific library imports
|
StarcoderdataPython
|
3364035
|
<reponame>Unviray/pun<filename>pun/__init__.py
"""
pun
===
Pun mini task to build, deploy or anything you like in your project.
"""
__author__ = 'Unviray'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from .walker import cd
from .core import task, run, fixture, env
__all__ = ('cd', 'env', 'task', 'run', 'fixture')
|
StarcoderdataPython
|
6503429
|
<filename>coder_directory_api/__init__.py
"""
Coders Directory Api
~~~~~~~~~~~~~~~~~~~~
A flask powered rest api for managing and displaying coder data.
:copyright: (c) 2017 by <NAME>.
:license: MIT, see LICENSE for details.
"""
__version__ = '1.0.0'
|
StarcoderdataPython
|
327810
|
# -*- coding: utf-8 -*-
"""Implementation of Linear Programming IRL methods by Ng and Russell, 2000
Copyright 2018 <NAME>
"""
import warnings
import numpy as np
from cvxopt import matrix, solvers
from pprint import pprint
from copy import copy
from irl_methods.utils import rollout
def linear_programming(
sorted_transition_tensor,
discount_factor,
*,
l1_regularisation_weight=0,
r_max=1.0,
verbose=False
):
"""Linear Programming IRL by NG and Russell, 2000
Given a transition matrix T[s, a, s'] encoding a stationary
deterministic policy and a discount factor, finds a reward vector
R(s) for which the policy is optimal.
This method implements the `Linear Programming IRL algorithm by Ng and
Russell <http://ai.stanford.edu/~ang/papers/icml00-irl.pdf>. See
<https://www.inf.ed.ac.uk/teaching/courses/rl/slides17/8_IRL.pdf> for an
accessible overview.
TODO: Adjust L1 norm constraint generation to allow negative rewards in
the final vector.
Args:
sorted_transition_tensor (numpy array): A sorted transition matrix
T[s, a, s'] encoding a stationary deterministic policy. The
structure must be such that the 0th action T[:, 0, :] corresponds to
the expert policy, and T[:, i, :], i != 0 corresponds to the ith
non-expert action at each state.
discount_factor (float): The expert's discount factor. Must be in the
range [0, 1).
l1_regularisation_weight (float): L1 norm regularization weight for the
LP optimisation objective function
r_max (float): Maximum reward value
verbose (bool): Print progress information
Returns:
(numpy array): The reward vector for which the given policy is
optimal, and
(dict): A result object from the LP optimiser
"""
# Measure size of state and action sets
n = sorted_transition_tensor.shape[0]
k = sorted_transition_tensor.shape[1]
if verbose:
print("Vanilla Linear Programming IRL")
print(
"num_states={:d}, num_actions={:d}, discount_factor={:.3f}, "
"l1_regularisation_weight={:.3f}, r_max={:.3f}".format(
n,
k,
discount_factor,
l1_regularisation_weight,
r_max
)
)
# Compute the discounted transition matrix inverse term
_T_disc_inv = np.linalg.inv(
np.identity(n) - discount_factor * sorted_transition_tensor[:, 0, :]
)
# Formulate the linear programming problem constraints
# NB: The general form for adding a constraint looks like this
# c, A_ub, b_ub = f(c, A_ub, b_ub)
if verbose:
print("Composing LP problem...")
# Prepare LP constraint matrices
c = np.zeros(shape=[1, n], dtype=float)
A_ub = np.zeros(shape=[0, n], dtype=float)
b_ub = np.zeros(shape=[0, 1])
def add_optimal_policy_constraints(c, A_ub, b_ub):
"""
Add constraints to ensure the expert policy is optimal
This will add (k-1) * n extra constraints
"""
for i in range(k - 1):
constraint_rows = -1 * \
(
sorted_transition_tensor[:, 0, :] - \
sorted_transition_tensor[:, i, :]
)\
@ _T_disc_inv
A_ub = np.vstack((A_ub, constraint_rows))
b_ub = np.vstack(
(b_ub, np.zeros(shape=[constraint_rows.shape[0], 1]))
)
return c, A_ub, b_ub
def add_costly_single_step_constraints(c, A_ub, b_ub):
"""
Augment the optimisation objective to add the costly-single-step
degeneracy heuristic
This will add n extra optimisation variables and (k-1) * n extra
constraints
NB: Assumes the true optimisation variables are first in the objective
function
"""
# Expand the c vector add new terms for the min{} operator
c = np.hstack((c, -1 * np.ones(shape=[1, n])))
css_offset = c.shape[1] - n
A_ub = np.hstack((A_ub, np.zeros(shape=[A_ub.shape[0], n])))
# Add min{} operator constraints
for i in range(k - 1):
# Generate the costly single step constraint terms
constraint_rows = -1 * (sorted_transition_tensor[:, 0, :] - sorted_transition_tensor[:, i, :]) @ _T_disc_inv
# constraint_rows is nxn - we need to add the min{} terms though
min_operator_entries = np.identity(n)
# And we have to make sure we put the min{} operator entries in
# the correct place in the A_ub matrix
num_padding_cols = css_offset - n
padding_entries = np.zeros(shape=[constraint_rows.shape[0], num_padding_cols])
constraint_rows = np.hstack((constraint_rows, padding_entries, min_operator_entries))
# Finally, add the new constraints
A_ub = np.vstack((A_ub, constraint_rows))
b_ub = np.vstack((b_ub, np.zeros(shape=[constraint_rows.shape[0], 1])))
return c, A_ub, b_ub
def add_l1norm_constraints(c, A_ub, b_ub, l1):
"""
Augment the optimisation objective to add an l1 norm regularisation
term z += l1 * ||R||_1
This will add n extra optimisation variables and 2n extra constraints
NB: Assumes the true optimisation variables are first in the objective
function
"""
# We add an extra variable for each each true optimisation variable
c = np.hstack((c, l1 * np.ones(shape=[1, n])))
l1_offset = c.shape[1] - n
# Don't forget to resize the A_ub matrix to match
A_ub = np.hstack((A_ub, np.zeros(shape=[A_ub.shape[0], n])))
# Now we add 2 new constraints for each true optimisation variable to
# enforce the absolute value terms in the l1 norm
for i in range(n):
# An absolute value |x1| can be enforced via constraints
# -x1 <= 0 (i.e., x1 must be positive or 0)
# x1 + -xe1 <= 0
# Where xe1 is the replacement for |x1| in the objective
#
# TODO ajs 04/Apr/2018 This enforces that R must be positive or 0,
# but I was under the impression that it was also possible to
# enforce an abs operator without this requirement - e.g. see
# http://lpsolve.sourceforge.net/5.1/absolute.htm
constraint_row_1 = [0] * A_ub.shape[1]
constraint_row_1[i] = -1
A_ub = np.vstack((A_ub, constraint_row_1))
b_ub = np.vstack((b_ub, [[0]]))
constraint_row_2 = [0] * A_ub.shape[1]
constraint_row_2[i] = 1
constraint_row_2[l1_offset + i] = -1
A_ub = np.vstack((A_ub, constraint_row_2))
b_ub = np.vstack((b_ub, [[0]]))
return c, A_ub, b_ub
def add_rmax_constraints(c, A_ub, b_ub, Rmax):
"""
Add constraints for a maximum R value r_max
This will add n extra constraints
"""
for i in range(n):
constraint_row = [0] * A_ub.shape[1]
constraint_row[i] = 1
A_ub = np.vstack((A_ub, constraint_row))
b_ub = np.vstack((b_ub, Rmax))
return c, A_ub, b_ub
# Compose LP optimisation problem
c, A_ub, b_ub = add_optimal_policy_constraints(c, A_ub, b_ub)
c, A_ub, b_ub = add_costly_single_step_constraints(c, A_ub, b_ub)
c, A_ub, b_ub = add_rmax_constraints(c, A_ub, b_ub, r_max)
c, A_ub, b_ub = add_l1norm_constraints(c, A_ub, b_ub, l1_regularisation_weight)
if verbose:
print("Number of optimisation variables: {}".format(c.shape[1]))
print("Number of constraints: {}".format(A_ub.shape[0]))
# Solve for a solution
if verbose:
print("Solving LP problem...")
# NB: cvxopt.solvers.lp expects a 1d c vector
solvers.options['show_progress'] = verbose
res = solvers.lp(matrix(c[0, :]), matrix(A_ub), matrix(b_ub))
if verbose:
pprint(res)
def normalize(vals):
"""
Helper function to normalize a vector to the range (0, 1)
"""
min_val = np.min(vals)
max_val = np.max(vals)
return (vals - min_val) / (max_val - min_val)
# Extract the true optimisation variables and re-scale
rewards = r_max * normalize(res['x'][0:n]).T
return rewards, res
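# Illustrative usage sketch (an assumption, not part of the original module): a small
# helper showing how linear_programming might be called on a made-up 2-state,
# 2-action MDP whose sorted transition tensor T[s, a, s'] places the expert action
# at index 0. The transition probabilities below are purely for demonstration.
def _example_linear_programming_usage():
    toy_transitions = np.array([
        [[0.9, 0.1], [0.1, 0.9]],  # state 0: expert action first, then non-expert
        [[0.2, 0.8], [0.8, 0.2]],  # state 1
    ])
    rewards, lp_result = linear_programming(
        toy_transitions,
        discount_factor=0.9,
        l1_regularisation_weight=0.1,
        r_max=1.0,
        verbose=False
    )
    # rewards holds one rescaled reward value per state
    return rewards, lp_result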
def add_alpha_size_constraints(c, a_ub, b_ub, num_basis_functions):
"""Add constraints for maximum reward coefficient values of 1
This will add 2*d extra constraints, where d is the number of basis
functions.
NB: Assumes the true optimisation variables are first in the c vector.
Args:
c (numpy array): Objective function weights
a_ub (numpy array): Upper bound constraint coefficient matrix
b_ub (numpy array): Upper bound constraint RHS vector
Returns
(numpy array): Objective function weights
(numpy array): Upper bound constraint coefficient matrix
(numpy array): Upper bound constraint RHS vector
"""
for i in range(num_basis_functions):
constraint_row = [0] * a_ub.shape[1]
constraint_row[i] = 1
a_ub = np.vstack((a_ub, constraint_row))
b_ub = np.vstack((b_ub, 1))
constraint_row = [0] * a_ub.shape[1]
constraint_row[i] = -1
a_ub = np.vstack((a_ub, constraint_row))
b_ub = np.vstack((b_ub, 1))
return c, a_ub, b_ub
def large_linear_programming(
state_sample,
num_actions,
ordered_transition_function,
basis_value_fn,
*,
penalty_coefficient=2.0,
num_transition_samples=100,
verbose=False
):
"""Linear Programming IRL for large state spaces by Ng and Russell, 2000
Given a sampling transition function T(s, a_i) -> s' encoding a stationary
deterministic policy and a set of basis functions phi(s) over the state
space, finds a weight vector alpha for which the given policy is optimal
with respect to R(s) = alpha · phi(s).
See https://thinkingwires.com/posts/2018-02-13-irl-tutorial-1.html for a
good overview of this method.
Args:
state_sample (list): A list of states that will be used to approximate
the reward function over the full state space
num_actions (int): The number of actions
ordered_transition_function (function): A sampling transition function
T(s, a_i) -> s' encoding a stationary deterministic policy. The
structure of T must be that the 0th action T(:, 0) corresponds to a
sample from the expert policy, and T(:, i), i!=0 corresponds to a
sample from the ith non-expert action at each state, for some
arbitrary but consistent ordering of actions.
basis_value_fn (function): A function bv(s) -> list taking a state
and returning a vector where each entry i is an estimate of the
value of that state, if ith basis function was the reward function.
penalty_coefficient (float): Penalty function coefficient. Ng and
Russell find 2 is robust. Must be >= 1.
num_transition_samples (int): Number of transition samples to use when
computing expectations. For deterministic MDPs, this can be left
as 1.
verbose (bool): Print progress information
Returns:
(numpy array) A weight vector for the reward function basis functions
that makes the given policy optimal
(dict): A result object from the LP optimiser
"""
# Enforce valid penalty function coefficient
assert penalty_coefficient >= 1, \
"Penalty function coefficient must be >= 1, was {}".format(
penalty_coefficient
)
# Measure number of sampled states
num_states = len(state_sample)
# Measure number of basis functions
num_basis_functions = len(basis_value_fn(state_sample[0]))
if verbose:
print("Large Linear Programming IRL")
print(
"num_states={}, num_actions={}, num_basis_functions={}, "
"penalty_coefficient={:.3f}, num_transition_samples={}".format(
num_states,
num_actions,
num_basis_functions,
penalty_coefficient,
num_transition_samples
)
)
# Formulate the linear programming problem constraints
# NB: The general form for adding a constraint looks like this
# c, A_ub, b_ub = f(c, a_ub, b_ub)
if verbose:
print("Composing LP problem...")
def add_costly_single_step_constraints(c, a_ub, b_ub):
"""Implement Linear Programming IRL method for large state spaces
This will add m new objective parameters and 2*(k-1)*m new constraints,
where k is the number of actions and m is the number of sampled states
NB: Assumes the true optimisation variables are first in the c vector.
Args:
c (numpy array): Objective function weights
a_ub (numpy array): Upper bound constraint coefficient matrix
b_ub (numpy array): Upper bound constraint RHS vector
Returns
(numpy array): Objective function weights
(numpy array): Upper bound constraint coefficient matrix
(numpy array): Upper bound constraint RHS vector
"""
# Extend the objective function adding one dummy parameter per
# sampled state
# NB: cvxopt minimises, so we have a -1 here, not a 1
c = np.hstack((c, -1 * np.ones(shape=(1, num_states))))
a_ub = np.hstack((a_ub, np.zeros(shape=(a_ub.shape[0], num_states))))
# Loop over states
for si, state in enumerate(state_sample):
# Show progress
if verbose:
percent = int(si / num_states * 100)
if percent % 5 == 0:
print("{:d}%".format(percent))
# Compute the value-expectations for the possible actions
a_ve = np.zeros((num_basis_functions, num_actions))
for i in range(num_transition_samples):
for a in range(num_actions):
a_ve[:, a] += basis_value_fn(
ordered_transition_function(state, a)
)
a_ve /= num_transition_samples
# Find the difference w.r.t the expert (0th action)
a_ve_diff = np.subtract(np.array([a_ve[:, 0]]).T, a_ve[:, 1:])
# Prepare the RHS block of the a_ub matrix
tmp = np.zeros((1, num_states))
tmp[0, si] = -1
            tmp = np.vstack([tmp for _ in range(num_actions - 1)])
# Append to first half of penalty constraints to a_ub, b_ub
a_ub = np.vstack((
a_ub,
np.hstack((a_ve_diff.T, tmp))
))
b_ub = np.vstack((
b_ub,
                np.zeros(shape=(num_actions - 1, 1))
))
# Append to second half of penalty constraints to a_ub, b_ub
a_ub = np.vstack((
a_ub,
np.hstack((penalty_coefficient * a_ve_diff.T, tmp))
))
b_ub = np.vstack((
b_ub,
                np.zeros(shape=(num_actions - 1, 1))
))
# TODO ajs 12/Jun/2018 Remove redundant constraints
# *twitch*
if verbose:
print("100%")
return c, a_ub, b_ub
# Prepare LP constraint matrices
c = np.zeros(shape=(1, num_basis_functions), dtype=float)
a_ub = np.zeros(shape=(0, num_basis_functions), dtype=float)
b_ub = np.zeros(shape=(0, 1))
# Compose LP optimisation problem
c, a_ub, b_ub = add_costly_single_step_constraints(c, a_ub, b_ub)
    c, a_ub, b_ub = add_alpha_size_constraints(c, a_ub, b_ub, num_basis_functions)
if verbose:
print("Number of optimisation variables: {}".format(c.shape[1]))
print("Number of constraints: {}".format(a_ub.shape[0]))
print("Solving LP problem...")
# NB: cvxopt.solvers.lp expects a 1d c vector
solvers.options['show_progress'] = verbose
res = solvers.lp(matrix(c[0, :]), matrix(a_ub), matrix(b_ub))
if verbose:
pprint(res)
# Extract the true optimisation variables
alpha_vector = np.array(res['x'][0:num_basis_functions].T)
return alpha_vector, res
def trajectory_linear_programming(
mdp,
trajectories,
basis_functions,
discount_factor,
solver,
*,
penalty_coefficient=2.0,
num_iterations=float("inf"),
tolerance=1e-6,
verbose=True
):
"""Trajectory-based Linear Programming IRL by Ng and Russell, 2000
Args:
        mdp (gym.Env): MDP to run IRL over. Used for rolling out trajectories.
trajectories (list): List of state trajectories from the expert
basis_functions (list): List of basis functions b(s) -> float
discount_factor (float): Expert's discount factor
solver (function): A function that solves an mdp to find an optimal
policy. Should take a gym.Env object and a reward function,
and return a policy function pi(s) -> a mapping states to actions
penalty_coefficient (float): Penalty function coefficient. Ng and
Russell find 2 is robust. Must be >= 1.
num_iterations (int): Number of iterations to loop for
tolerance (float): Convergence tolerance for the reward coefficients
verbose (bool): Print status information
Returns:
(numpy array): Vector of basis function coefficients such that the
expert's demonstrations are optimal.
"""
# Enforce valid penalty function coefficient
assert penalty_coefficient >= 1, \
"Penalty function coefficient must be >= 1, was {}".format(
penalty_coefficient
)
# Convenience variable for the number of basis functions
d = len(basis_functions)
# Slice out the initial state set
starting_state_set = [t[0] for t in trajectories]
# Measure maximum trajectory length
max_trajectory_length = max([len(t) for t in trajectories])
if verbose:
print("Trajectory-based Linear Programming IRL")
print(
"num_trajectories={}, max_trajectory_length={}, "
"num_basis_functions={}, "
"penalty_coefficient={:.3f}, num_iterations={}".format(
len(trajectories),
max_trajectory_length,
d,
penalty_coefficient,
num_iterations
)
)
def generate_state_trajectories(
mdp,
start_state_set,
policy,
max_trajectory_length
):
"""Generate a list of state-only trajectories
Args:
mdp (gym.Env): The MDP to roll-out trajectories in
start_state_set (list): List of starting states to use
policy (function): Policy to follow for rollouts
max_trajectory_length: Maximum trajectory length to allow
Returns:
(list): A list of state trajectories, (each a list of states)
"""
trajectories = []
for s0 in start_state_set:
trajectory = rollout(
mdp,
s0,
policy,
max_length=max_trajectory_length
)
# Slice out just the state trajectory and append it
trajectories.append([sar[0] for sar in trajectory])
return trajectories
def discounted_feature_expectations(
trajectories,
basis_functions,
discount_factor
):
"""Compute discounted feature expectations under the basis function set
Args:
trajectories (list): List of state trajectories
basis_functions (list): List of basis functions b(s) -> float
discount_factor: Expert's discount factor
Returns:
(numpy array): A numpy array of shape (1, d) where d is the number
of basis functions. The ith column indicates the average
discounted feature expectation for feature i given the set of
trajectories.
"""
feature_expectations = np.zeros((1, len(basis_functions)))
# Average over all trajectories
for trajectory in trajectories:
# Compute one row per basis function
for i, basis_function in enumerate(basis_functions):
discount_vector = np.array(
[discount_factor ** j for j in range(len(trajectory))]
)
feature_values = np.array(
[basis_function(s) for s in trajectory])
feature_expectations[0, i] += discount_vector @ feature_values
# Normalize expectations
feature_expectations /= len(trajectories)
return feature_expectations
# Compute expert discounted feature expectations
v_hat_pi_star = discounted_feature_expectations(
trajectories,
basis_functions,
discount_factor
)
# Keep a list of discounted policy feature expectations
policy_set = []
policy_discounted_feature_expectations = np.empty(
shape=(0, v_hat_pi_star.shape[1])
)
def add_optimal_expert_constraints(c, a_ub, b_ub):
"""Add constraints and augment the objective to make the expert optimal
This will add k extra optimisation variables, and 2k extra
constraints, where k is the number of policies in the policy set.
NB: Assumes the true optimisation variables are first in the c vector.
Args:
c (numpy array): Objective function weights
a_ub (numpy array): Upper bound constraint coefficient matrix
b_ub (numpy array): Upper bound constraint RHS vector
Returns
(numpy array): Objective function weights
(numpy array): Upper bound constraint coefficient matrix
(numpy array): Upper bound constraint RHS vector
"""
# Measure the current number of policies in the policy set
k = policy_discounted_feature_expectations.shape[0]
# Add optimisation variables for all policies
c = np.hstack((c, np.ones((1, k))))
a_ub = np.hstack((a_ub, np.zeros((a_ub.shape[0], k))))
diff = v_hat_pi_star - policy_discounted_feature_expectations
tmp = -1 * np.identity(policy_discounted_feature_expectations.shape[0])
# Add Constraints for both sides of the penalty function
a_ub = np.vstack((
a_ub,
np.hstack((diff, tmp)),
np.hstack((penalty_coefficient * diff, tmp))
))
b_ub = np.vstack((
b_ub,
np.zeros((k, 1)),
np.zeros((k, 1))
))
return c, a_ub, b_ub
# Iterate the requested number of times
alpha_vector = None
i = 0
while True:
i += 1
if verbose:
print("TLP: Iteration={}, alpha_vector={}".format(i, alpha_vector))
if alpha_vector is None:
# First iteration - pick an initial, random policy
policy_set.append(
lambda s: mdp.action_space.sample()
)
else:
# Make a copy of the MPD
mdp_copy = copy(mdp)
mdp_copy.reset()
# Compose a new reward function
new_reward_fn = lambda s: (
alpha_vector @ np.array([bfn(s) for bfn in basis_functions])
)[0]
def _step(self, action):
"""Overload an MDP's reward function
Args:
action (object): an action provided by the environment
Returns:
observation (object): agent's observation of the current
environment
reward (float) : amount of reward returned after previous
action
done (boolean): whether the episode has ended, in which
case further step() calls will return undefined results
info (dict): contains auxiliary diagnostic information
(helpful for debugging, and sometimes learning)
"""
# Call original function
                s, r, d, i = self._step(action)
# Replace reward
r = new_reward_fn(s)
return s, r, d, i
# Overload the MDP to use a new reward function
            mdp_copy._step = mdp_copy.step
            # bind _step to the instance so calls pass mdp_copy as self
            mdp_copy.step = _step.__get__(mdp_copy)
# Call the provided solver to find a policy that maximises V under
# the new reward function
policy_set.append(solver(mdp, new_reward_fn))
# Estimate the value of the newest policy
policy_discounted_feature_expectations = np.vstack((
policy_discounted_feature_expectations,
discounted_feature_expectations(
generate_state_trajectories(
mdp,
starting_state_set,
policy_set[-1],
max_trajectory_length
),
basis_functions,
discount_factor
)
))
if verbose:
print("Composing LP problem...")
# Form the LP problem
c = np.zeros((1, d), dtype=float)
a_ub = np.zeros((0, c.shape[1]), dtype=float)
b_ub = np.zeros((a_ub.shape[0], 1), dtype=float)
c, a_ub, b_ub = add_optimal_expert_constraints(c, a_ub, b_ub)
c, a_ub, b_ub = add_alpha_size_constraints(c, a_ub, b_ub, d)
if verbose:
print("Number of optimisation variables: {}".format(c.shape[1]))
print("Number of constraints: {}".format(a_ub.shape[0]))
print("Solving LP problem...")
# NB: cvxopt.solvers.lp expects a 1d c vector
solvers.options['show_progress'] = verbose
res = solvers.lp(matrix(c[0, :]), matrix(a_ub), matrix(b_ub))
if verbose:
pprint(res)
# Extract the true optimisation variables
new_alpha_vector = np.array(res['x'][0:d].T)
delta = [float("inf")]
if alpha_vector is not None:
delta = alpha_vector - new_alpha_vector
alpha_vector = new_alpha_vector
if i > num_iterations or np.linalg.norm(delta) <= tolerance:
break
return alpha_vector
def demo():
""" Demonstrate these methods on some gridworld problems
"""
# region === Get some imports
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from irl_methods.utils import gaussian
from irl_methods.mdp.gridworld import (
GridWorldDiscEnv,
GridWorldCtsEnv,
FEATUREMAP_IDENTITY,
EDGEMODE_WRAP,
EDGEMODE_CLAMP,
EDGEMODE_STRINGS,
ACTION_STRINGS
)
#endregion
# region === GridWorld construction
# #################################################################
# Size of the gridworld
size = 8
# Wind probability for the discrete gridworld
wind_prob = 0.3
# Edge mode for the gridworlds
edge_mode = EDGEMODE_CLAMP
#edge_mode = EDGEMODE_WRAP
# Per-step reward
per_step_reward = 0
# Goal reward
goal_reward = 1
# Goal state
#goal_state = np.random.randint(0, size, 2)
goal_state = np.array((size-1, size-1))
# Discount factor
#discount_factor = np.random.uniform(0.1, 0.9)
discount_factor = 0.9
print("Discrete GridWorld, "
"size={:d}, wind_prob={:d}%, edge_mode={}".format(
size,
int(wind_prob*100),
EDGEMODE_STRINGS[edge_mode]
)
)
num_states = size * size
gw_disc = GridWorldDiscEnv(
size=size,
wind=wind_prob,
goal_states=[goal_state],
per_step_reward=per_step_reward,
goal_reward=goal_reward,
edge_mode=edge_mode
)
disc_optimal_policy = gw_disc.get_optimal_policy()
ordered_transition_tensor = gw_disc.get_ordered_transition_tensor(
disc_optimal_policy
)
#endregion
#
# # region === Vanilla LP IRL
# ##################################################################
# print("")
#
# # L1 regularisation weight
# # l1 = 0
# l1 = np.random.uniform(0, 1)
#
# # ===
#
# # Run LP IRL
# lp_reward, _ = linear_programming(
# ordered_transition_tensor,
# discount_factor,
# l1_regularisation_weight=l1,
# r_max=(per_step_reward + goal_reward),
# verbose=True
# )
#
# print("Recovered reward vector:")
# print("{}".format(lp_reward))
#
# fig = plt.figure()
# plt.suptitle('Vanilla Linear Programming IRL')
# plt.set_cmap("viridis")
#
# # Plot ground truth reward
# ax = plt.subplot(1, 3, 1)
# gw_disc.plot_reward(ax, gw_disc.ground_truth_reward)
# plt.title("Ground truth reward")
# plt.colorbar(
# cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
# )
#
# # Plot provided policy
# ax = plt.subplot(1, 3, 2)
# gw_disc.plot_policy(ax, disc_optimal_policy)
# plt.title("Provided policy")
#
# # Plot recovered reward
# ax = plt.subplot(1, 3, 3)
# gw_disc.plot_reward(ax, lp_reward)
# plt.title("IRL result")
# plt.colorbar(
# cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
# )
#
# plt.tight_layout()
# plt.show()
#
# # endregion
#
# # region === LLP IRL
# ##################################################################
#
# # Number of transition samples to use when estimating values in LLP
# # If the wind is 0, this can be 1
# num_transition_samples = 100
#
# # Penalty coefficient to use in LLP/TLP
# # Min value is 1, Ng and Russell suggest 2
# penalty_coefficient = 10
#
# # ===
#
# # Feature function
# feature_fn = lambda s: gw_disc.get_state_features(
# s=s,
# feature_map=FEATUREMAP_IDENTITY
# )
#
# # Convert basis functions to value functions
# print("Computing value estimates...")
# basis_value_functions = []
# for bfi in range(num_states):
#
# # Progress update
# print("{:d}%".format(int(bfi/num_states*100)))
#
# basis_value_functions.append(
# gw_disc.estimate_value(
# disc_optimal_policy,
# discount_factor,
# reward=lambda s: feature_fn(s)[bfi]
# )
# )
# # A vector function that computes the value vector given a state
# basis_value_fn = lambda s: [bvf[s] for bvf in basis_value_functions]
# print("")
#
# state_sample = list(range(num_states))
# num_actions = len(gw_disc._A)
# ordered_transition_function = lambda s, a: np.random.choice(
# state_sample,
# p=ordered_transition_tensor[s, a]
# )
#
# alpha_vector, _ = large_linear_programming(
# state_sample,
# num_actions,
# ordered_transition_function,
# basis_value_fn,
# penalty_coefficient=penalty_coefficient,
# num_transition_samples=num_transition_samples,
# verbose=True
# )
#
# print("Recovered alpha vector:")
# print("{}".format(alpha_vector))
#
# # Compose reward function lambda
# llp_reward_fn = lambda s: (alpha_vector @ basis_value_fn(s))[0]
# llp_reward_vector = [llp_reward_fn(s) for s in range(num_states)]
#
# # Plot results
# fig = plt.figure()
# plt.suptitle('Large Linear Programming IRL')
# plt.set_cmap("viridis")
#
# # Plot ground truth reward
# ax = plt.subplot(1, 3, 1)
# gw_disc.plot_reward(ax, gw_disc.ground_truth_reward)
# plt.title("Ground truth reward")
# plt.colorbar(
# cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
# )
#
# # Plot provided policy
# ax = plt.subplot(1, 3, 2)
# gw_disc.plot_policy(ax, disc_optimal_policy)
# plt.title("Provided policy")
#
# # Plot recovered reward
# ax = plt.subplot(1, 3, 3)
# gw_disc.plot_reward(ax, llp_reward_vector)
# plt.title("IRL result")
# plt.colorbar(
# cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
# )
#
# plt.tight_layout()
# plt.show()
#
# # endregion
# region === TLP IRL
##################################################################
# Maximum trajectory length to use for TLP
max_trajectory_length = 3 * size
# Number of iterations to run the TLP solver for
num_iterations = 20
# Number demonstration trajectories to provide to the TLP algorithm
num_trajectories = 5000
# Roll out some trajectories to use for TLP
trajectories = []
for i in range(num_trajectories):
trajectory = rollout(
gw_disc,
gw_disc.observation_space.sample(),
disc_optimal_policy,
max_length=max_trajectory_length
)
# Slice out just the state trajectory and append it
trajectories.append([sar[0] for sar in trajectory])
# A vector of basis functions
basis_functions = [
lambda s: gw_disc.get_state_features(s=s)[0],
lambda s: gw_disc.get_state_features(s=s)[1]
]
def solver(mdp, reward):
"""Solves a GridWorldDiscEnv MDP using policy iteration to find a policy
Args:
mdp (GridWorldDiscEnv): MDP with a valid reward function to solve
reward (function): Reward function r(s) -> float
Returns:
(function): Policy pi(s) -> a that acts optimally with respect to
the given MDP's reward function
"""
# Convergence tolerance for the value function
tol = 1e-6
# Max number of PI iterations
max_num_iterations = 5
# Start with random policy and zero value vector
pi = lambda s: mdp.action_space.sample()
v = np.zeros(len(mdp._S))
print("Running PI")
i = 0
while True:
i += 1
print("PI step {}".format(i))
# Update value
v_new = mdp.estimate_value(
pi,
discount_factor,
reward=reward,
max_iterations=1000
)
delta = v_new - v
v = v_new
print("PI: Value delta is: {}".format(np.linalg.norm(delta)))
# Get new greedy policy
pi = mdp.greedy_policy(v_new)
if np.linalg.norm(delta) < tol or i >= max_num_iterations:
break
print("PI: Found new policy")
return pi
alpha_vector = trajectory_linear_programming(
gw_disc,
trajectories,
basis_functions,
discount_factor,
solver,
verbose=True
)
print("Got final alpha vector: {}".format(alpha_vector))
tlp_reward_fn = lambda s: (alpha_vector @ [bf(s) for bf in basis_functions])[0]
tlp_reward_vector = [tlp_reward_fn(s) for s in range(num_states)]
# Plot results
fig = plt.figure()
plt.suptitle('Trajectory-based Linear Programming IRL')
plt.set_cmap("viridis")
# Plot ground truth reward
ax = plt.subplot(1, 3, 1)
gw_disc.plot_reward(ax, gw_disc.ground_truth_reward)
plt.title("Ground truth reward")
plt.colorbar(
cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
)
# Plot provided policy
#ax = plt.subplot(1, 3, 2)
#gw_disc.plot_policy(ax, disc_optimal_policy)
plt.title("Provided trajectories")
# Plot recovered reward
ax = plt.subplot(1, 3, 3)
gw_disc.plot_reward(ax, tlp_reward_vector)
plt.title("IRL result")
plt.colorbar(
cax=make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
)
plt.tight_layout()
plt.show()
# endregion
return
if __name__ == "__main__":
demo()
|
StarcoderdataPython
|
6551289
|
<reponame>azaddeveloper/api-snippets
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "<KEY>"
auth_token = "<PASSWORD>"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
workflow_sid = "WWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
task = client.tasks(workspace_sid).create(
workflow_sid=workflow_sid, attributes='{"type":"support"}'
)
print(task.attributes)
|
StarcoderdataPython
|
8158546
|
<reponame>rababerladuseladim/dudes<gh_stars>1-10
import numpy as np
from collections import Counter, defaultdict
from dudes.Ranks import Ranks
from dudes.Util import *
class Ident:
columns = ['Iter','TaxID','ParentTaxID','RankID','CumulativeMatches','MatchScoreSum','Abundance','CumulativeAbundance']
def __init__(self,mat=None,pv=None):
self.cols = {c:i for i,c in enumerate(Ident.columns)}
self.ident = []
self.pvals = dict()
        if mat is not None: self.ident = np.array(mat, ndmin=2)
        if pv is not None: self.pvals = pv
def __iter__(self):
for v in self.ident:
yield {c:v[i] for i,c in enumerate(Ident.columns)}
def getIter(self):
return self.ident[-1,0]
def getSize(self):
return len(self.ident)
def getCol(self,col):
return self.ident[:,self.cols[col]]
def getSubSet(self,ind):
return Ident(self.ident[ind])
def add(self,id):
if id.getSize():
if len(self.ident): self.ident = np.vstack((self.ident,id.ident)) # Add a Ident object
else: self.ident = np.array(id.ident,ndmin=2) #First entry
self.pvals.update(id.pvals) # Update p-val dict
def remove(self,id):
for r in id:
# Reassign the parent node to the children
self.ident[np.logical_and(self.ident[:,0]==r['Iter'],self.ident[:,2]==r['TaxID']),2] = r['ParentTaxID']
# Remove entry
self.ident = self.ident[~np.logical_and(self.ident[:,0]==r['Iter'],self.ident[:,1]==r['TaxID'])]
# Remove pval
if (r['Iter'],r['TaxID']) in self.pvals: del self.pvals[(r['Iter'],r['TaxID'])]
def setMatchScoreSum(self,iter,taxid,mssum):
idx = np.logical_and(self.getCol('Iter')==iter,self.getCol('TaxID')==taxid)
self.ident[idx,5] = mssum
def setAbundance(self,iter,taxid,ab):
idx = np.logical_and(self.getCol('Iter')==iter,self.getCol('TaxID')==taxid)
self.ident[idx,6] = ab
def setCumulativeAbundance(self,iter,taxid,ab):
idx = np.logical_and(self.getCol('Iter')==iter,self.getCol('TaxID')==taxid)
self.ident[idx,7] = ab
def getLeafs(self,taxid_start,iter=None):
leaf_ident = Ident()
        if iter is not None:
sub_ident = self.ident[self.ident[:,0]==iter]
else:
sub_ident = self.ident
stack = list(sub_ident[sub_ident[:,2]==taxid_start])
while stack:
node = stack.pop()
children = list(sub_ident[sub_ident[:,2]==node[1]])
if children:
stack.extend(children)
else:
leaf_ident.add(Ident(sub_ident[sub_ident[:,1]==node[1]]))
return leaf_ident
def printTree(self,taxid_start,total_matches_filter,total_abundance_norm,names):
tree = []
if np.sum(self.ident)!=0:
for it in range(self.getIter()+1):
tree.append(str(it))
sub_ident = self.ident[self.ident[:,0]==it]
stack = list(sub_ident[sub_ident[:,2]==taxid_start])
while stack:
node = stack.pop()
tree.append("-%s %d %s %s- %d (%d) M:%d(%.6f) MSS:%d A:%.6f CA:%.6f %s %s" % ((node[3])*'-',node[3],getNameRank(node[3]),(len(Ranks.ranks)-node[3]-len(getNameRank(node[3]))+9)*'-',
node[1],(node[2]),node[4],node[4]/total_matches_filter,node[5],node[6]/float(total_abundance_norm),node[7]/float(total_abundance_norm),Counter(self.pvals[(node[0],node[1])]),names.getName(node[1])))
children = list(sub_ident[sub_ident[:,2]==node[1]])
stack.extend(children)
return tree
|
StarcoderdataPython
|
82018
|
<filename>angr/engines/vex/statements/dirty.py
from . import SimIRStmt
from .. import dirty
from .... import sim_options as o
from ....errors import UnsupportedDirtyError
import logging
l = logging.getLogger(name=__name__)
class SimIRStmt_Dirty(SimIRStmt):
__slots__ = []
# Example:
# t1 = DIRTY 1:I1 ::: ppcg_dirtyhelper_MFTB{0x7fad2549ef00}()
def _execute(self):
exprs = self._translate_exprs(self.stmt.args)
if hasattr(dirty, self.stmt.cee.name):
s_args = [ex.expr for ex in exprs]
if o.ACTION_DEPS in self.state.options:
if len(exprs) == 0:
reg_deps = frozenset()
tmp_deps = frozenset()
else:
reg_deps = frozenset.union(*[e.reg_deps() for e in exprs])
tmp_deps = frozenset.union(*[e.tmp_deps() for e in exprs])
else:
reg_deps = None
tmp_deps = None
func = getattr(dirty, self.stmt.cee.name)
retval, retval_constraints = func(self.state, *s_args)
self._add_constraints(*retval_constraints)
if self.stmt.tmp not in (0xffffffff, -1):
self.state.scratch.store_tmp(self.stmt.tmp, retval, reg_deps, tmp_deps)
else:
l.error("Unsupported dirty helper %s", self.stmt.cee.name)
raise UnsupportedDirtyError("Unsupported dirty helper %s" % self.stmt.cee.name)
|
StarcoderdataPython
|
1749357
|
import unittest
from testfixtures import LogCapture
from flexp.flow import Chain
from flexp.flow import inspector
from .utils import Add, DummyModule
class TestChain(unittest.TestCase):
def test_chain_inspect(self):
data = {"input": 20}
with LogCapture() as l:
c = Chain([
inspector.inspect(Add(10), stream=True)])
c.process(data)
c.close()
l.check(
('flexp.flow.flow', 'DEBUG', 'Add.process()'),
('flexp.flow.inspector', 'INFO', 'Data flow structure'),
('flexp.flow.inspector', 'INFO', "{'input': 20, 'output': 30}"),
('flexp.flow.inspector', 'INFO', 'End of data flow structure'),
('flexp.flow.flow', 'INFO', 'Add average execution time 0.00 sec')
)
def test_chain_inspect_deep(self):
data = {"input": {i: i for i in range(11)}}
with LogCapture() as l:
c = Chain([
inspector.inspect(DummyModule(), stream=True)])
c.process(data)
c.close()
l.check(
('flexp.flow.flow', 'DEBUG', 'DummyModule.process()'),
('flexp.flow.inspector', 'INFO', 'Data flow structure'),
('flexp.flow.inspector', 'INFO', "{\'input\': {\"<class \'int\'>#11 times (0)\": 0}}"),
('flexp.flow.inspector', 'INFO', 'End of data flow structure'),
('flexp.flow.flow', 'INFO', 'DummyModule average execution time 0.00 sec')
)
|
StarcoderdataPython
|
295321
|
<reponame>shreya-n-kumari/python<filename>class_import.py
from class_car import ElectricCar
Tesla = ElectricCar('tesla','model s',2016)
print(Tesla.get_name())
print(Tesla.describe_battery())
|
StarcoderdataPython
|
3413274
|
<gh_stars>0
"""The WaveBlocks Project
Various small utility functions.
@author: <NAME>
@copyright: Copyright (C) 2011 <NAME>
@license: Modified BSD License
"""
from numpy.lib.arraysetops import in1d
#TODO: Consider merging this into the TimeManager
def common_timesteps(timegridA, timegridB):
r"""
Find the indices (wrt to A and B) of the timesteps common to both timegrids.
"""
IA = in1d(timegridA, timegridB)
IB = in1d(timegridB, timegridA)
return (IA, IB)
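# Worked example (illustrative, not from the original module):
#   A = [0, 2, 4, 6], B = [4, 5, 6, 7]
#   common_timesteps(A, B) -> (array([False, False,  True,  True]),
#                              array([ True, False,  True, False]))
# i.e. boolean masks selecting timesteps 4 and 6 within each grid.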
|
StarcoderdataPython
|
8126047
|
<reponame>agustinhenze/mibs.snmplabs.com
#
# PySNMP MIB module HH3C-FC-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-FC-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:13:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, NotificationType, ModuleIdentity, Counter64, IpAddress, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Integer32, ObjectIdentity, TimeTicks, MibIdentifier, iso, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "NotificationType", "ModuleIdentity", "Counter64", "IpAddress", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Integer32", "ObjectIdentity", "TimeTicks", "MibIdentifier", "iso", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class Hh3cFcAddressType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("wwn", 1), ("fcid", 2))
class Hh3cFcAddress(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(3, 3), ValueSizeConstraint(8, 8), )
class Hh3cFcAddressId(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 3)
fixedLength = 3
class Hh3cFcAddressIdOrZero(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(3, 3), )
class Hh3cFcNameId(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class Hh3cFcNameIdOrZero(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(8, 8), ValueSizeConstraint(16, 16), )
class Hh3cFcClassOfServices(TextualConvention, Bits):
status = 'current'
namedValues = NamedValues(("classF", 0), ("class1", 1), ("class2", 2), ("class3", 3), ("class4", 4), ("class5", 5), ("class6", 6))
class Hh3cFcBbCredit(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 32767)
class Hh3cFcRxMTU(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(128, 2112)
class Hh3cFcVsanIndex(TextualConvention, Unsigned32):
status = 'current'
displayHint = 'd'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4095)
class Hh3cFcStartOper(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enable", 1), ("disable", 2))
class Hh3cFcDomainId(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 239)
class Hh3cFcDomainIdOrZero(TextualConvention, Integer32):
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 239)
class Hh3cFcDomainPriority(TextualConvention, Unsigned32):
status = 'current'
displayHint = 'd'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 254)
class Hh3cFcDmState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
namedValues = NamedValues(("disabledWithNoDomain", 1), ("disabledWithDomainCfg", 2), ("stableWithNoEports", 3), ("stableWithDomainCfg", 4), ("stableWithNoDomain", 5), ("principalSwitchInSelect", 6), ("domainIdRequesting", 7), ("buildFabricPhase", 8), ("reconfigureFabricPhase", 9), ("unknown", 10))
class Hh3cFcDomainIdList(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 32)
mibBuilder.exportSymbols("HH3C-FC-TC-MIB", Hh3cFcStartOper=Hh3cFcStartOper, Hh3cFcAddressIdOrZero=Hh3cFcAddressIdOrZero, Hh3cFcBbCredit=Hh3cFcBbCredit, Hh3cFcDomainPriority=Hh3cFcDomainPriority, Hh3cFcNameId=Hh3cFcNameId, Hh3cFcDmState=Hh3cFcDmState, Hh3cFcNameIdOrZero=Hh3cFcNameIdOrZero, Hh3cFcVsanIndex=Hh3cFcVsanIndex, Hh3cFcDomainIdOrZero=Hh3cFcDomainIdOrZero, Hh3cFcRxMTU=Hh3cFcRxMTU, Hh3cFcClassOfServices=Hh3cFcClassOfServices, Hh3cFcAddressId=Hh3cFcAddressId, Hh3cFcDomainId=Hh3cFcDomainId, Hh3cFcDomainIdList=Hh3cFcDomainIdList, Hh3cFcAddress=Hh3cFcAddress, Hh3cFcAddressType=Hh3cFcAddressType)
|
StarcoderdataPython
|
1670417
|
<gh_stars>0
from django.shortcuts import render
from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework import permissions
from . import serializers
CustomUser = get_user_model()
class UserRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
queryset = CustomUser.objects.all()
serializer_class = serializers.CustomUserRetrieveSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.request.user
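# Illustrative wiring (an assumption, not part of this app's code): in urls.py the
# view would typically be exposed as something like
#   path('users/me/', UserRetrieveUpdateDestroyAPIView.as_view())
# Because get_object() returns request.user, an authenticated caller can only
# retrieve, update, or delete their own account, regardless of any URL parameters.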
|
StarcoderdataPython
|
245038
|
<filename>historic_hebrew_dates/annotated_corpus.py
#!/usr/bin/env python3
import re
import os
import csv
import pandas as pd
from bidi.algorithm import get_display
from .grammars.annotation_grammar import get_patterns
from .pattern_factory import create_parsers
parsers = create_parsers('hebrew')
def DateTypeParser():
return parsers['date_types']
def NumeralParser():
return parsers['numerals']
pd.set_option('display.max_colwidth', -1)
class AnnotatedCorpus:
def __init__(self):
self.tags = pd.read_csv(os.path.join(
os.path.dirname(__file__), 'tags.csv'))
self.raw_df = pd.read_excel(os.path.join(
os.getcwd(), 'data/Inscription DB for Time Project.xlsx'), header=0)
self.cleaned = self.clean_transcriptions(self.raw_df)
self.infixed = self.infix_transcriptions(self.cleaned)
self.parsed = self.parse_transcriptions(self.infixed)
def clean_transcriptions(self, dataframe):
cleaned_df = dataframe.copy(deep=True)
cleaned_df['Transcription'] = dataframe['Transcription'] \
.str.replace('\n', ' ') \
.str.replace('[', '') \
.str.replace(']', '') \
.str.replace('(\?)', ' ') \
.str.replace(' ', ' ') \
.str.replace('…', '...')
return cleaned_df
def infix_transcriptions(self, dataframe):
""" Rewrite transcriptions of the form {text}(tag) to {text(tag)}"""
pattern = r'(})(\(.+?\))'
infixed_df = dataframe.copy(deep=True)
infixed_df['Transcription'].replace(
to_replace=pattern, value=r'\2\1', regex=True, inplace=True)
return infixed_df
def parse_transcriptions(self, dataframe):
dataframe = self.parse_column(dataframe, 'Transcription')
dataframe = self.parse_column(dataframe, 'T_date')
dataframe = self.parse_column(dataframe, 'T_age')
dataframe = self.parse_column(dataframe, 'T_date_type')
return dataframe
def parse_column(self, dataframe, column_name):
tag_pattern = r'(^.+)\((.+)\)$'
c_name = 'T' if column_name == 'Transcription' else column_name
for i, row in enumerate(dataframe[column_name]):
try:
parsed_level = parse_level_parentheses(row)
if parsed_level:
for annotation in parsed_level:
match = re.match(tag_pattern, annotation)
if match:
text, tag = match.groups()
full_tag = '_'.join(
[c_name, self.translate_tag(tag)])
if full_tag not in dataframe.columns:
dataframe[full_tag] = None
dataframe.loc[i, full_tag] = text
if full_tag + '_raw' not in dataframe.columns:
dataframe[full_tag+'_raw'] = None
dataframe.loc[i, full_tag+'_raw'] = strip_annotations(
text)
except:
pass
return dataframe
def translate_tag(self, input_tag):
return self.tags[self.tags.tag == input_tag]['translation'].values[0]
def translate_tag_hebrew(self, english):
return self.tags[self.tags.translation == english]['tag'].values[0]
def write_test_standards(self):
# age (all)
age_df = pd.concat(
[
self.parsed['T_age_raw'],
self.parsed['Age at Death']
.str.replace('\n', ' ')
], axis=1).dropna()
age_df.to_csv('data/age_all.csv', header=[
'text', 'age at death'], index=False)
# age (only clear numbers)
age_df = age_df[pd.to_numeric(
age_df['Age at Death'], errors='coerce').notnull()]
age_df.to_csv('data/age_clear.csv', header=[
'text', 'age at death'], index=False)
# date (all)
date_df = self.parsed[['T_date_raw', 'Date', 'Year', 'Type']]
date_df = date_df.replace('\n', ' ', regex=True).dropna()
date_df.to_csv('data/date_all.csv', header=[
'text', 'date', 'year', 'type'], index=False)
# date (clear)
date_df = date_df[
(pd.to_numeric(date_df['Date'], errors='coerce').notnull()) &
(pd.to_numeric(date_df['Year'], errors='coerce').notnull())
]
date_df.to_csv('data/date_clear.csv', header=[
'text', 'date', 'year', 'type'], index=False)
def aggr_row_patterns(self, aggr_patterns, patterns, row):
for context, tag, tag_type, pattern in patterns:
translated_tag = self.translate_tag(tag)
if not translated_tag:
print(f'Unknown tag: {tag}')
elif pattern:
if translated_tag in ['age', 'date']:
subtags = list(map(lambda match: match.groups()[
0], re.finditer(r'\{(\w+)(:\w+|)\}', pattern)))
if len(subtags) != len(set(subtags)):
print(f"Duplicate tags in {pattern}")
print(row)
else:
value = '[' + \
', '.join(
map(lambda tag: f'{tag}: \'{tag}\'', subtags)) + ']'
aggr_patterns[translated_tag][pattern] = value
elif translated_tag == 'year':
value = row['Year'] if context == 'date' else row['Age at Death']
if re.match(r'^\d+$', str(value)):
aggr_patterns['number'][pattern] = value
else:
if pattern not in aggr_patterns['number']:
# mark that it exists
aggr_patterns['number'][pattern] = None
elif translated_tag == 'type':
if not re.match(r'[\{\}]', pattern):
# type patterns with some dependency aren't supported
value = row['Type']
if not ';' in value:
aggr_patterns['type'][pattern] = value
elif translated_tag == 'month':
aggr_patterns[translated_tag][pattern] = row['Month']
else:
raise Exception(
f'Unknown tag: {translated_tag} in {pattern}')
def aggr_patterns(self):
date_df = pd.concat(
[
self.cleaned['Transcription'],
self.cleaned['Age at Death'],
self.parsed['Year'],
self.parsed['Month'],
self.parsed['Day'],
self.parsed['Type']
], axis=1)
aggr_patterns = {}
for index, tag in self.tags.iterrows():
aggr_patterns[tag['translation']] = {}
for index, row in date_df.iterrows():
transcription = row['Transcription']
            if transcription is not None:
try:
patterns = get_patterns(transcription, {
self.translate_tag_hebrew('day'): self.translate_tag_hebrew('number'),
self.translate_tag_hebrew('year'): self.translate_tag_hebrew('number')
})
except Exception as error:
print(f'Error parsing: {transcription}')
print(error)
else:
self.aggr_row_patterns(aggr_patterns, patterns, row)
return aggr_patterns
def write_patterns(self):
aggr_patterns = self.aggr_patterns()
#
# Dates
max_number, known_date_patterns = self.load_known_patterns(
'hebrew_dates.csv')
new_date_patterns = []
for date_pattern, value in aggr_patterns['date'].items():
if not date_pattern in known_date_patterns:
new_date_patterns += [(max_number + 1, date_pattern, value)]
#
# Months
max_number, known_month_patterns = self.load_known_patterns(
'hebrew_months.csv')
new_month_patterns = []
for month_pattern, value in aggr_patterns['month'].items():
if not month_pattern in known_month_patterns:
new_month_patterns += [(max_number + 1, month_pattern, value)]
#
# Numbers
numeral_parser = NumeralParser()
new_number_patterns = []
for number_pattern, value in aggr_patterns['number'].items():
if not numeral_parser.parse(number_pattern.replace('\\w*', '')):
new_number_patterns += [('?', number_pattern, value)]
#
# Date type
date_type_parser = DateTypeParser()
new_date_type_patterns = []
for date_type_pattern, value in aggr_patterns['type'].items():
if not date_type_parser.parse(date_type_pattern.replace('\\w*', '')):
if value != 'From the destruction of the Temple':
raise Exception(f"Unknown date type {value}")
new_date_type_patterns += [('Destruction temple',
date_type_pattern, 'קדש')]
self.append_patterns('hebrew_date_types.csv', new_date_type_patterns)
self.append_patterns('hebrew_dates.csv', new_date_patterns)
self.append_patterns('hebrew_months.csv', new_month_patterns)
self.append_patterns('hebrew_numerals.csv', new_number_patterns)
return {
'new_dates': new_date_patterns,
'new_date_types': new_date_type_patterns,
'new_numbers': new_number_patterns,
'new_month_patterns': new_month_patterns
}
def load_known_patterns(self, filename):
patterns = set()
max_type = 0
with open(os.path.join(os.path.dirname(__file__), 'patterns', filename), encoding='utf8') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
next(reader) # skip header
for row in reader:
if re.match(r'^\d+$', row[0]):
row_type = int(row[0])
if row_type > max_type:
max_type = row_type
patterns.add(row[1])
return max_type, patterns
def append_patterns(self, filename, patterns):
def escape_cell(value):
str_value = str(value)
if ',' in str_value:
return f'"{str_value}"'
else:
return str_value
with open(os.path.join(os.path.dirname(__file__), 'patterns', filename), mode='a', encoding='utf8') as f:
f.write('\n'.join(map(lambda row: ','.join(
map(lambda cell: escape_cell(cell), row)), patterns)))
def parse_level_parentheses(string, open='{', close='}'):
""" Parse a single level of matching brackets """
stack = []
parsed = []
for i, c in enumerate(string):
if c == open:
stack.append(i)
        elif c == close and stack:
            start = stack.pop()
            if len(stack) == 0:
                parsed.append(string[start + 1: i])
return parsed
def strip_annotations(string):
string = string.replace('{', '')
string = string.replace('}', '')
string = re.sub(r'\(.+?\)', '', string)
return string
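# --- Minimal usage sketch (not part of the original module) ---
# The annotated transcription below is hypothetical; it only illustrates what
# the two helpers return for a string using the `{text(tag)}` markup scheme.
if __name__ == '__main__':
    sample = 'Here lies {Rabbi Levi(name)} who died {in the year 5600(year)}'
    print(parse_level_parentheses(sample))
    # -> ['Rabbi Levi(name)', 'in the year 5600(year)']
    print(strip_annotations('{Rabbi Levi(name)}'))
    # -> 'Rabbi Levi'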
|
StarcoderdataPython
|
8043799
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 <NAME>, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import logging
from nova.auth.users import UserManager
from nova.auth import rbac
from nova import exception
from nova import flags
from nova import test
FLAGS = flags.FLAGS
class Context(object):
pass
class AccessTestCase(test.BaseTestCase):
def setUp(self):
super(AccessTestCase, self).setUp()
FLAGS.fake_libvirt = True
FLAGS.fake_storage = True
um = UserManager.instance()
# Make test users
try:
self.testadmin = um.create_user('testadmin')
        except Exception as err:
logging.error(str(err))
try:
self.testpmsys = um.create_user('testpmsys')
except: pass
try:
self.testnet = um.create_user('testnet')
except: pass
try:
self.testsys = um.create_user('testsys')
except: pass
# Assign some rules
try:
um.add_role('testadmin', 'cloudadmin')
except: pass
try:
um.add_role('testpmsys', 'sysadmin')
except: pass
try:
um.add_role('testnet', 'netadmin')
except: pass
try:
um.add_role('testsys', 'sysadmin')
except: pass
# Make a test project
try:
self.project = um.create_project('testproj', 'testpmsys', 'a test project', ['testpmsys', 'testnet', 'testsys'])
except: pass
try:
self.project.add_role(self.testnet, 'netadmin')
except: pass
try:
self.project.add_role(self.testsys, 'sysadmin')
except: pass
self.context = Context()
self.context.project = self.project
#user is set in each test
def tearDown(self):
um = UserManager.instance()
# Delete the test project
um.delete_project('testproj')
# Delete the test user
um.delete_user('testadmin')
um.delete_user('testpmsys')
um.delete_user('testnet')
um.delete_user('testsys')
super(AccessTestCase, self).tearDown()
def test_001_allow_all(self):
self.context.user = self.testadmin
self.assertTrue(self._allow_all(self.context))
self.context.user = self.testpmsys
self.assertTrue(self._allow_all(self.context))
self.context.user = self.testnet
self.assertTrue(self._allow_all(self.context))
self.context.user = self.testsys
self.assertTrue(self._allow_all(self.context))
def test_002_allow_none(self):
self.context.user = self.testadmin
self.assertTrue(self._allow_none(self.context))
self.context.user = self.testpmsys
self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
self.context.user = self.testnet
self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
self.context.user = self.testsys
self.assertRaises(exception.NotAuthorized, self._allow_none, self.context)
def test_003_allow_project_manager(self):
self.context.user = self.testadmin
self.assertTrue(self._allow_project_manager(self.context))
self.context.user = self.testpmsys
self.assertTrue(self._allow_project_manager(self.context))
self.context.user = self.testnet
self.assertRaises(exception.NotAuthorized, self._allow_project_manager, self.context)
self.context.user = self.testsys
self.assertRaises(exception.NotAuthorized, self._allow_project_manager, self.context)
def test_004_allow_sys_and_net(self):
self.context.user = self.testadmin
self.assertTrue(self._allow_sys_and_net(self.context))
self.context.user = self.testpmsys # doesn't have the per project sysadmin
self.assertRaises(exception.NotAuthorized, self._allow_sys_and_net, self.context)
self.context.user = self.testnet
self.assertTrue(self._allow_sys_and_net(self.context))
self.context.user = self.testsys
self.assertTrue(self._allow_sys_and_net(self.context))
def test_005_allow_sys_no_pm(self):
self.context.user = self.testadmin
self.assertTrue(self._allow_sys_no_pm(self.context))
self.context.user = self.testpmsys
self.assertRaises(exception.NotAuthorized, self._allow_sys_no_pm, self.context)
self.context.user = self.testnet
self.assertRaises(exception.NotAuthorized, self._allow_sys_no_pm, self.context)
self.context.user = self.testsys
self.assertTrue(self._allow_sys_no_pm(self.context))
@rbac.allow('all')
def _allow_all(self, context):
return True
@rbac.allow('none')
def _allow_none(self, context):
return True
@rbac.allow('projectmanager')
def _allow_project_manager(self, context):
return True
@rbac.allow('sysadmin', 'netadmin')
def _allow_sys_and_net(self, context):
return True
@rbac.allow('sysadmin')
@rbac.deny('projectmanager')
def _allow_sys_no_pm(self, context):
return True
if __name__ == "__main__":
# TODO: Implement use_fake as an option
unittest.main()
|
StarcoderdataPython
|
4851069
|
<filename>tests/test_integration.py
import sys
import unittest
import time
from flask import Flask
# remove mocks and import flask_mqtt
try:
sys.modules.pop('paho.mqtt.client')
sys.modules.pop('flask_mqtt')
except KeyError:
pass
from flask_mqtt import Mqtt, MQTT_ERR_SUCCESS
def wait(seconds=2):
time.sleep(seconds)
class FlaskMQTTTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
def test_simple_connect(self):
self.mqtt = Mqtt(self.app)
self.mqtt._disconnect()
def test_connect_with_username(self):
self.app.config['MQTT_USERNAME'] = 'user'
self.app.config['MQTT_PASSWORD'] = '<PASSWORD>'
self.mqtt = Mqtt(self.app)
self.mqtt._disconnect()
def test_subscribe(self):
self.mqtt = Mqtt(self.app)
self.subscribe_handled = False
self.unsubscribe_handled = False
@self.mqtt.on_subscribe()
def handle_subscribe(client, userdata, mid_, granted_qos):
self.subscribe_handled = True
@self.mqtt.on_unsubscribe()
def handle_unsubscribe(client, userdata, mid_):
self.unsubscribe_handled = True
ret, mid = self.mqtt.subscribe('home/test')
self.assertEqual(ret, MQTT_ERR_SUCCESS)
wait()
ret, mid = self.mqtt.unsubscribe('home/test')
self.assertEqual(ret, MQTT_ERR_SUCCESS)
wait()
self.assertTrue(self.subscribe_handled)
self.assertTrue(self.unsubscribe_handled)
def test_qos(self):
self.mqtt = Mqtt(self.app)
# subscribe to a topic with qos = 1
self.mqtt.subscribe('test', 1)
self.assertEqual(1, len(self.mqtt.topics))
self.assertEqual(('test', 1), self.mqtt.topics['test'])
# subscribe to same topic, overwrite qos
self.mqtt.subscribe('test', 2)
self.assertEqual(1, len(self.mqtt.topics))
self.assertEqual(('test', 2), self.mqtt.topics['test'])
# unsubscribe
self.mqtt.unsubscribe('test')
self.assertEqual(0, len(self.mqtt.topics))
def test_topic_count(self):
self.mqtt = Mqtt(self.app)
ret, mid = self.mqtt.subscribe('test')
self.assertEqual(1, len(self.mqtt.topics))
ret, mid = self.mqtt.subscribe('test')
self.assertEqual(1, len(self.mqtt.topics))
self.mqtt.unsubscribe('test')
self.assertEqual(0, len(self.mqtt.topics))
self.mqtt.unsubscribe('test')
self.assertEqual(0, len(self.mqtt.topics))
ret, mid = self.mqtt.subscribe('test1')
ret, mid = self.mqtt.subscribe('test2')
self.assertEqual(2, len(self.mqtt.topics))
self.mqtt.unsubscribe_all()
self.assertEqual(0, len(self.mqtt.topics))
self.mqtt._disconnect()
def test_publish(self):
self.mqtt = Mqtt(self.app)
self.handled_message = False
self.handled_topic = False
self.handled_publish = False
@self.mqtt.on_message()
def handle_message(client, userdata, message):
self.handled_message = True
@self.mqtt.on_publish()
def handle_publish(client, userdata, mid):
self.handled_publish = True
self.mqtt.subscribe('home/test')
wait()
self.mqtt.publish('home/test', 'hello world')
wait()
self.assertTrue(self.handled_message)
# self.assertTrue(self.handled_topic)
self.assertTrue(self.handled_publish)
def test_on_topic(self):
self.mqtt = Mqtt(self.app)
self.handled_message = False
self.handled_topic = False
@self.mqtt.on_message()
def handle_message(client, userdata, message):
self.handled_message = True
@self.mqtt.on_topic('home/test')
def handle_on_topic(*args, **kwargs):
self.handled_topic = True
@self.mqtt.on_connect()
def handle_connect(*args, **kwargs):
self.mqtt.subscribe('home/test')
wait()
self.mqtt.publish('home/test', 'hello world')
wait()
self.assertFalse(self.handled_message)
self.assertTrue(self.handled_topic)
def test_logging(self):
self.mqtt = Mqtt(self.app)
@self.mqtt.on_log()
def handle_logging(client, userdata, level, buf):
self.assertIsNotNone(client)
self.assertIsNotNone(level)
self.assertIsNotNone(buf)
self.mqtt.publish('test', 'hello world')
def test_disconnect(self):
self.mqtt = Mqtt()
self.connected = False
@self.mqtt.on_connect()
def handle_connect(*args, **kwargs):
self.connected = True
@self.mqtt.on_disconnect()
def handle_disconnect(*args, **kwargs):
self.connected = False
self.assertFalse(self.connected)
self.mqtt.init_app(self.app)
wait()
self.assertTrue(self.connected)
self.mqtt._disconnect()
wait()
self.assertFalse(self.connected)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4964789
|
<reponame>oleglpts/easy_daemon
from setuptools import setup
setup(
name='easy_daemon',
version='0.0.3',
packages=['easy_daemon'],
requires=[],
url='https://github.com/oleglpts/easy_daemon',
license='MIT',
platforms='any',
author='<NAME>',
author_email='<EMAIL>',
description='Easy daemon base class',
long_description='Very simple base daemon class. Just override method \'run\'. See examples',
classifiers=[
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
]
)
|
StarcoderdataPython
|
125194
|
<reponame>karthiksekaran/skip-tracking-selenium
import requests
import re
import json
import time
import logging
import pandas
from collections import OrderedDict
from bs4 import BeautifulSoup
def get_bs(session, url):
"""Makes a GET requests using the given Session object
and returns a BeautifulSoup object.
"""
r = None
while True:
r = session.get(url)
if r.ok:
break
return BeautifulSoup(r.text, 'lxml')
def make_login(session, base_url, credentials):
"""Returns a Session object logged in with credentials.
"""
login_form_url = '/login/device-based/regular/login/?refsrc=https%3A' \
'%2F%2Fmobile.facebook.com%2Flogin%2Fdevice-based%2Fedit-user%2F&lwv=100'
params = {'email': credentials['email'], 'pass': credentials['<PASSWORD>']}
while True:
time.sleep(3)
logged_request = session.post(base_url + login_form_url, data=params)
if logged_request.ok:
logging.info('[*] Logged in.')
break
def crawl_profile(session, base_url, profile_url, post_limit):
"""Goes to profile URL, crawls it and extracts posts URLs.
"""
profile_bs = get_bs(session, profile_url)
n_scraped_posts = 0
scraped_posts = list()
posts_id = None
while n_scraped_posts < post_limit:
try:
posts_id = 'recent'
posts = profile_bs.find('div', id=posts_id).div.div.contents
except Exception:
posts_id = 'structured_composer_async_container'
posts = profile_bs.find('div', id=posts_id).div.div.contents
posts_urls = [a['href'] for a in profile_bs.find_all('a', text='Full Story')]
for post_url in posts_urls:
# print(post_url)
try:
post_data = scrape_post(session, base_url, post_url)
scraped_posts.append(post_data)
except Exception as e:
logging.info('Error: {}'.format(e))
n_scraped_posts += 1
if posts_completed(scraped_posts, post_limit):
break
show_more_posts_url = None
if not posts_completed(scraped_posts, post_limit):
show_more_posts_url = profile_bs.find('div', id=posts_id).next_sibling.a['href']
profile_bs = get_bs(session, base_url + show_more_posts_url)
time.sleep(3)
else:
break
return scraped_posts
def posts_completed(scraped_posts, limit):
"""Returns true if the amount of posts scraped from
profile has reached its limit.
"""
if len(scraped_posts) == limit:
return True
else:
return False
def scrape_post(session, base_url, post_url):
"""Goes to post URL and extracts post data.
"""
post_data = OrderedDict()
post_bs = get_bs(session, base_url + post_url)
time.sleep(5)
# Here we populate the OrderedDict object
post_data['url'] = post_url
try:
post_text_element = post_bs.find('div', id='u_0_0').div
string_groups = [p.strings for p in post_text_element.find_all('p')]
strings = [repr(string) for group in string_groups for string in group]
post_data['text'] = strings
except Exception:
post_data['text'] = []
try:
post_data['media_url'] = post_bs.find('div', id='u_0_0').find('a')['href']
except Exception:
post_data['media_url'] = ''
try:
post_data['comments'] = extract_comments(session, base_url, post_bs, post_url)
except Exception:
post_data['comments'] = []
return dict(post_data)
def extract_comments(session, base_url, post_bs, post_url):
"""Extracts all coments from post
"""
comments = list()
    show_more_url = post_bs.find('a', href=re.compile(r'/story\.php\?story'))['href']
first_comment_page = True
logging.info('Scraping comments from {}'.format(post_url))
while True:
logging.info('[!] Scraping comments.')
time.sleep(3)
if first_comment_page:
first_comment_page = False
else:
post_bs = get_bs(session, base_url + show_more_url)
time.sleep(3)
try:
comments_elements = post_bs.find('div', id=re.compile('composer')).next_sibling \
                .find_all('div', id=re.compile(r'^\d+'))
except Exception:
pass
if len(comments_elements) != 0:
logging.info('[!] There are comments.')
else:
break
for comment in comments_elements:
comment_data = OrderedDict()
comment_data['text'] = list()
try:
comment_strings = comment.find('h3').next_sibling.strings
for string in comment_strings:
comment_data['text'].append(string)
except Exception:
pass
try:
media = comment.find('h3').next_sibling.next_sibling.children
if media is not None:
for element in media:
comment_data['media_url'] = element['src']
else:
comment_data['media_url'] = ''
except Exception:
pass
comment_data['profile_name'] = comment.find('h3').a.string
comment_data['profile_url'] = comment.find('h3').a['href'].split('?')[0]
comments.append(dict(comment_data))
        show_more_url = post_bs.find('a', href=re.compile(r'/story\.php\?story'))
if 'View more' in show_more_url.text:
logging.info('[!] More comments.')
show_more_url = show_more_url['href']
else:
break
return comments
def json_to_obj(filename):
"""Extracts dta from JSON file and saves it on Python object
"""
obj = None
with open(filename) as json_file:
obj = json.loads(json_file.read())
return obj
def save_data(data):
"""Converts data to JSON.
"""
with open('profile_posts_data.json', 'w') as json_file:
json.dump(data, json_file, indent=4)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
base_url = 'https://mobile.facebook.com'
session = requests.session()
# Extracts credentials for the login and all of the profiles URL to scrape
credentials = json_to_obj('credentials.json')
profiles_urls = json_to_obj('profiles_urls.json')
make_login(session, base_url, credentials)
posts_data = None
for profile_url in profiles_urls:
posts_data = crawl_profile(session, base_url, profile_url, 25)
logging.info('[!] Scraping finished. Total: {}'.format(len(posts_data)))
logging.info('[!] Saving.')
save_data(posts_data)
|
StarcoderdataPython
|
1827187
|
<gh_stars>1-10
from torch.utils.data import Dataset
import torch
class SentDataset(Dataset):
def __init__(self, data, label, tokenizer, context_length) -> None:
self.tokenizer = tokenizer
self.data = data
self.label = label
self.inputs = []
for i, example in enumerate(self.data):
tok_result = self.tokenizer(
example[0], # A sequence
example[1], # B sequence
max_length=context_length,
padding="max_length",
truncation=True,
return_tensors="pt"
)
tok_result['input_ids'] = torch.squeeze(tok_result['input_ids'])
tok_result['attention_mask'] = torch.squeeze(tok_result['attention_mask'])
tok_result['labels'] = self.label[i]
self.inputs.append(tok_result)
def __len__(self) -> int:
return len(self.inputs)
def __getitem__(self, index: int):
return self.inputs[index]
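# --- Minimal usage sketch (not part of the original module) ---
# Assumes a Hugging Face tokenizer (the `transformers` package and the
# "bert-base-uncased" checkpoint are assumptions, not part of this file);
# the sentence pairs and labels are made up to show the expected inputs.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    pairs = [("A man is playing guitar.", "Someone makes music."),
             ("The cat sleeps.", "A dog is barking.")]
    labels = [1, 0]
    dataset = SentDataset(pairs, labels, tokenizer, context_length=32)
    print(len(dataset))                   # 2
    print(dataset[0]["input_ids"].shape)  # torch.Size([32])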
|
StarcoderdataPython
|
1732211
|
<reponame>uncommoncode/robopen
import numpy as np
# TODO(emmett):
# * Dynamic programming TSP
# * Explore algorithm that picks between reversed line order or forward line order
# * Remove pen tap (down/up/down) or (up/down/up)
class PenPath:
def __init__(self, start_pt, end_pt):
self.start_pt = start_pt
self.end_pt = end_pt
class PointSearch:
def __init__(self, draw_paths):
self.draw_paths = draw_paths
self.start_points = np.array([path.start_pt for path in self.draw_paths])
self.end_points = np.array([path.end_pt for path in self.draw_paths])
self.se_cost_matrix = np.zeros((len(draw_paths), len(draw_paths)))
self.ss_cost_matrix = np.zeros_like(self.se_cost_matrix)
self.es_cost_matrix = np.zeros_like(self.se_cost_matrix)
self.ee_cost_matrix = np.zeros_like(self.se_cost_matrix)
def build_cost_matrix(self):
max_value = 1e9
# Start-End
for i in range(len(self.draw_paths)):
source_pt = self.draw_paths[i].start_pt
self.se_cost_matrix[i, :] = self.compute_distance(self.end_points, source_pt)
self.se_cost_matrix[i, i] = max_value
# Start-Start
for i in range(len(self.draw_paths)):
source_pt = self.draw_paths[i].start_pt
self.ss_cost_matrix[i, :] = self.compute_distance(self.start_points, source_pt)
self.ss_cost_matrix[i, i] = max_value
# End-Start
for i in range(len(self.draw_paths)):
source_pt = self.draw_paths[i].end_pt
self.es_cost_matrix[i, :] = self.compute_distance(self.start_points, source_pt)
self.es_cost_matrix[i, i] = max_value
# End-End
for i in range(len(self.draw_paths)):
source_pt = self.draw_paths[i].end_pt
self.ee_cost_matrix[i, :] = self.compute_distance(self.end_points, source_pt)
self.ee_cost_matrix[i, i] = max_value
@staticmethod
def compute_distance(points, point):
        d = np.sqrt(np.sum((points - point)**2, axis=1))
return d
def find_order(self):
# Start to start (reverse first)
# Start to end (reverse first and second)
# end to start (normal)
# end to end (reverse second)
        visited = np.zeros(len(self.draw_paths), dtype=int)
max_value = 1e9
search_matrix = np.array([
self.se_cost_matrix,
self.ss_cost_matrix,
self.es_cost_matrix,
self.ee_cost_matrix,
])
current_node = 0
order = []
reverse = []
total_cost = 0
current_reverse = False
while (visited == 0).any():
order.append(current_node)
reverse.append(current_reverse)
visited[current_node] = 1
if not (visited == 0).any():
break
prior_node = current_node
prior_reverse = current_reverse
ordering_index = None
while True:
# if reverse path beats normal path, then go the reverse route
# return reversal code with order
search_offset = 0
if not prior_reverse:
search_offset = 2
search_indices = []
search_values = []
# NOTE(emmett): hardcoded from ordering of searches
search_reverse = [False, True]
for i in range(2):
index = search_matrix[prior_node, :, search_offset + i].argmin()
search_indices.append(())
search_values.append((
index,
search_offset + i,
search_reverse[i],
search_matrix[prior_node, index, search_offset + i],
))
# Search from lowest to highest cost. If something found abort this loop. Otherwise keep going.
# NOTE(emmett): this is not optimal, because it could be the next item *not* reversed could be closer
# than the current closest reversed item.
# Ugh, maybe should just go ahead and make this a DP
found_visited = False
                # use a distinct loop variable so the `reverse` result list above is not shadowed
                for (index, offset, rev, value) in sorted(search_values, key=lambda t: t[3]):
                    current_node = index
                    ordering_index = offset
                    current_reverse = rev
if not visited[current_node]:
found_visited = True
break
if found_visited:
break
# No one else can go to current node
total_cost += search_matrix[prior_node, current_node, ordering_index]
search_matrix[:, current_node, :] = max_value
return order, reverse
def greedy_tsp(draw_paths):
# TODO(emmett): refactor to allow reversal
# TODO(emmett): coarsen to allow ~O(n^3) floyd-whatever shortest path traversal
cost_matrix = np.zeros((len(draw_paths), len(draw_paths)))
# O(n^2)
for i in range(len(draw_paths)):
xN, yN = draw_paths[i].end_pt
for j in range(len(draw_paths)):
if i == j:
continue
draw_path = draw_paths[j]
x0, y0 = draw_path.start_pt
dx = x0 - xN
dy = y0 - yN
distance = np.sqrt(dx * dx + dy * dy)
cost_matrix[i, j] = distance
min_i = 0
min_distance = cost_matrix.max()
for i in range(len(draw_paths)):
x0, y0 = draw_paths[i].start_pt
distance_to_origin = np.sqrt(x0 ** 2 + y0 ** 2)
if distance_to_origin <= min_distance:
min_i = i
min_distance = distance_to_origin
    visited = np.zeros(cost_matrix.shape[0], dtype=int)  # pylint: disable=E1136
max_value = 1e9
search_matrix = cost_matrix + np.eye(cost_matrix.shape[0]) * max_value # pylint: disable=E1136
current_node = min_i
order = []
total_cost = 0
while (visited == 0).any():
order.append(current_node)
visited[current_node] = 1
if not (visited == 0).any():
break
prior_node = current_node
while True:
# TODO(emmett): have reversal_search_matrix
# if reverse path beats normal path, then go the reverse route
# return reversal code with order
current_node = search_matrix[prior_node, :].argmin()
if visited[current_node] == 0:
break
search_matrix[prior_node, current_node] = max_value
# No one else can go to current node
total_cost += search_matrix[prior_node, current_node]
search_matrix[:, current_node] = max_value
return order
def remove_repeated_ops(gcode_ops):
output_ops = []
last_op = None
for op in gcode_ops:
if op == last_op:
continue
last_op = op
output_ops.append(op)
return output_ops
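# --- Minimal usage sketch (not part of the original module) ---
# The three pen strokes below are made up; greedy_tsp returns an index order
# that tries to minimise pen-up travel from the end of one stroke to the
# start of the next.
if __name__ == '__main__':
    strokes = [
        PenPath((0, 0), (1, 0)),
        PenPath((5, 0), (6, 0)),
        PenPath((1, 0), (2, 0)),
    ]
    print(greedy_tsp(strokes))  # e.g. [0, 2, 1]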
|
StarcoderdataPython
|
6527489
|
<gh_stars>1-10
import copy
from operator import itemgetter
import numpy as np
import torch
def batch_generator(dataset, batch_size, shuffle=True, mask=False):
"""
Generates a batch iterator for a dataset.
"""
data = dataset['data']
data_original = dataset['original']
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
if shuffle:
perm = np.random.permutation(data_size)
data = itemgetter(*perm)(data)
data_original = itemgetter(*perm)(data_original)
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
batched_data = copy.deepcopy(list(data[start_index:end_index]))
batched_original = copy.deepcopy(list(data_original[start_index:end_index]))
max_len_batch = len(max(batched_data, key=len))
for j in range(len(batched_data)):
batched_data[j].extend([0] * (max_len_batch - len(batched_data[j])))
yield torch.from_numpy(np.array(batched_data)).long(), batched_original
def batch_test_generator(dataset, batch_size):
"""
Generates a batch iterator for a dataset.
"""
data = dataset['data']
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
batched_data = copy.deepcopy(list(data[start_index:end_index]))
max_len_batch = len(max(batched_data, key=len))
for j in range(len(batched_data)):
batched_data[j].extend([0] * (max_len_batch - len(batched_data[j])))
yield torch.from_numpy(np.array(batched_data)).long()
def load_aspects(aspect_seeds, word2id, w_emb_array):
aspects_ids = []
seed_weights = []
with open(aspect_seeds, 'r') as fseed:
for line in fseed:
seeds = []
weights = []
for tok in line.split():
word, weight = tok.split(':')
if word in word2id:
seeds.append(word2id[word])
weights.append(float(weight))
else:
seeds.append(0)
weights.append(0.0)
aspects_ids.append(seeds)
seed_weights.append(weights)
seed_w = np.array(seed_weights)
seed_w = seed_w / np.linalg.norm(seed_w, ord=1, axis=1, keepdims=True) # 9 * 30
seed_w = np.expand_dims(seed_w, axis=2)
clouds = []
for seeds in aspects_ids:
clouds.append(w_emb_array[seeds])
a_emb = np.array(clouds) # 9,30,200
a_emb = (a_emb * seed_w).sum(axis=1).astype(np.float32) # 9, 200
return a_emb
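# --- Minimal usage sketch (not part of the original module) ---
# The toy dataset below is made up: 'data' holds variable-length id sequences
# and 'original' the matching raw strings; each batch yielded by
# batch_generator is zero-padded to the longest sequence it contains.
if __name__ == '__main__':
    toy = {
        'data': [[1, 2, 3], [4, 5], [6, 7, 8, 9]],
        'original': ['a b c', 'd e', 'f g h i'],
    }
    for batch, originals in batch_generator(toy, batch_size=2, shuffle=False):
        print(batch.shape, originals)
    # expected shapes: torch.Size([2, 3]) then torch.Size([1, 4])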
|
StarcoderdataPython
|
1840347
|
<gh_stars>1-10
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
from time import sleep
# pip install prototools
from prototools import Menu, ProtoDB, textbox, progressbar
from prototools.colorize import *
data = ProtoDB("songs")
ALBUM = data.get_data()
def play(song):
print()
textbox(
yellow(f"{song['title']} - {song['artist']}"),
light=False, bcolor="magenta", width=56, ml=1,
)
for _ in progressbar(
range(int(song['duration'])*10),
width=51, units=False, per=False,
spinvar_color=red, fg=green, bg=cyan,
):
sleep(0.1)
def main():
menu = Menu(
green("Jukebox"),
yellow("Simple CLI Jukebox"),
yellow("Playlist"),
yellow("Selecciona una opción"),
exit_option_text=magenta("Finalizar"),
exit_option_color=magenta,
arrow_keys=True,
)
for song in ALBUM:
menu.add_option(
"{} {}".format(
green(f"({song['duration']:.2f})"),
cyan(f"{song['title']} {song['artist']}")),
play, [song],
)
menu.settings(
dimension=(60, 20),
style="double",
color=magenta,
options_color=yellow,
separators=True,
paddings=(1, 1, 0, 0),
items_paddings=(1, 1, 1, 1),
subtitle_align="center",
)
menu.run()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
244236
|
<filename>metrics.py
"""
In this module we store functions to measure the performance of our model.
"""
import numpy as np
from sklearn.metrics import mean_absolute_error, make_scorer, f1_score, precision_score
def get_metric_name_mapping():
    return {
        _mae(): mean_absolute_error,
        _cm(): custom_error,
        _f1_score(): f1_score,
        _precision_score(): precision_score,
    }
def custom_error(
y_true, y_pred, *, overflow_cost: float = 0.7, underflow_cost: float = 0.3, aggregate : bool = True
):
"""A custom metric that is related to the business, the lower the better."""
diff = y_true - y_pred # negative if predicted value is greater than true value
sample_weight = np.ones_like(diff)
mask_underflow = diff > 0
sample_weight[mask_underflow] = underflow_cost
mask_overflow = diff <= 0
sample_weight[mask_overflow] = overflow_cost
if aggregate:
return mean_absolute_error(y_true, y_pred, sample_weight=sample_weight)
return np.abs(diff * sample_weight)
def get_metric_function(name: str, **params):
mapping = get_metric_name_mapping()
def fn(y, y_pred):
return mapping[name](y, y_pred, **params)
return fn
def get_scoring_function(name: str, **params):
mapping = {
_mae(): make_scorer(mean_absolute_error, greater_is_better=False, **params),
_f1_score(): make_scorer(f1_score, greater_is_better=True, **params),
_precision_score(): make_scorer(precision_score, greater_is_better=True, **params)
}
return mapping[name]
def _mae():
return "mean absolute error"
def _cm():
return "custom prediction error"
def _f1_score():
return "f1 score"
def _precision_score():
return "precision score"
|
StarcoderdataPython
|
3314175
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Time:05/26/2021 15:39 PM
@Email: <EMAIL>
"""
import os
import math
import numpy as np
import random
from config.config import Config
class Reader(object):
def __init__(self):
self.config = Config()
self.root = self.config.root
self.training_data_path = self.config.training_data_path
self.validation_data_path = self.config.validation_data_path
self.test_data_path = self.config.test_data_path
self.batch_size = self.config.batch_size
self.radius = 5
self.data_size_list = {'Suqian':[201, 76, 2], 'Changning':[151, 60, 2],
'Weifang':[151, 95, 2]}
self.data_range_V = {'Suqian':[1, 5, 0.02], 'Changning':[1, 4, 0.02],
'Weifang':[1, 4, 0.02]}
self.data_range_T = {'Suqian':[0.5, 8, 0.1], 'Changning':[0.1, 6, 0.1],
'Weifang':[0.6, 10, 0.1]}
self.uniform_data_size = [201, 76]
def get_all_filename(self, file_path):
filename_list = []
for file in os.listdir(file_path):
filename = os.path.splitext(file)[0]
filename_list.append(filename)
#filename_list = np.array(filename_list)
#filename_list.sort()
return filename_list
def get_batch_data(self, data_area, start_point, seed, file_list):
np.random.seed(seed)
np.random.shuffle(file_list)
train_batch_file = file_list[start_point:start_point + self.batch_size]
# print('Train File: \n')
# print(train_batch_file)
batch_data = []
batch_label = []
data_size = self.data_size_list[data_area]
VRange = self.data_range_V[data_area]
TRange = self.data_range_T[data_area]
row = data_size[0]
col = data_size[1]
for each_train_file in train_batch_file:
each_data1 = np.loadtxt(self.training_data_path + '/' + data_area +
'/group_image/' + each_train_file + '.dat')
each_label1 = np.loadtxt(self.training_data_path + '/' + data_area +
'/group_velocity/' + each_train_file + '.dat')
each_data1 = each_data1[:, :col]
each_label1 = each_label1[:col, 1]
if len(each_label1) < col:
zero = np.zeros(col - len(each_label1))
each_label1 = np.concatenate((each_label1, zero), axis=0)
else:
each_label1 = each_label1[:col]
len_each_data1 = len(each_data1)
if row > len_each_data1:
zero = np.zeros((row - len_each_data1, col))
each_data1 = np.concatenate((zero, each_data1), axis=0)
else:
each_data1 = each_data1[len_each_data1-row:]
num = 0
matrix1 = np.zeros((row, col))
r = self.radius
for i in each_label1:
if i != 0:
y_index = int((i - VRange[0])/VRange[2])
for j in range(len(matrix1)):
matrix1[j, num] = np.exp(-((y_index-j)**2)/(2*r**2))
num += 1
each_data2 = np.loadtxt(self.training_data_path + '/' + data_area +
'/phase_image/' + each_train_file + '.dat')
each_label2 = np.loadtxt(self.training_data_path + '/' + data_area +
'/phase_velocity/' + each_train_file + '.dat')
each_data2 = each_data2[:, :col]
each_label2 = each_label2[:col, 1]
if len(each_label2) < col:
zero = np.zeros(col - len(each_label2))
each_label2 = np.concatenate((each_label2, zero), axis=0)
else:
each_label2 = each_label2[:col]
len_each_data2 = len(each_data2)
if row > len_each_data2:
zero = np.zeros((row - len_each_data2, col))
each_data2 = np.concatenate((zero, each_data2), axis=0)
else:
each_data2 = each_data2[len_each_data2-row:]
num = 0
matrix2 = np.zeros((row, col))
r = self.radius
for i in each_label2:
if i != 0:
y_index = int((i - VRange[0])/VRange[2])
for j in range(len(matrix2)):
matrix2[j, num] = np.exp(-((y_index-j)**2)/(2*r**2))
num += 1
each_data = []
each_data.append(each_data1)
each_data.append(each_data2)
each_label = []
each_label.append(matrix1)
each_label.append(matrix2)
each_data = np.array(each_data)
each_label = np.array(each_label)
each_data = each_data[:, :self.uniform_data_size[0], :self.uniform_data_size[1]]
each_label = each_label[:, :self.uniform_data_size[0], :self.uniform_data_size[1]]
if data_area == 'Weifang' or data_area == 'Changning':
uniformData = np.zeros((2, self.uniform_data_size[0], self.uniform_data_size[1]))
uniformLabel = np.zeros((2, self.uniform_data_size[0], self.uniform_data_size[1]))
uniformData[:, :each_data.shape[1], :each_data.shape[2]] = each_data
uniformLabel[:, :each_label.shape[1], :each_label.shape[2]] = each_label
each_data = uniformData
each_label = uniformLabel
else:
pass
batch_data.append(each_data)
batch_label.append(each_label)
batch_data = np.array(batch_data)
batch_data = batch_data.transpose((0, 2, 3, 1))
batch_label = np.array(batch_label)
batch_label = batch_label.transpose((0, 2, 3, 1))
return batch_data, batch_label
def get_validation_data(self, data_area, file_list):
# random.seed(0)
file_list = random.sample(file_list, self.config.batch_size)
validation_data = []
validation_label = []
data_size = self.data_size_list[data_area]
VRange = self.data_range_V[data_area]
TRange = self.data_range_T[data_area]
row = data_size[0]
col = data_size[1]
for each_valid_file in file_list:
each_data1 = np.loadtxt(self.validation_data_path + '/' + data_area +
'/group_image/' + each_valid_file + '.dat')
each_label1 = np.loadtxt(self.validation_data_path + '/' + data_area +
'/group_velocity/' + each_valid_file + '.dat')
each_data1 = each_data1[:, :col]
each_label1 = each_label1[:col, 1]
if len(each_label1) < col:
zero = np.zeros(col - len(each_label1))
each_label1 = np.concatenate((each_label1, zero), axis=0)
else:
each_label1 = each_label1[:col]
len_each_data1 = len(each_data1)
if row > len_each_data1:
zero = np.zeros((row - len_each_data1, col))
each_data1 = np.concatenate((zero, each_data1), axis=0)
else:
each_data1 = each_data1[len_each_data1-row:]
num = 0
matrix1 = np.zeros((row,col))
r = self.radius
for i in each_label1:
if i != 0:
y_index = int((i - VRange[0])/VRange[2])
for j in range(len(matrix1)):
matrix1[j, num] = np.exp(-((y_index-j)**2)/(2*r**2))
num = num + 1
each_data2 = np.loadtxt(self.validation_data_path + '/' + data_area +
'/phase_image/' + each_valid_file + '.dat')
each_label2 = np.loadtxt(self.validation_data_path + '/' + data_area +
'/phase_velocity/' + each_valid_file + '.dat')
each_data2 = each_data2[:, :col]
each_label2 = each_label2[:col, 1]
if len(each_label2) < col:
zero = np.zeros(col - len(each_label2))
each_label2 = np.concatenate((each_label2, zero), axis=0)
else:
each_label2 = each_label2[:col]
len_each_data2 = len(each_data2)
if row > len_each_data2:
zero = np.zeros((row - len_each_data2, col))
each_data2 = np.concatenate((zero, each_data2), axis=0)
else:
each_data2 = each_data2[len_each_data2-row:]
num = 0
matrix2 = np.zeros((row,col))
r = self.radius
for i in each_label2:
if i != 0:
y_index = int((i - VRange[0])/VRange[2])
for j in range(len(matrix2)):
matrix2[j, num] = np.exp(-((y_index-j)**2)/(2*r**2))
num += 1
each_data = []
each_data.append(each_data1)
each_data.append(each_data2)
each_label = []
each_label.append(matrix1)
each_label.append(matrix2)
each_data = np.array(each_data)
each_label = np.array(each_label)
each_data = each_data[:, :self.uniform_data_size[0], :self.uniform_data_size[1]]
each_label = each_label[:, :self.uniform_data_size[0], :self.uniform_data_size[1]]
if data_area == 'Weifang' or data_area == 'Changning':
uniformData = np.zeros((2, self.uniform_data_size[0], self.uniform_data_size[1]))
uniformLabel = np.zeros((2, self.uniform_data_size[0], self.uniform_data_size[1]))
uniformData[:, :each_data.shape[1], :each_data.shape[2]] = each_data
uniformLabel[:, :each_label.shape[1], :each_label.shape[2]] = each_label
each_data = uniformData
each_label = uniformLabel
else:
pass
validation_data.append(each_data)
validation_label.append(each_label)
validation_data = np.array(validation_data)
validation_data = validation_data.transpose((0,2,3,1))
validation_label = np.array(validation_label)
validation_label = validation_label.transpose((0,2,3,1))
return validation_data, validation_label, file_list
def get_test_file(self):
filename_list = []
for file in os.listdir(self.config.test_data_path + '/' + 'group_image'):
filename = os.path.splitext(file)[0]
filename_list.append(filename)
filename_list = np.array(filename_list)
filename_list.sort()
return filename_list
def get_disp_matrix(self, file_path, size):
''' Read a dispersion matrix(image).
Attributes:
file_path: File path.
            size ([int, int]): Expected matrix size. Zero padding or cutting
              is applied if the matrix in the file does not match this size.
Raises:
Exception: Wrong input size.
Returns:
A numpy array with the size of 'size'.
'''
input_matrix = np.loadtxt(file_path)
input_matrix = input_matrix[:size[0], :size[1]]
input_size = input_matrix.shape
if input_size[0] <= size[0]:
st = size[0] - input_size[0]
matrix = np.zeros(size)
if input_size[1] >= size[1]:
matrix[int(st):, :] = input_matrix[:, :size[1]]
else:
matrix[int(st):, :input_size[1]] = input_matrix
else:
raise Exception('Wrong input size!')
return matrix
def get_label_matrix(self, file_path, size):
''' Read a dispersion curve and generate a label prob matrix.
Attributes:
file_path: File path.
            size ([int, int]): Expected matrix size.
Returns:
A numpy array with the size of 'size'.
'''
try:
disp_curve = np.loadtxt(file_path)
disp_curve = disp_curve[:size[1], 1]
except:
disp_curve = np.zeros(size[1])
matrix = np.zeros(size)
for i in range(len(disp_curve)):
vel = disp_curve[i]
if vel != 0:
y_index = int((vel - self.config.range_V[0])/self.config.dV)
for j in range(size[0]):
matrix[j, i] = np.exp(-((y_index - j)**2)/(2*self.radius**2))
return matrix
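# --- Minimal usage sketch (not part of the original module) ---
# Assumes config/config.py provides the paths used by Reader; the two file
# names below are hypothetical placeholders for a dispersion image and a
# dispersion curve on disk.
if __name__ == '__main__':
    reader = Reader()
    image = reader.get_disp_matrix('group_image/example.dat', size=[201, 76])
    label = reader.get_label_matrix('group_velocity/example.dat', size=[201, 76])
    print(image.shape, label.shape)  # (201, 76) (201, 76)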
|
StarcoderdataPython
|
4884677
|
<reponame>earth-emoji/infotechia
from django.shortcuts import render, redirect
from .forms import ThreadForm
from .models import Topic, Thread
# Create your views here.
def topic_list(request):
template_name = "topics/list.html"
topics = Topic.objects.all()
search_term = ''
if 'q' in request.GET:
search_term = request.GET['q']
topics = topics.filter(name__icontains=search_term)
context = {
'topics': topics,
'search_term': search_term,
}
return render(request, template_name, context)
def thread_list(request, slug):
topic = Topic.objects.get(slug=slug)
template_name = "threads/list.html"
threads = Thread.objects.filter(topic=topic)
search_term = ''
if 'q' in request.GET:
search_term = request.GET['q']
threads = threads.filter(body__icontains=search_term)
context = {
'topic': topic,
'threads': threads,
'search_term': search_term,
}
return render(request, template_name, context)
def thread_create(request, slug):
template_name = 'threads/thread_form.html'
topic = Topic.objects.get(slug=slug)
if request.method == 'POST':
form = ThreadForm(request.POST or None)
if form.is_valid():
c = form.save(commit=False)
c.topic = topic
c.creator = request.user
c.save()
return redirect("topics:threads", topic.slug)
else:
form = ThreadForm()
return render(request, template_name, {'form': form})
|
StarcoderdataPython
|
5086445
|
<reponame>mghendi/Portfolio<gh_stars>0
from django.http import HttpResponse
from django.shortcuts import render
def home_page(request):
home_title = "Hi !"
context = {"title": home_title}
#doc = "<h1>{title}</h1>".format(title=title)
#django_rendered_doc = "<h1>{{title}}</h1>".format(title=title)
return render(request, "home.html", context)
def work(request):
context = {"title": "Things I've Learnt"}
return render(request, "work.html", context)
def blog(request):
context = {"title": "Tips, Tricks & Tutorials"}
return render(request, "blog.html", context)
def resume(request):
context = {"title": "Resume"}
return render(request, "resume.html", context)
|
StarcoderdataPython
|
4937081
|
import logging
import os
import time
from concurrent import futures
from multiprocessing import cpu_count
from google.cloud import storage
from tools.local_utils import get_settings
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
def get_elapsed_seconds(start_time):
return round(time.time() - start_time, 2)
def get_upload_variables():
settings = get_settings()
bucket_name = settings['blog_config']['google_cloud_bucket_name']
source_root = settings['config']['wp_uploads_path']
blob_name_prefix = settings['blog_config']['blob_name_prefix']
max_files_to_upload = int(settings['config']['max_files_to_upload'])
log.info(f'Copying {max_files_to_upload} files from: {source_root} to blob name prefixed with `{blob_name_prefix}`')
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
return blob_name_prefix, bucket, max_files_to_upload, source_root
def upload_file_to_bucket(source_root, subdir, file, bucket, blob_name_prefix, log_upload_file_message_prefix=''):
# TODO: test with Windows paths (C:, D: etc.)
partial_blob_name = f'{subdir}/{file}'.replace(source_root, '') # 2020/01/foo-bar.jpg
blob_name = f'{blob_name_prefix}{partial_blob_name}' # media/2020/01/foo-bar.jpg
log.info(f'{log_upload_file_message_prefix}Uploading file {partial_blob_name}')
file_path = os.path.join(subdir, file)
blob = bucket.blob(blob_name)
blob.upload_from_filename(file_path)
return blob.public_url, True
def upload_files_to_bucket():
blob_name_prefix, bucket, max_files_to_upload, source_root = get_upload_variables()
start_time = time.time()
files_count = 0
for subdir, dirs, files in os.walk(os.path.join(source_root)):
log.info(f'Processing directory: {subdir}')
for file in files:
if files_count == max_files_to_upload:
elapsed_time = get_elapsed_seconds(start_time)
log.info(f'Uploaded {max_files_to_upload} files in {elapsed_time} seconds')
return
files_count += 1
blob_url = upload_file_to_bucket(source_root, subdir, file, bucket, blob_name_prefix, f'{files_count}: ')
log.info(f'File uploaded: {blob_url}')
elapsed_time = get_elapsed_seconds(start_time)
log.info(f'Uploaded a total of {files_count} files in {elapsed_time} seconds')
def upload_files_to_bucket_in_parallel():
# Inspired by https://github.com/googleapis/python-storage/issues/36
blob_name_prefix, bucket, max_files_to_upload, source_root = get_upload_variables()
pool = futures.ThreadPoolExecutor(max_workers=cpu_count())
uploads = []
start_time = time.time()
files_count = 0
for subdir, dirs, files in os.walk(os.path.join(source_root)):
log.info(f'Processing directory: {subdir}')
for file in files:
files_count += 1
if files_count > max_files_to_upload:
break
upload = pool.submit(upload_file_to_bucket, source_root, subdir, file, bucket, blob_name_prefix)
uploads.append(upload)
future_count = 0
successes = []
for f in futures.as_completed(uploads):
future_count += 1
blob_url, success = f.result()
successes.append(success)
if success:
log.info(f'File uploaded: {blob_url}')
else:
log.error(f'Error uploading: {blob_url}')
if files_count != future_count:
log.error('Some upload jobs did not complete!')
if not all(successes):
log.error('Some upload jobs completed but were not successful!')
elapsed_time = get_elapsed_seconds(start_time)
log.info(f'Uploaded a total of {files_count} files in {elapsed_time} seconds')
if __name__ == '__main__':
# upload_files_to_bucket()
upload_files_to_bucket_in_parallel()
|
StarcoderdataPython
|
3232894
|
<filename>api/app/routers/users.py
from fastapi import APIRouter, Depends, Path, Response
from fastapi_pagination import Page
from .. import deps
from ..schemas import Note, User, UserIn, UserInDb
from ..services import AuthService, NoteService, UserService
router = APIRouter(prefix="/users", tags=["users"])
@router.get(
"", dependencies=[Depends(deps.get_current_superuser)], response_model=Page[User]
)
async def get_users(user_service: UserService = Depends(deps.get_user_service)):
return await user_service.get_users()
@router.post(
"",
status_code=201,
response_model=User,
responses={
400: {"description": "User Already Registered"},
},
)
async def create_user(
user_in: UserIn, user_service: UserService = Depends(deps.get_user_service)
):
return await user_service.create(user_in)
@router.get(
"/me",
response_model=User,
responses={
404: {"description": "User not found"},
},
)
async def get_current_user(current_user: UserInDb = Depends(deps.get_current_user)):
return current_user
@router.put(
"/me",
status_code=204,
responses={
400: {"description": "User Already Registered"},
404: {"description": "User Not Found"},
},
)
async def update_current_user(
*,
current_user: UserInDb = Depends(deps.get_current_user),
user_in: UserIn,
user_service: UserService = Depends(deps.get_user_service),
):
await user_service.create_or_update(current_user.id, user_in)
@router.delete(
"/me",
status_code=204,
responses={
404: {"description": "User Not Found"},
},
)
async def remove_current_user(
current_user: UserInDb = Depends(deps.get_current_user),
user_service: UserService = Depends(deps.get_user_service),
):
await user_service.remove(current_user.id)
@router.get(
"/me/notes",
response_model=Page[Note],
responses={
404: {"description": "User Not Found"},
},
)
async def get_current_user_notes(
current_user: UserInDb = Depends(deps.get_current_user),
note_service: NoteService = Depends(deps.get_note_service),
):
return await note_service.get_user_notes(current_user.id)
@router.get(
"/{user_id}",
response_model=User,
responses={
404: {"description": "User Not Found"},
},
)
async def get_user(
current_user: UserInDb = Depends(deps.get_current_user),
user_id: int = Path(..., ge=1),
user_service: UserService = Depends(deps.get_user_service),
):
AuthService.owner_or_superuser_required(current_user, user_id)
return await user_service.get_user(user_id)
@router.put(
"/{user_id}",
status_code=204,
responses={
201: {},
404: {"description": "User Not Found"},
},
)
async def update_user(
*,
response: Response,
current_user: UserInDb = Depends(deps.get_current_user),
user_id: int = Path(..., ge=1),
user_in: UserIn,
user_service: UserService = Depends(deps.get_user_service),
):
AuthService.owner_or_superuser_required(current_user, user_id)
user = await user_service.create_or_update(user_id, user_in)
if user:
response.status_code = 201
return user
@router.delete(
"/{user_id}",
status_code=204,
responses={
404: {"description": "User Not Found"},
},
)
async def remove_user(
current_user: UserInDb = Depends(deps.get_current_user),
user_id: int = Path(..., ge=1),
user_service: UserService = Depends(deps.get_user_service),
):
AuthService.owner_or_superuser_required(current_user, user_id)
await user_service.remove(user_id)
@router.get(
"/{user_id}/notes",
response_model=Page[Note],
responses={
404: {"description": "User Not Found"},
},
)
async def get_user_notes(
current_user: UserInDb = Depends(deps.get_current_user),
user_id: int = Path(..., ge=1),
note_service: NoteService = Depends(deps.get_note_service),
):
AuthService.owner_or_superuser_required(current_user, user_id)
return await note_service.get_user_notes(user_id)
|
StarcoderdataPython
|
9704035
|
<gh_stars>0
import logging
from pulsar.apps.http import HttpClient
from ..utils import get_auth
from .repo import GitRepo
class GithubApi:
def __init__(self, auth=None, http=None):
if not http:
http = HttpClient(headers=[('Content-Type', 'application/json')])
self.auth = auth or get_auth()
self.http = http
self.logger = logging.getLogger('agile.github')
@property
def api_url(self):
return 'https://api.github.com'
def repo(self, repo_path):
return GitRepo(self, repo_path)
|
StarcoderdataPython
|
9625350
|
<reponame>HendrikPN/scigym
VERSION = '0.0.3'
|
StarcoderdataPython
|
224088
|
<gh_stars>1-10
import gzip
import itertools
import os
import shutil
import tempfile
import zipfile
from io import BytesIO
from struct import unpack_from
from subprocess import run
import djclick as click
import requests
from django.core.files.base import ContentFile
from PIL import Image, ImageDraw, ImageFilter
from boundlexx.api.tasks import purge_static_cache
from boundlexx.boundless.models import (
Beacon,
BeaconPlotColumn,
BeaconScan,
Color,
World,
)
from boundlexx.boundless.utils import SPHERE_GAP, crop_world, html_name
from boundlexx.utils import make_thumbnail
BASE_DIR = "/tmp/maps"
GLOW_SOLID = 5
GLOW_WIDTH = 20
BLEND_START = 1
BLEND_END = 0
TRANS_START = 255
TRANS_END = 128
BLUR = 10
def _draw_world_image( # pylint: disable=too-many-locals
atlas_image_file, world_id, atmo_color
):
sphere_image_file = os.path.join(BASE_DIR, f"{world_id}_sphere.png")
run(
["/usr/local/bin/convert-atlas", atlas_image_file, sphere_image_file],
check=True,
capture_output=True,
)
img = crop_world(Image.open(sphere_image_file))
size, _ = img.size
trans_diff = TRANS_START - TRANS_END
blur_diff = BLEND_START - BLEND_END
for offset in range(GLOW_WIDTH):
offset = max(0, offset - GLOW_SOLID)
trans = TRANS_START - int(offset / GLOW_WIDTH * trans_diff)
blend = BLEND_START - offset / GLOW_WIDTH * blur_diff
ellipse_coors = (
SPHERE_GAP + offset,
SPHERE_GAP + offset,
size - SPHERE_GAP - offset,
size - SPHERE_GAP - offset,
)
# add atmo color
atmo = img.copy()
drawa = ImageDraw.Draw(atmo)
drawa.ellipse(
ellipse_coors,
outline=(*atmo_color, trans),
width=2,
)
img = Image.blend(img, atmo, blend)
outer_width = 2
outer_ellipse = (
SPHERE_GAP - outer_width,
SPHERE_GAP - outer_width,
size - SPHERE_GAP + outer_width,
size - SPHERE_GAP + outer_width,
)
drawa = ImageDraw.Draw(img)
drawa.ellipse(
outer_ellipse,
outline=(0, 0, 0, 255),
width=outer_width,
)
mask = Image.new("L", img.size, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse(
outer_ellipse,
outline=255,
width=outer_width * 2,
)
blurred = img.filter(ImageFilter.GaussianBlur(BLUR))
img.paste(blurred, mask=mask)
with BytesIO() as output:
img.save(output, format="PNG")
content = output.getvalue()
image = ContentFile(content)
image.name = f"{world_id}.png"
return image
def _process_image(world, root, name):
atlas_image_file = os.path.join(root, name)
with open(atlas_image_file, "rb") as image_file:
atlas_image = ContentFile(image_file.read())
atlas_image.name = f"{world.id}.png"
image = _draw_world_image(atlas_image_file, world.id, world.atmosphere_color_tuple)
if world.atlas_image is not None and world.atlas_image.name:
world.atlas_image.delete()
if world.image is not None and world.image.name:
world.image.delete()
if world.image_small is not None and world.image_small.name:
world.image_small.delete()
world.atlas_image = atlas_image
world.image = image
world.image_small = make_thumbnail(image)
world.save()
def _process_beacons(world, root, name): # pylint: disable=too-many-locals
Beacon.objects.filter(world=world).delete()
with gzip.open(os.path.join(root, name)) as beacons_file:
buffer = beacons_file.read()
if len(buffer) == 0:
return
# adapted from https://docs.playboundless.com/modding/http-beacons.html
offset = 0
num_beacons, world_size = unpack_from("<HH", buffer, offset)
offset += 4
colors = Color.objects.all()
beacons = []
for _ in range(num_beacons):
skipped = unpack_from("<H", buffer, offset)[0]
offset += 2
if skipped != 0:
num_beacons -= skipped
break
campfire, pos_x, pos_y, pos_z, mayor_name_len = unpack_from(
"<BhhhB", buffer, offset
)
offset += 8
mayor_name = unpack_from(f"<{mayor_name_len}s", buffer, offset)[0]
mayor_name = mayor_name.decode("utf-8")
offset += mayor_name_len
beacon = Beacon.objects.create(
world=world,
location_x=pos_x,
location_y=pos_y,
location_z=-pos_z,
is_campfire=bool(campfire),
)
if campfire != 0:
BeaconScan.objects.create(beacon=beacon, mayor_name=mayor_name)
else:
prestige, compactness, num_plots, num_plot_columns, name_len = unpack_from(
"<QbIIB", buffer, offset
)
offset += 18
name = unpack_from(f"<{name_len}s", buffer, offset)[0]
name = name.decode("utf-8")
offset += name_len
BeaconScan.objects.create(
beacon=beacon,
mayor_name=mayor_name,
name=name,
text_name=html_name(name, strip=True, colors=colors),
html_name=html_name(name, colors=colors),
prestige=prestige,
compactness=compactness,
num_plots=num_plots,
num_columns=num_plot_columns,
)
beacons.append(beacon)
for z, x in itertools.product(range(world_size), repeat=2):
beacon_index, plot_count = unpack_from("<HB", buffer, offset)
offset += 3
if beacon_index != 0:
BeaconPlotColumn.objects.create(
beacon=beacons[beacon_index - 1], plot_x=x, plot_z=z, count=plot_count
)
@click.command()
@click.argument("dropbox_url", nargs=1)
def command(dropbox_url):
click.echo("Downloading zip...")
response = requests.get(dropbox_url)
response.raise_for_status()
click.echo("Writing zip...")
atlas_zip_file = tempfile.NamedTemporaryFile( # pylint: disable=consider-using-with
delete=False
)
atlas_zip_file.write(response.content)
atlas_zip_file.close()
os.makedirs(BASE_DIR)
with zipfile.ZipFile(atlas_zip_file.name, "r") as zip_file:
zip_file.extractall(BASE_DIR)
click.echo("Processing data...")
for root, _, files in os.walk(BASE_DIR):
with click.progressbar(files, show_percent=True, show_pos=True) as pbar:
for name in pbar:
pbar.label = name
pbar.render_progress()
world_id = int(name.split("_")[1])
world = World.objects.filter(id=world_id).first()
if world is None:
continue
if name.endswith(".png"):
_process_image(world, root, name)
elif name.endswith(".beacons.gz"):
_process_beacons(world, root, name)
click.echo("Cleaning up...")
os.remove(atlas_zip_file.name)
shutil.rmtree(BASE_DIR)
click.echo("Purging CDN cache...")
purge_static_cache(["worlds", "atlas"])
|
StarcoderdataPython
|
9753475
|
<gh_stars>10-100
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-wallet',
packages=['wallets'],
version='0.3',
license='MIT',
description='Apple Wallet integration for a django project',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Silver3310/django-wallets',
download_url='https://github.com/Silver3310/django-wallets/archive/v_02.tar.gz',
keywords=['django', 'wallet', 'apple', 'pass'],
install_requires=[
'celery',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
StarcoderdataPython
|
6582726
|
<filename>test/files/column_arguments2.py
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
Column("name", Integer, index=True)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "str" # noqa E501
Column(None, name="name")
Column(Integer, name="name", index=True)
Column("name", ForeignKey("a.id"))
Column(ForeignKey("a.id"), type_=None, index=True)
Column(ForeignKey("a.id"), name="name", type_=Integer())
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "str", "None" # noqa E501
Column("name", None)
Column("name", index=True)
Column(ForeignKey("a.id"), name="name", index=True)
Column(type_=None, index=True)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "ForeignKey" # noqa E501
Column(None, ForeignKey("a.id"))
Column("name")
Column(name="name", type_=None, index=True)
Column(ForeignKey("a.id"), name="name", type_=None)
Column(Integer)
Column(ForeignKey("a.id"), type_=Integer())
Column("name", Integer, ForeignKey("a.id"), index=True)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "str", "None", "ForeignKey", "bool # noqa E501
Column("name", None, ForeignKey("a.id"), index=True)
Column(ForeignKey("a.id"), index=True)
Column("name", Integer)
Column(Integer, name="name")
Column(Integer, ForeignKey("a.id"), name="name", index=True)
Column(ForeignKey("a.id"), type_=None)
Column(ForeignKey("a.id"), name="name")
Column(name="name", index=True)
Column(type_=None)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "bool" # noqa E501
Column(None, index=True)
Column(name="name", type_=None)
Column(type_=Integer(), index=True)
Column("name", Integer, ForeignKey("a.id"))
Column(name="name", type_=Integer(), index=True)
Column(Integer, ForeignKey("a.id"), index=True)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "str", "None", "ForeignKey" # noqa E501
Column("name", None, ForeignKey("a.id"))
Column(index=True)
Column("name", type_=None, index=True)
Column("name", ForeignKey("a.id"), type_=Integer(), index=True)
Column(ForeignKey("a.id"))
Column(Integer, ForeignKey("a.id"))
Column(Integer, ForeignKey("a.id"), name="name")
Column("name", ForeignKey("a.id"), index=True)
Column("name", type_=Integer(), index=True)
Column(ForeignKey("a.id"), name="name", type_=Integer(), index=True)
Column(name="name")
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "str", "None", "bool" # noqa E501
Column("name", None, index=True)
Column("name", ForeignKey("a.id"), type_=None, index=True)
Column("name", type_=Integer())
# EXPECTED_MYPY: No overload variant of "Column" matches argument type "None"
Column(None)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "ForeignKey", "bool" # noqa E501
Column(None, ForeignKey("a.id"), index=True)
Column("name", ForeignKey("a.id"), type_=None)
Column(type_=Integer())
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "ForeignKey", "str", "bool" # noqa E501
Column(None, ForeignKey("a.id"), name="name", index=True)
Column(Integer, index=True)
Column(ForeignKey("a.id"), name="name", type_=None, index=True)
Column(ForeignKey("a.id"), type_=Integer(), index=True)
Column(name="name", type_=Integer())
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "str", "bool" # noqa E501
Column(None, name="name", index=True)
Column()
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "None", "ForeignKey", "str" # noqa E501
Column(None, ForeignKey("a.id"), name="name")
Column("name", type_=None)
Column("name", ForeignKey("a.id"), type_=Integer())
# We are no longer able to detect there, as the kwargs must stay in place.
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "Type[Integer]", "ForeignKey", "Type[String]" # noqa E501
Column(Integer, ForeignKey("a.id"), type_=String)
# EXPECTED_MYPY: No overload variant of "Column" matches argument types "str", "ForeignKey", "str" # noqa E501
Column("name", ForeignKey("a.id"), name="String")
|
StarcoderdataPython
|
5166103
|
<gh_stars>0
"""
Random field classes
"""
import numpy as np
import pyrft as pr
class Field:
""" Field class
Parameters
----------
field: a numpy.ndarray of shape (Dim) or (Dim, fibersize)
Here Dim is the size of the field and fibersize is an index for the fields,
typically fibersize is the number of subjects.
mask: Bool,
    a boolean numpy array giving the spatial mask, the size of which
must be compatible with the field
Returns
-------
An object of class field
Examples
--------
# 1D
field = np.random.randn(100, 30)
mask = np.ones((100, 1), dtype=bool)
exField = pr.Field(field, mask)
print(exField)
# 2D
field = np.random.randn(100, 100, 30)
mask = np.ones((100, 100), dtype=bool)
exField = pr.Field(field, mask)
print(exField)
# 2D no subjects
field = np.random.randn(100, 100)
mask = np.ones((100, 100), dtype=bool)
exField = pr.Field(field, mask)
print(exField)
-----------------------------------------------------------------------------
"""
def __init__(self, field, mask):
self.field = field
self.fieldsize = field.shape
masksize = mask.shape
# Check that the mask is a boolean array
        if mask.dtype != bool:
raise Exception("The mask must be a boolean array")
# Assign the dimension
self.D = len(masksize)
# Cover the 1D case where the mask is a vector!
# (Allows for row and column vectors)
if (self.D == 2) and (masksize[0] == 1 or masksize[1] == 1):
self.D = 1
# Force the mask to be a row vector
if masksize[1] == 1:
mask = mask.transpose()
self.masksize = tuple(np.sort(masksize))
else:
# In D > 1 just assign the mask size
self.masksize = masksize
# Obtain the fibersize
if self.masksize == self.fieldsize:
# If the size of the mask is the size of the data then there is just
# one field so the fibersize is set to 1
self.fibersize = 1
else:
self.fibersize = self.field.shape[self.D:]
if len(self.fibersize) == 1:
self.fibersize = self.field.shape[self.D:][0]
elif self.masksize == (1, 1):
self.fibersize = self.field.shape[self.D + 1:][0]
# Ensure that the size of the mask matches the size of the field
if self.D > 1 and field.shape[0: self.D] != self.masksize:
raise Exception("The size of the spatial field must match the mask")
elif self.D == 1 and field.shape[0: self.D][0] != self.masksize[1]:
# If the size of the mask doesn't match the field then return an error
raise Exception("The size of the spatial field must match the mask")
# If it passes the above tests assign the mask to the array
self.mask = mask
def __str__(self):
# Initialize string output
str_output = ''
# Get a list of the attributes
attributes = vars(self).keys()
# Add each attribute (and its properties to the output)
for atr in attributes:
if atr in ['D', 'fibersize']:
str_output += atr + ': ' + str(getattr(self, atr)) + '\n'
elif atr in ['_Field__mask']:
pass
elif atr in ['_Field__fieldsize']:
str_output += 'fieldsize' + ': ' + str(getattr(self, atr)) + '\n'
elif atr in ['_Field__masksize']:
str_output += 'masksize' + ': ' + str(getattr(self, atr)) + '\n'
elif atr in ['_Field__field']:
str_output += 'field' + ': ' + str(getattr(self, atr).shape) + '\n'
else:
str_output += atr + ': ' + str(getattr(self, atr).shape) + '\n'
# Return the string (minus the last \n)
return str_output[:-1]
#Getting and setting field
def _get_field(self):
return self.__field
def _set_field(self, value):
if hasattr(self, 'mask'):
if self.D > 1:
if value.shape[0:self.D] != self.masksize:
raise ValueError("The size of the field must be compatible with the mask")
else:
if value.shape[0:self.D][0] != self.masksize[1]:
raise ValueError("The size of the field must be compatible with the mask")
self.__field = value
self.fieldsize = value.shape
#Getting and setting mask
def _get_mask(self):
return self.__mask
def _set_mask(self, value):
if (self.D > 1) and value.shape != self.masksize:
raise ValueError("The size of the mask must be compatible with the field")
elif (self.D == 1) and tuple(np.sort(value.shape)) != self.masksize:
raise ValueError("The size of the mask must be compatible with the field")
        if value.dtype != bool:
raise Exception("The mask must be a boolean array")
self.__mask = value
self.masksize = value.shape
#Getting and setting fieldsize
def _get_fieldsize(self):
return self.__fieldsize
def _set_fieldsize(self, value):
if value != self.field.shape:
raise Exception("The field size cannot be changed directly")
self.__fieldsize = value
#Getting and setting masksize
def _get_masksize(self):
return self.__masksize
def _set_masksize(self, value):
if hasattr(self, 'mask'):
if value != self.mask.shape:
raise Exception("The field size cannot be changed directly")
self.__masksize = value
# Set properties
field = property(_get_field, _set_field)
mask = property(_get_mask, _set_mask)
fieldsize = property(_get_fieldsize, _set_fieldsize)
masksize = property(_get_masksize, _set_masksize)
def make_field(array, fibersize=1):
""" conv2field converts a numpy array to am object of class field
Parameters
----------
array: numpy.ndarray of shape (Dim, fibersize),
Here Dim is the spatial size and fibersize is the index dimension
fibersize: int,
specifies the size of the fiber, typically this is 1 i.e. when the
last dimension of array corresponds to the fibersize
Returns
-------
F: object of class field
Examples
--------
data = np.random.randn(100, 30)
F = pr.make_field(data)
"""
fieldsize = array.shape
D = len(fieldsize) - fibersize
if D == 1:
masksize = (fieldsize[0], 1)
else:
masksize = fieldsize[0:D]
mask = np.ones(masksize, dtype = bool)
f = pr.Field(array, mask)
return f
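# A minimal usage sketch, not part of the original module: it only assumes the
# numpy and pyrft imports above and shows how the fiber dimension is inferred
# for a 2D field with three subjects.
if __name__ == "__main__":
    example_data = np.random.randn(50, 50, 3)   # 50x50 spatial grid, 3 subjects
    example_field = make_field(example_data)
    print(example_field)   # fieldsize (50, 50, 3), masksize (50, 50), fibersize 3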
|
StarcoderdataPython
|
6533772
|
"""Common constants."""
INDENT = ' '
ADDED = '+'
REMOVED = '-'
NESTED = 'nested'
CHANGED = 'changed'
UNCHANGED = ' '
SIMPLE = 'simple'
COMPLEX = 'complex value'
|
StarcoderdataPython
|
8117145
|
# coding=utf-8
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.task.task import Task
from pants.util.memo import memoized_property
from structured.subsystems.r_distribution import RDistribution
class RTask(Task):
@classmethod
def subsystem_dependencies(cls):
return super(RTask, cls).subsystem_dependencies() + (
RDistribution.Factory.scoped(cls),
)
@memoized_property
def r_distribution(self):
return RDistribution.Factory.scoped_instance(self).create()
|
StarcoderdataPython
|
121094
|
"""empty message
Revision ID: 137ed4905569
Revises: <PASSWORD>
Create Date: 2016-09-26 17:22:28.928084
"""
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
import sqlalchemy as sa
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('test', sa.Column('timeout', sa.Integer()))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('test', 'timeout')
### end Alembic commands ###
|
StarcoderdataPython
|
3573518
|
<filename>horizon/openstack_dashboard/dashboards/settings/logger/panel.py
__author__ = 'gaga'
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.settings import dashboard
class Logger(horizon.Panel):
name = _("Action Log")
slug = 'logger'
dashboard.Settings.register(Logger)
|
StarcoderdataPython
|
11336562
|
import os
import py
import numpy as np
import openpnm as op
from openpnm.models.misc import from_neighbor_pores
class SalomeTest:
def setup_class(self):
np.random.seed(10)
self.net = op.network.Cubic(shape=[2, 2, 2])
self.net["pore.diameter"] = 0.5 + np.random.rand(self.net.Np) * 0.5
Dt = from_neighbor_pores(target=self.net, prop="pore.diameter") * 0.5
self.net["throat.diameter"] = Dt
self.net["throat.length"] = 1.0
def teardown_class(self):
os.remove(f"{self.net.name}.py")
os.remove("salome_custom.py")
def test_export_data_salome(self):
op.io.to_salome(network=self.net)
assert os.path.isfile(f"{self.net.name}.py")
op.io.to_salome(network=self.net, filename="salome_custom")
assert os.path.isfile("salome_custom.py")
if __name__ == '__main__':
# All the tests in this file can be run with 'playing' this file
t = SalomeTest()
self = t # For interacting with the tests at the command line
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print(f'Running test: {item}')
try:
t.__getattribute__(item)()
except TypeError:
t.__getattribute__(item)(tmpdir=py.path.local())
t.teardown_class()
|
StarcoderdataPython
|
9665692
|
print("Escriba las calificaciones de los 3 parciales:")
c1, c2, c3 = map(float, input().split())
ef=float(input("Escriba la calificacion del examen final: " ))* 0.30
tf=float(input("Digite la calificacion del trabajo final: "))* 0.15
promedio=(c1+c2+c3)/3*0.55
cf=promedio+ef+tf
print("La calificacion final de la clase es de: ",cf)
|
StarcoderdataPython
|
5092698
|
<gh_stars>0
import pkgutil
import unittest
from django.test import TestCase
from django.test.runner import DiscoverRunner
from OnToology import settings
from OnToology.models import *
from mongoengine import connection, connect
import pyclbr
def suite():
return unittest.TestLoader().discover("OnToology.tests", pattern="test*.py")
class NoSQLTestRunner(DiscoverRunner):
def setup_databases(self):
settings.test_conf['local'] = True
settings.test_conf['fork'] = True
settings.test_conf['clone'] = True
settings.test_conf['push'] = True
settings.test_conf['pull'] = True
self.clearing_db_connection()
new_db_name = "test_OnToology"
connect(new_db_name)
def teardown_databases(self, *args):
db_name = connection._connection_settings['default']['name']
connection.get_connection().drop_database(db_name)
connection.disconnect()
@classmethod
def clearing_db_connection(cls):
"""
Only used by me
:return:
"""
# disconnect the connection with the db
connection.disconnect()
# remove the connection details
connection._dbs = {}
connection._connections = {}
connection._connection_settings = {}
# getting call classes defined in models.py
models = pyclbr.readmodule("OnToology.models").keys()
for class_model in models:
# delete the collection to prevent automatically connecting with the old db (the live one)
del globals()[class_model]._collection
class NoSQLTestCase(TestCase):
def _fixture_setup(self):
pass
def _fixture_teardown(self):
pass
|
StarcoderdataPython
|
8125994
|
<reponame>rspitler/infra-buddy<filename>src/main/python/infra_buddy/deploy/s3_deploy.py
import os
import tempfile
from infra_buddy.aws import s3 as s3util
from infra_buddy.aws.cloudformation import CloudFormationBuddy
from infra_buddy.aws.s3 import S3Buddy
from infra_buddy.deploy.deploy import Deploy
from infra_buddy.utility import print_utility
class S3Deploy(Deploy):
def __init__(self, artifact_id, location, ctx):
super(S3Deploy, self).__init__(ctx)
self.location = location
self.artifact_id = artifact_id
self.cloud_formation_buddy = CloudFormationBuddy(self.deploy_ctx)
if self.deploy_ctx.s3_deploy_bucket:
self.destination_bucket = self.deploy_ctx.s3_deploy_bucket
else:
self.destination_bucket = \
self.cloud_formation_buddy.get_export_value( param=self.deploy_ctx.s3_deploy_bucket_export)
if not self.destination_bucket:
print_utility.error("Could not find s3 deploy bucket")
def _internal_deploy(self, dry_run):
mkdtemp = tempfile.mkdtemp()
if not self.artifact_id.endswith(".zip" ):
self.artifact_id = "{}.zip".format(self.artifact_id)
artifact_download = "s3://{location}/{artifact_id}".format(location=self.location,artifact_id=self.artifact_id)
s3util.download_zip_from_s3_url(artifact_download,destination=mkdtemp)
to_upload = self.get_filepaths(mkdtemp)
if dry_run:
print_utility.banner_warn("Dry Run: Uploading files to - {}".format( self.destination_bucket),
str(to_upload))
else:
split = self.destination_bucket.split("/")
if len(split)>1:
path = "/".join(split[1:])
else:
path = ''
s3 = S3Buddy(self.deploy_ctx, path, split[0])
print_utility.progress("S3 Deploy: Uploading files to - {}".format( self.destination_bucket))
for s3_key, path in to_upload.items():
print_utility.info("{} - {}".format( self.destination_bucket, s3_key))
s3.upload(key_name=s3_key, file=path)
def get_filepaths(self, local_directory):
rel_paths = {}
for root, dirs, files in os.walk(local_directory):
for filename in files:
# construct the full local path
local_path = os.path.join(root, filename)
# construct the full Dropbox path
relative_path = os.path.relpath(local_path, local_directory)
# s3_path = os.path.join(destination, relative_path)
rel_paths[relative_path] = local_path
return rel_paths
def __str__(self):
return "{} - {}:{}".format(self.__class__.__name__,self.location,self.artifact_id)
|
StarcoderdataPython
|
6432024
|
<filename>RaspberryPI/Script.py
# **********************************Definitions: ****************************************
#Device Code
Device = "aaaa"
# URLS:
# Address of Sever, Local or domain
Address = 'http://SimpleStorage.local/'
# Set Color R=Red G=Green B=Blue Recommend using bright Colors
# Finder Strip
R ="255"
G = "100"
B = "255"
# Indicator Strip
R2 ="255"
G2 = "100"
B2 = "255"
# NeoPixel Data For Product Strip
LED_COUNT = 16 # Number of LED pixels.
LED_PIN = 16 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Neopixel Data for the Indicator Strip
LED_COUNT2 = 16 # Number of LED pixels.
LED_PIN2 = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ2 = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA2 = 5 # DMA channel to use for generating signal (try 5)
LED_INVERT2 = False # True to invert the signal (when using NPN transistor level shift)
Color = ","+R +","+G+","+B
Color2 = ","+R2 +","+G2+","+B2
# If the large Tray is enabled
Largetray = True
Large1 = True
Large2 = True
Large3 = True
Large4 = True
Large5 = True
Large6 = True
Large7 = True
Large8 = True
Large9 = True
# Large tray IDs: put the specified values here and make sure they are unique
Largetr1 = 101
Largetr2 = 102
Largetr3 = 103
Largetr4 = 104
Largetr5 = 105
Largetr6 = 106
Largetr7 = 107
Largetr8 = 108
Largetr9 = 109
# Large tray LED value codes (custom format): pixel number followed by the color value; pixel numbering starts at zero and the strip can have any number of pixels
LargetrLED1 = "23" + Color
LargetrLED2= "23" + Color
LargetrLED3= "23" + Color
LargetrLED4= "23" + Color
LargetrLED5= "23" + Color
LargetrLED6= "23" + Color
LargetrLED7="23" + Color
LargetrLED8= "23" + Color
LargetrLED9 "23" + Color
#**************************************************************************************
url = Address + 'newfile.txt'
power = Address + "power.txt"
import serial
import time
import requests
from bs4 import BeautifulSoup
import time
from neopixel import *
print()
print()
ind = Adafruit_NeoPixel(LED_COUNT2, LED_PIN2, LED_FREQ_HZ2, LED_DMA2, LED_INVERT2)
def indicator():
    # Light the indicator strip with the configured color; R2/G2/B2 are strings,
    # so cast to int (setPixelColorRGB assumed available in rpi_ws281x neopixel).
    count = 0
    while count < LED_COUNT2:
        ind.setPixelColorRGB(count, int(R2), int(G2), int(B2))
        count += 1
    ind.show()
def senddata(data):
    # data is a comma separated string: "<pixel>,<R>,<G>,<B>"
    print("Sending Data:")
    print(data)
    pixel, red, green, blue = [int(v) for v in data.split(",")[:4]]
    strip.setPixelColorRGB(pixel, red, green, blue)
    strip.show()
    indicator()
Word = "ON"
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT)
strip.begin()
ind.begin()
# Main loop
print("Starting")
while True:
print("Connecting")
powerresponse = requests.get(power)
powerparsed = BeautifulSoup(powerresponse.content, "html.parser")
    print(powerparsed)
pow = powerparsed
word = pow
value = str(pow)
print("Connected Sucessfuly")
if int(value) >= 1000:
# Gets Web Data
response = requests.get(url)
print("Requested Item:")
soup = BeautifulSoup(response.content, "html.parser")
soup_string = str(soup)
        ID = soup_string[:4]
        # Assumption: the characters after the 4-character device code hold the
        # requested tray number (fall back to -1 if they are not numeric).
        Number = int(soup_string[4:]) if soup_string[4:].isdigit() else -1
        print(soup)
if ID == Device:
if Largetray == True:
print("Large Trays Enabled")
if Number == Largetr1:
if Large1 == True:
                        senddata(LargetrLED1)
Number = " "
elif Number == Largetr2:
if Large2 == True:
senddata(LargetrLED2)
Number = " "
elif Number == Largetr3:
if Large3 == True:
senddata(LargetrLED3)
Number = " "
elif Number == Largetr4:
if Large4 == True:
senddata(LargetrLED4)
Number = " "
elif Number == Largetr5:
if Large5 == True:
senddata(LargetrLED5)
Number = " "
elif Number == Largetr6:
if Large6 == True:
senddata(LargetrLED6)
Number = " "
elif Number == Largetr7:
                    if Large7 == True:
senddata(LargetrLED7)
Number = " "
elif Number == Largetr8:
                    if Large8 == True:
senddata(LargetrLED8)
Number = " "
elif Number == Largetr9:
                    if Large9 == True:
senddata(LargetrLED9)
Number = " "
else:
                    Number2 = Number - 1
                    print("Sending Data")
                    senddata(str(Number2) + Color)
time.sleep(2)
else:
print("Large Tree Not Enabled Treating Normally")
LED = int(soup_string)
LED2 = LED -1
senddata(Color + "P" + LED2)
else:
print("Not This Device")
time.sleep(2)
else:
print("Device is still OFF")
time.sleep(2)
|
StarcoderdataPython
|
1790781
|
<reponame>Xz-Alan/RS_Classification
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
import numpy as np
import sar_data as sd
import test_sar_data as tsd
import os
import math
import time
import argparse
import scipy as sp
import scipy.stats
import scipy.io
from PIL import Image
import random
from network import CNNEncoder, RelationNetwork
from sklearn.metrics import confusion_matrix
import rgb
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
parser = argparse.ArgumentParser(description="hsi few-shot classification")
parser.add_argument("--num_epoch", type=int, default=1)
parser.add_argument("--train_n_way", type=int, default=7)
parser.add_argument("--train_n_shot", type=int, default=5)
parser.add_argument("--train_n_query", type=int, default=15)
parser.add_argument("--test_n_way", type=int, default=7)
parser.add_argument("--test_n_shot", type=int, default=5)
parser.add_argument("--test_n_query", type=int, default=1)
parser.add_argument("--test_epoch", type=int, default=100)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--data_folder", type=str, default='./data/')
parser.add_argument("--data_name", type=str, default='rs_data') # flevoland
parser.add_argument("--sar_size1", type=int, default=5, help="flip the picture to 5x5 size")
parser.add_argument("--sar_size2", type=int, default=11, help="flip the picture to 11x11 size")
parser.add_argument("--sar_size3", type=int, default=17, help="flip the picture to 13x13 size")
parser.add_argument("--trainset_ratio", type=float, default=0.7)
parser.add_argument("--out_dim", type=int, default=32, help="cnn_net_out_dim")
parser.add_argument("--hidden_size", type=int, default=10, help="relation_net_hidden_size")
parser.add_argument("--loss_model", type=int, default=3, help="0: ce_loss;1: mse_loss;2: focal_loss;3: MSE_IIRL_loss")
parser.add_argument("--test_num", type=int, default=0)
parser.add_argument("--test_switch",type=bool, default=False)
parser.add_argument("--paint_switch",type=bool,default=False)
args = parser.parse_args()
def weights_init(m):
"""
    Initialize the model weights layer by layer.
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif classname.find('Linear') != -1:
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data = torch.ones(m.bias.data.size())
def one_hot(args, indices):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
"""
encoded_indicate = torch.zeros(args.train_n_way*args.train_n_query, args.train_n_way).cuda()
index = indices.long().view(-1,1)
encoded_indicate = encoded_indicate.scatter_(1,index,1)
return encoded_indicate
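# Illustration of one_hot (kept as a comment so the training script is not
# altered; the shapes follow the default arguments above): with train_n_way=7
# and train_n_query=15 the input is a flat label tensor of length 105 and the
# output is a (105, 7) tensor with a single 1 per row, e.g.
#   labels = torch.arange(7).repeat(15).float().cuda()
#   encoded = one_hot(args, labels)   # encoded.sum(dim=1) is 1 for every row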
def kappa(confusion_matrix):
"""kappa系数
:param: confusion_matrix--混淆矩阵
:return: Kappa系数
"""
pe_rows = np.sum(confusion_matrix, axis=0)
pe_cols = np.sum(confusion_matrix, axis=1)
sum_total = sum(pe_cols)
pe = np.dot(pe_rows, pe_cols) / float(sum_total ** 2)
po = np.trace(confusion_matrix) / float(sum_total)
return (po - pe) / (1 - pe)
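# A small worked example for kappa (hypothetical numbers): for the confusion
# matrix [[40, 10], [5, 45]] the column sums are (45, 55), the row sums are
# (50, 50) and the total is 100, so pe = (45*50 + 55*50) / 100**2 = 0.5,
# po = (40 + 45) / 100 = 0.85 and kappa = (0.85 - 0.5) / (1 - 0.5) = 0.7.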
def main():
rgb_colors = rgb.ncolors(args.train_n_way)
print(rgb_colors)
start_time = time.time()
# rgb_colors = np.array([[248, 49, 49], [200, 248, 9], [42, 248, 124], [36, 123, 254], [204, 4, 254]])
if args.paint_switch:
print("painting img_gt")
_, gts = sd.mat_data(args)
gts -= 1
img_h = gts.shape[0]-16
img_v = gts.shape[1]-16
img_gt = Image.new("RGB", (img_h, img_v), "white")
for h in range(img_h):
for v in range(img_v):
for i in range(args.test_n_way):
if gts[h+8,v+8] == i:
img_gt.putpixel([h, v], (rgb_colors[i][0], rgb_colors[i][1], rgb_colors[i][2]))
break
img_gt.save("./img_result/"+ str(args.data_name) + "_img_gt.jpg")
if args.test_switch:
# 184170 load
que_labels = scipy.io.loadmat("./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['que_labels'].squeeze(0).astype(int)
pre_labels = scipy.io.loadmat("./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))['pre_labels'].squeeze(0)
# perpare
class_correct = np.zeros(args.test_n_way).astype(int)
class_num = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
for i in range(len(que_labels)):
if pre_labels[i]==que_labels[i]:
class_correct[que_labels[i]] += 1
class_num[que_labels[i]] += 1
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
        oa = total_correct / total_labels
print("OA: %.2f %%" %(oa*100))
return print("test finished!")
print("loading sar_dataset")
if os.path.exists('./data/' + args.data_name + '/stacks_1.npy') == False:
print("making dataset")
os.makedirs(("./data/"+args.data_name+"/"), exist_ok= True)
tsd.sar_datesets(args)
test_stacks_1 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_1.npy')) # (182656,27,5,5)
test_stacks_2 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_2.npy'))
test_stacks_3 = torch.Tensor(np.load('./data/' + args.data_name + '/stacks_3.npy'))
test_gts = torch.Tensor(np.load('./data/' + args.data_name + '/gts.npy'))
test_gts -= 1
load_time = time.time()
print("%sset load successfully, and spend time: %.2f"%(args.data_name, load_time-start_time))
print("init network")
cnn_sup = CNNEncoder(test_stacks_1.size(1), args.out_dim)
cnn_que = CNNEncoder(test_stacks_1.size(1), args.out_dim)
relation_net = RelationNetwork(2*args.out_dim, args.hidden_size)
    # Initialize the models
cnn_sup.apply(weights_init)
cnn_que.apply(weights_init)
relation_net.apply(weights_init)
cnn_sup.cuda()
cnn_que.cuda()
relation_net.cuda()
# scheduler
    # Adam optimizes the network parameters; the scheduler halves the learning rate every step_size iterations
cnn_sup_optim = torch.optim.Adam(cnn_sup.parameters(), lr=args.lr)
cnn_sup_scheduler = StepLR(cnn_sup_optim, step_size=20000, gamma=0.5)
cnn_que_optim = torch.optim.Adam(cnn_que.parameters(), lr=args.lr)
cnn_que_scheduler = StepLR(cnn_que_optim, step_size=20000, gamma=0.5)
relation_net_optim = torch.optim.Adam(relation_net.parameters(), lr=args.lr)
relation_net_scheduler = StepLR(relation_net_optim, step_size=20000, gamma=0.1)
test_result = open("./test_result/%s_%d_loss_%d_shot_%d_log.txt"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num), 'w')
cnn_sup_folder = "./model/" + str(args.data_name) + "/cnn_sup/"
cnn_que_folder = "./model/" + str(args.data_name) + "/cnn_que/"
relation_net_folder = "./model/" + str(args.data_name) + "/relation_net/"
os.makedirs(cnn_sup_folder, exist_ok=True)
os.makedirs(cnn_que_folder, exist_ok=True)
os.makedirs(relation_net_folder, exist_ok=True)
if os.path.exists(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_sup.load_state_dict(torch.load(cnn_sup_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_sup successfully")
if os.path.exists(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
cnn_que.load_state_dict(torch.load(cnn_que_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load cnn_que successfully")
if os.path.exists(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)):
relation_net.load_state_dict(torch.load(relation_net_folder + "%s_%d_loss_%d_shot_%d.pth"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)))
print("load relation_net successfully")
'''
cnn_sup.eval()
cnn_que.eval()
relation_net.eval()
'''
for epoch in range(args.num_epoch):
print("start testing")
#------------------------------prepare------------------------------
test_time = time.time()
total_correct = 0
class_correct = np.zeros(args.test_n_way).astype(int)
class_acc = np.zeros(args.test_n_way).astype(float)
pre_labels = []
que_labels = []
gts_class = np.arange(args.test_n_way)
h_img = 750 -16
v_img = 1024 -16
img_out = Image.new("RGB", (h_img, v_img), "white")
#------------------------------test------------------------------
test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3, test_sup_gts, class_num = tsd.sar_dataloader(args, gts_class, test_gts, test_stacks_1, test_stacks_2, test_stacks_3, split='test',form='support', shuffle=False)
class_num_max = np.max(class_num)
print("class_num_max: ", class_num_max)
index_i = np.zeros(args.test_n_way).astype(int)
index_j = np.zeros(args.test_n_way).astype(int)
for i in range(class_num_max):
#-------------------------------------------------------------------------
        stack_index = np.arange(0, test_gts.size(0))  # indices into the full sample stack
# print("stack_index: ", len(stack_index))
        index = np.zeros(1, dtype=int)  # seed with a zero entry so the loop below can concatenate onto it
for i in gts_class:
stack_index_i = stack_index[test_gts == i]
if index_j[i] >= len(stack_index_i):
index_j[i] = 0
# print(i, ":", len(stack_index_i))
stack_index_i = [stack_index_i[index_j[i]]]
index = np.concatenate((index, stack_index_i), axis=0)
index_j[i] += 1
        index = np.delete(index, 0, 0)  # drop the seed entry; keep the original order (no shuffling)
test_que_stacks_1 = []
test_que_stacks_2 = []
test_que_stacks_3 = []
test_que_gts = []
for item in list(index):
            # add a leading dimension to each sample so the concatenation below has the right shape
test_que_stacks_1.append(test_stacks_1[item].unsqueeze(0))
test_que_stacks_2.append(test_stacks_2[item].unsqueeze(0))
test_que_stacks_3.append(test_stacks_3[item].unsqueeze(0))
test_que_gts.append(test_gts[item].unsqueeze(0))
test_que_stacks_1 = torch.cat(test_que_stacks_1, dim=0) # (25,27,5,5)
test_que_stacks_2 = torch.cat(test_que_stacks_2, dim=0) # (25,27,11,11)
test_que_stacks_3 = torch.cat(test_que_stacks_3, dim=0) # (25,27,17,17)
test_que_gts = torch.cat(test_que_gts, dim=0)
#-------------------------------------------------------------------------
test_sup_stacks_1 = test_sup_stacks_1.cuda()
test_sup_stacks_2 = test_sup_stacks_2.cuda()
test_sup_stacks_3 = test_sup_stacks_3.cuda()
test_sup_gts = test_sup_gts.cuda()
test_que_stacks_1 = test_que_stacks_1.cuda()
test_que_stacks_2 = test_que_stacks_2.cuda()
test_que_stacks_3 = test_que_stacks_3.cuda()
test_que_gts = test_que_gts.cuda()
mult_sup_feature = cnn_sup(test_sup_stacks_1, test_sup_stacks_2, test_sup_stacks_3)
mult_que_feature = cnn_que(test_que_stacks_1, test_que_stacks_2, test_que_stacks_3)
mult_relation_pairs = []
for i in range(3):
            # average the support features over the shots of each class
sup_feature = mult_sup_feature[i]
que_feature = mult_que_feature[i]
sup_feature = sup_feature.view(args.test_n_way, args.test_n_shot, -1, sup_feature.shape[2], sup_feature.shape[3])
sup_feature = torch.mean(sup_feature,1).squeeze(1)
# relations
sup_feature_ext = sup_feature.unsqueeze(0).repeat(args.test_n_way*args.test_n_query, 1, 1, 1, 1)
que_feature_ext = torch.transpose(que_feature.unsqueeze(0).repeat(args.test_n_way,1,1, 1, 1),0,1)
relation_pairs = torch.cat((sup_feature_ext, que_feature_ext), 2).view(-1, 2*args.out_dim, sup_feature.shape[2], sup_feature.shape[3])
mult_relation_pairs.append(relation_pairs)
relations = relation_net(mult_relation_pairs[0], mult_relation_pairs[1], mult_relation_pairs[2]).view(-1, args.test_n_way)
# calculate relations
_, predict_gts = torch.max(relations.data, 1)
for j in range(args.test_n_way):
h_j = index[j] // v_img
v_j = index[j] % v_img
img_out.putpixel([h_j, v_j], (rgb_colors[predict_gts[j]][0], rgb_colors[predict_gts[j]][1], rgb_colors[predict_gts[j]][2]))
if index_i[j] > class_num[j]:
continue
if predict_gts[j]== test_que_gts[j]:
class_correct[j] += 1
pre_labels.append(predict_gts[j].item())
que_labels.append(test_que_gts[j].item())
index_i[j] +=1
# painting
img_out.save("./img_result/" + "%s_%d_loss_%d_shot_%d_img_out.jpg"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num))
# labels save
que_save = "./labels_save/que_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
pre_save = "./labels_save/pre_%s_%d_loss_%d_shot_%d_img_out.mat"%(args.data_name, args.loss_model, args.train_n_shot, args.test_num)
scipy.io.savemat(que_save, mdict={"que_labels": que_labels})
scipy.io.savemat(pre_save, mdict={"pre_labels": pre_labels})
# kappa
confusion_m = confusion_matrix(que_labels, pre_labels)
kappa_score = kappa(confusion_m)
print("Kappa: %.2f %%" %(kappa_score*100))
test_result.write("Kappa: %.2f %%\n" %(kappa_score*100))
test_result.flush()
# aa
for i in range(args.test_n_way):
class_acc[i] = class_correct[i] / class_num[i]
# print(i, "_class_correct: ", class_correct[i])
# print(i, "_class_num: ", class_num[i])
print("class_%d_acc: %.2f %%" %(i, class_acc[i]*100))
test_result.write("class_%d_acc: %.2f %%\n" %(i, class_acc[i]*100))
test_result.flush()
aa = np.mean(class_acc)
print("AA: %.2f %%" %(aa*100))
test_result.write("AA: %.2f %%\n" %(aa*100))
test_result.flush()
# oa
total_labels = np.sum(class_num)
total_correct = np.sum(class_correct)
# print("total_labels: ", total_labels)
# print("total_correct: ", total_correct)
oa = total_correct / total_labels
print("OA: %.2f %%" %(oa*100))
test_result.write("OA: %.2f %%\n" %(oa*100))
test_result.flush()
end_time = time.time()
print("test finished, and spend time: ", end_time - test_time)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
39458
|
<reponame>sqilz/LIMS-Backend
import io
import json
from django.core.exceptions import ObjectDoesNotExist
from pint import UnitRegistry
import django_filters
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from rest_framework import serializers
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.filters import (OrderingFilter,
SearchFilter,
DjangoFilterBackend)
from lims.permissions.permissions import (IsInAdminGroupOrRO,
ViewPermissionsMixin, ExtendedObjectPermissions,
ExtendedObjectPermissionsFilter)
from lims.shared.mixins import StatsViewMixin, AuditTrailViewMixin
from lims.filetemplate.models import FileTemplate
from lims.projects.models import Product
from .models import Set, Item, ItemTransfer, ItemType, Location, AmountMeasure
from .serializers import (AmountMeasureSerializer, ItemTypeSerializer, LocationSerializer,
ItemSerializer, DetailedItemSerializer, SetSerializer,
ItemTransferSerializer)
from .providers import InventoryItemPluginProvider
# Define as module level due to issues with file locking
# when calling a function requiring it multiple times
ureg = UnitRegistry()
class LeveledMixin(AuditTrailViewMixin):
"""
Provide a display value for a heirarchy of elements
"""
def _to_leveled(self, obj):
level = getattr(obj, obj._mptt_meta.level_attr)
if level == 0:
display_value = obj.name
else:
display_value = '{} {}'.format('--' * level, obj.name)
return {
'display_value': display_value,
'value': obj.name,
'root': obj.get_root().name
}
class MeasureViewSet(AuditTrailViewMixin, viewsets.ModelViewSet):
queryset = AmountMeasure.objects.all()
serializer_class = AmountMeasureSerializer
permission_classes = (IsInAdminGroupOrRO,)
search_fields = ('symbol', 'name',)
class ItemTypeViewSet(viewsets.ModelViewSet, LeveledMixin):
queryset = ItemType.objects.all()
serializer_class = ItemTypeSerializer
permission_classes = (IsInAdminGroupOrRO,)
search_fields = ('name', 'parent__name',)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.has_children():
return Response({'message': 'Cannot delete ItemType with children'},
status=400)
self.perform_destroy(instance)
return Response(status=204)
class LocationViewSet(viewsets.ModelViewSet, LeveledMixin):
queryset = Location.objects.all()
serializer_class = LocationSerializer
permission_classes = (IsInAdminGroupOrRO,)
search_fields = ('name', 'parent__name')
def filter_queryset(self, queryset):
queryset = super(LocationViewSet, self).filter_queryset(queryset)
# Set ordering explicitly as django-filter borks the defaults
return queryset.order_by('tree_id', 'lft')
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.has_children():
return Response({'message': 'Cannot delete Location with children'},
status=400)
self.perform_destroy(instance)
return Response(status=204)
class InventoryFilterSet(django_filters.FilterSet):
"""
Filter for inventory items
"""
class Meta:
model = Item
fields = {
'id': ['exact'],
'name': ['exact', 'icontains'],
'added_by__username': ['exact'],
'identifier': ['exact'],
'barcode': ['exact'],
'description': ['icontains'],
'item_type__name': ['exact'],
'location__name': ['exact'],
'in_inventory': ['exact'],
'amount_measure__symbol': ['exact'],
'amount_available': ['exact', 'lt', 'lte', 'gt', 'gte'],
'concentration_measure__symbol': ['exact'],
'concentration': ['exact', 'lt', 'lte', 'gt', 'gte'],
'added_on': ['exact', 'lt', 'lte', 'gt', 'gte'],
'last_updated_on': ['exact', 'lt', 'lte', 'gt', 'gte'],
'properties__name': ['exact', 'icontains'],
'properties__value': ['exact', 'icontains'],
}
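# Illustrative query strings this FilterSet accepts (hypothetical values; the
# endpoint path depends on the project's URL conf): a client could request
#   /inventory/?item_type__name=Enzyme&amount_available__gte=10
#   /inventory/?name__icontains=buffer&in_inventory=True
# and django-filter maps each key to the lookups declared in Meta.fields above.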
class InventoryViewSet(LeveledMixin, StatsViewMixin, ViewPermissionsMixin, viewsets.ModelViewSet):
queryset = Item.objects.all()
serializer_class = ItemSerializer
permission_classes = (ExtendedObjectPermissions,)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
search_fields = ('name', 'identifier', 'item_type__name', 'location__name',
'location__parent__name')
filter_class = InventoryFilterSet
def get_serializer_class(self):
if self.action == 'list':
return self.serializer_class
return DetailedItemSerializer
def get_object(self):
instance = super().get_object()
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.view()
return instance
def perform_create(self, serializer):
serializer, permissions = self.clean_serializer_of_permissions(serializer)
instance = serializer.save(added_by=self.request.user)
self.assign_permissions(instance, permissions)
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.create()
def perform_update(self, serializer):
instance = serializer.save()
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.update()
@list_route(methods=['POST'], parser_classes=(FormParser, MultiPartParser,))
def importitems(self, request):
"""
Import items from a CSV file
Expects:
file_template: The ID of the file template to use to parse the file
items_file: The CSV file to parse
permissions: Standard permissions format ({"name": "rw"}) to give to all items
"""
file_template_id = request.data.get('filetemplate', None)
uploaded_file = request.data.get('items_file', None)
permissions = request.data.get('permissions', '{}')
response_data = {}
if uploaded_file and file_template_id:
try:
filetemplate = FileTemplate.objects.get(id=file_template_id)
except FileTemplate.DoesNotExist:
return Response({'message': 'File template does not exist'}, status=404)
encoding = 'utf-8' if request.encoding is None else request.encoding
f = io.TextIOWrapper(uploaded_file.file, encoding=encoding)
items_to_import = filetemplate.read(f, as_list=True)
saved = []
rejected = []
if items_to_import:
for item_data in items_to_import:
item_data['assign_groups'] = json.loads(permissions)
if 'properties' not in item_data:
item_data['properties'] = []
'''
I'm not actually sure what this was supposed to do!
                    Properties are already a list, so this shouldn't be required.
else:
item_data['properties'] = ast.literal_eval(item_data['properties'])
'''
item = DetailedItemSerializer(data=item_data)
if item.is_valid():
saved.append(item_data)
item, parsed_permissions = self.clean_serializer_of_permissions(item)
item.validated_data['added_by'] = request.user
instance = item.save()
self.assign_permissions(instance, parsed_permissions)
if 'product' in item_data:
try:
prod = item_data['product']
product = Product.objects.get(product_identifier=prod)
except:
pass
else:
product.linked_inventory.add(instance)
else:
item_data['errors'] = item.errors
rejected.append(item_data)
else:
                return Response({'message': 'File format is incorrect'}, status=400)
response_data = {
'saved': saved,
'rejected': rejected
}
return Response(response_data)
@list_route(methods=['POST'])
def export_items(self, request):
# The ID of the file template
file_template_id = request.data.get('filetemplate', None)
# The ID's of items to get
selected = request.data.get('selected', None)
if file_template_id:
if selected:
ids = selected.strip(',').split(',')
items = Item.objects.filter(pk__in=ids)
else:
# The query used to get the results
# Query params in URL used NOT in .data
items = self.filter_queryset(self.get_queryset())
serializer = DetailedItemSerializer(items, many=True)
try:
file_template = FileTemplate.objects.get(pk=file_template_id)
except:
return Response({'message': 'File template does not exist'}, status=404)
with io.StringIO() as output_file:
output_file = file_template.write(output_file, serializer.data)
output_file.seek(0)
return Response(output_file.read(), content_type='text/csv')
        return Response({'message': 'Please supply a file template and data to export'}, status=400)
@detail_route(methods=['POST'])
def transfer(self, request, pk=None):
"""
Either create or complete an item transfer.
"""
tfr_id = request.query_params.get('id', None)
complete_transfer = request.query_params.get('complete', False)
transfer_details = request.data
if tfr_id and complete_transfer:
try:
tfr = ItemTransfer.objects.get(pk=tfr_id)
except ObjectDoesNotExist:
return Response({'message': 'No item transfer exists with that ID'}, status=404)
tfr.transfer_complete = True
tfr.save()
return Response({'message': 'Transfer {} complete'.format(tfr_id)})
elif transfer_details:
item = self.get_object()
raw_amount = float(transfer_details.get('amount', 0))
raw_measure = transfer_details.get('measure', item.amount_measure.symbol)
addition = transfer_details.get('is_addition', False)
# Booleanise them
is_complete = False
is_addition = False
if addition:
is_addition = True
is_complete = True
if transfer_details.get('transfer_complete', False):
is_complete = True
try:
measure = AmountMeasure.objects.get(symbol=raw_measure)
except AmountMeasure.DoesNotExist:
raise serializers.ValidationError({'message':
'Measure {} does not exist'.format(raw_measure)
})
tfr = ItemTransfer(
item=item,
amount_taken=raw_amount,
amount_measure=measure,
barcode=transfer_details.get('barcode', ''),
coordinates=transfer_details.get('coordinates', ''),
transfer_complete=is_complete,
is_addition=is_addition
)
transfer_status = tfr.check_transfer()
if transfer_status[0] is True:
tfr.save()
tfr.do_transfer(ureg)
else:
return Response(
{'message': 'Inventory item {} ({}) is short of amount by {}'.format(
item.identifier, item.name, transfer_status[1])}, status=400)
return Response({'message': 'Transfer {} created'.format(tfr.id)})
return Response({'message': 'You must provide a transfer ID'}, status=400)
@detail_route(methods=['POST'])
def cancel_transfer(self, request, pk=None):
"""
Cancel an active transfer, adding the amount back
"""
tfr_id = request.query_params.get('id', None)
if tfr_id:
try:
tfr = ItemTransfer.objects.get(pk=tfr_id, transfer_complete=False)
except ObjectDoesNotExist:
return Response({'message': 'No item transfer exists with that ID'}, status=404)
tfr.is_addition = True
tfr.do_transfer(ureg)
tfr.delete()
return Response({'message': 'Transfer cancelled'})
return Response({'message': 'You must provide a transfer ID'}, status=400)
class SetViewSet(AuditTrailViewMixin, viewsets.ModelViewSet, ViewPermissionsMixin):
queryset = Set.objects.all()
serializer_class = SetSerializer
permission_classes = (ExtendedObjectPermissions,)
search_fields = ('name',)
filter_fields = ('is_partset',)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter, ExtendedObjectPermissionsFilter,)
def perform_create(self, serializer):
serializer, permissions = self.clean_serializer_of_permissions(serializer)
instance = serializer.save()
self.assign_permissions(instance, permissions)
@detail_route()
def items(self, request, pk=None):
limit_to = request.query_params.get('limit_to', None)
item = self.get_object()
if limit_to:
queryset = [o for o in item.items.all() if o.item_type.name == limit_to]
else:
queryset = item.items.all()
serializer = ItemSerializer(queryset, many=True)
return Response(serializer.data)
@detail_route(methods=['POST'])
def add(self, request, pk=None):
item_id = request.query_params.get('id', None)
inventoryset = self.get_object()
if item_id:
try:
item = Item.objects.get(pk=item_id)
except Item.DoesNotExist:
raise serializers.ValidationError({'message':
'Item {} does not exist'.format(item_id)})
item.sets.add(inventoryset)
return Response(status=201)
return Response(
{'message': 'The id of the item to add to the inventory is required'}, status=400)
@detail_route(methods=['DELETE'])
def remove(self, request, pk=None):
item_id = request.query_params.get('id', None)
inventoryset = self.get_object()
if item_id:
try:
item = inventoryset.items.get(pk=item_id)
except Item.DoesNotExist:
raise serializers.ValidationError({'message':
'Item {} does not exist'.format(item_id)})
inventoryset.items.remove(item)
return Response(status=204)
return Response(
            {'message': 'The id of the item to remove from the inventory is required'}, status=400)
class ItemTransferViewSet(AuditTrailViewMixin, viewsets.ReadOnlyModelViewSet, ViewPermissionsMixin):
queryset = ItemTransfer.objects.all()
serializer_class = ItemTransferSerializer
search_fields = ('item__name', 'item__identifier', 'barcode',)
filter_fields = ('transfer_complete', 'barcode',)
filter_backends = (SearchFilter, DjangoFilterBackend,
OrderingFilter,)
def get_queryset(self):
return ItemTransfer.objects.filter(transfer_complete=False)
@list_route(methods=['GET'])
def grouped(self, request):
"""
Group transfers under the same barcode e.g. as if they where in plates.
Limit allows to set how many barcodes are fetched.
"""
limit = int(request.query_params.get('limit', 10))
qs = (ItemTransfer.objects.filter(transfer_complete=False)
.distinct('barcode')
.order_by('barcode', '-date_created')[:limit])
barcodes = [i.barcode for i in qs]
transfers = (ItemTransfer.objects.filter(transfer_complete=False, barcode__in=barcodes)
.order_by('barcode', 'coordinates'))
serializer = ItemTransferSerializer(transfers, many=True)
groups = {}
for t in serializer.data:
if t['barcode'] not in groups:
groups[t['barcode']] = []
groups[t['barcode']].append(t)
return Response(groups)
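# Sketch of the response shape produced by ItemTransferViewSet.grouped above
# (the barcodes are hypothetical): incomplete transfers are keyed by barcode
# and listed in coordinate order, e.g.
#   {"PLATE001": [<transfer>, <transfer>], "PLATE002": [<transfer>]}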
|
StarcoderdataPython
|
3309672
|
<filename>pointnet2/emd.py<gh_stars>10-100
import torch
import emd_cuda
import torch.nn as nn
class EarthMoverDistanceFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz1, xyz2, return_match=False):
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
assert xyz1.is_cuda and xyz2.is_cuda, "Only support cuda currently."
match = emd_cuda.approxmatch_forward(xyz1, xyz2)
cost = emd_cuda.matchcost_forward(xyz1, xyz2, match)
n = xyz1.shape[1]
m = xyz2.shape[1]
cost = cost / max(n,m)
ctx.save_for_backward(xyz1, xyz2, match)
if return_match:
return cost, match
else:
return cost
@staticmethod
def backward(ctx, grad_cost):
xyz1, xyz2, match = ctx.saved_tensors
grad_cost = grad_cost.contiguous()
grad_xyz1, grad_xyz2 = emd_cuda.matchcost_backward(grad_cost, xyz1, xyz2, match)
return grad_xyz1, grad_xyz2, None
def earth_mover_distance(xyz1, xyz2, transpose=False, return_match=False):
"""Earth Mover Distance (Approx)
Args:
xyz1 (torch.Tensor): (b, n, 3)
xyz2 (torch.Tensor): (b, m, 3)
transpose (bool): whether to transpose inputs as it might be BCN format.
Extensions only support BNC format.
Returns:
cost (torch.Tensor): (b)
"""
if xyz1.dim() == 2:
xyz1 = xyz1.unsqueeze(0)
if xyz2.dim() == 2:
xyz2 = xyz2.unsqueeze(0)
if transpose:
xyz1 = xyz1.transpose(1, 2)
xyz2 = xyz2.transpose(1, 2)
if return_match:
cost, match = EarthMoverDistanceFunction.apply(xyz1, xyz2, True)
return cost, match
else:
cost = EarthMoverDistanceFunction.apply(xyz1, xyz2, False)
return cost
class EMD_distance(nn.Module):
def forward(self, xyz1, xyz2, transpose=False, return_match=False):
if xyz1.dim() == 2:
xyz1 = xyz1.unsqueeze(0)
if xyz2.dim() == 2:
xyz2 = xyz2.unsqueeze(0)
if transpose:
xyz1 = xyz1.transpose(1, 2)
xyz2 = xyz2.transpose(1, 2)
if return_match:
cost, match = EarthMoverDistanceFunction.apply(xyz1, xyz2, True)
return cost, match
else:
cost = EarthMoverDistanceFunction.apply(xyz1, xyz2, False)
return cost
if __name__ == '__main__':
import pdb
# from pytorch3d.loss.chamfer import chamfer_distance
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'
n = 2048
m = 2048
return_match = False
x = torch.rand(128, n, 3).cuda() # (B,n,3)
y = torch.rand(128, m, 3).cuda() # (B,m,3)
d1 = earth_mover_distance(x,y, transpose=False, return_match=return_match)
emd_module = EMD_distance()
emd_module = nn.DataParallel(emd_module)
d2 = emd_module(x,y, transpose=False, return_match=return_match)
# c_d1,_ = chamfer_distance(x,y, batch_reduction=None)
# c_d1 = c_d1/2
    # # in the completion 3d repo, they set ground truth point cloud as y, and the generated point cloud as x
# # m1 is of shape (B,m,n) and
# # if m>n, m1.sum(dim=1) = ones(B,n)
# # if m<n, m1.sum(dim=2) = ones(B,m)
# # if m=n, m1.sum(dim=1) = m1.sum(dim=2) = ones(B,n)
# # assume x has less points than y
# # then for every point in x, we assign weights to every point to y, they sum to 1.
# # The closer a point in y is to x, the larger its weight is
# x_perm = torch.randperm(n)
# x_new = x[:,x_perm,:]
# y_perm = torch.randperm(m)
# y_new = y[:,y_perm,:]
# d1_new, m1_new = earth_mover_distance(x_new,y_new, transpose=False, return_match=return_match)
# # permutation of the order of points in x or y doesn't change the distance much
# d2, m2 = earth_mover_distance(y,x, transpose=False, return_match=return_match)
# # change the order of x and y alter the distance largely
# d3, m3 = earth_mover_distance(x,x, transpose=False, return_match=return_match)
    # # d3 is indeed close to 0
pdb.set_trace()
|
StarcoderdataPython
|
3455881
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for user.py."""
import mock
from google.appengine.ext import ndb
from upvote.gae import settings
from upvote.gae.datastore import test_utils
from upvote.gae.datastore.models import user as user_models
from upvote.gae.lib.testing import basetest
from upvote.gae.utils import user_utils
from upvote.shared import constants
_TEST_EMAIL = user_utils.UsernameToEmail('testemail')
# Done for the sake of brevity.
USER = constants.USER_ROLE.USER
TRUSTED_USER = constants.USER_ROLE.TRUSTED_USER
ADMINISTRATOR = constants.USER_ROLE.ADMINISTRATOR
class UserTest(basetest.UpvoteTestCase):
"""Test User model."""
def setUp(self):
super(UserTest, self).setUp()
self.PatchEnv(settings.ProdEnv, ENABLE_BIGQUERY_STREAMING=True)
def testGetOrInsertAsync_ExistingUser(self):
user = user_models.User(id=_TEST_EMAIL)
user.put()
self.assertEntityCount(user_models.User, 1)
future = user_models.User.GetOrInsertAsync(email_addr=_TEST_EMAIL)
user = future.get_result()
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertNoBigQueryInsertions()
def testGetOrInsertAsync_NewUser(self):
self.assertEntityCount(user_models.User, 0)
future = user_models.User.GetOrInsertAsync(email_addr=_TEST_EMAIL)
user = future.get_result()
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertSetEqual(constants.PERMISSIONS.SET_USER, user.permissions)
self.assertEqual(_TEST_EMAIL, user.email)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testGetOrInsert_ExistingUser_EmailAddr(self):
user = user_models.User(id=_TEST_EMAIL)
user.put()
self.assertEntityCount(user_models.User, 1)
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertNoBigQueryInsertions()
def testGetOrInsert_ExistingUser_AppEngineUser(self):
user = user_models.User(id=_TEST_EMAIL)
user.put()
self.assertEntityCount(user_models.User, 1)
appengine_user = test_utils.CreateAppEngineUser(email=_TEST_EMAIL)
user = user_models.User.GetOrInsert(appengine_user=appengine_user)
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertNoBigQueryInsertions()
def testGetOrInsert_ExistingUser_NoOverwrite(self):
user = user_models.User(id=_TEST_EMAIL)
user.put()
self.assertEntityCount(user_models.User, 1)
old_recorded_dt = user.recorded_dt
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
new_recorded_dt = user.recorded_dt
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertEqual(old_recorded_dt, new_recorded_dt)
self.assertNoBigQueryInsertions()
def testGetOrInsert_NewUser_EmailAddr(self):
self.assertEntityCount(user_models.User, 0)
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertSetEqual(constants.PERMISSIONS.SET_USER, user.permissions)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testGetOrInsert_NewUser_EmailAddr_Lowercase(self):
user = user_models.User.GetOrInsert(email_addr='<EMAIL>')
self.assertIsNotNone(user)
self.assertEqual('<EMAIL>', user.email)
self.assertEqual('upper', user.nickname)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testGetOrInsert_NewUser_AppEngineUser(self):
self.assertEntityCount(user_models.User, 0)
appengine_user = test_utils.CreateAppEngineUser(email=_TEST_EMAIL)
user = user_models.User.GetOrInsert(appengine_user=appengine_user)
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertSetEqual(constants.PERMISSIONS.SET_USER, user.permissions)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testGetOrInsert_NewUser_ProperlyInitialized(self):
self.assertEntityCount(user_models.User, 0)
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
self.assertIsNotNone(user)
self.assertEntityCount(user_models.User, 1)
self.assertEqual(_TEST_EMAIL, user.email)
self.assertIsNotNone(user.recorded_dt)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.USER], user.vote_weight)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertIsNone(user.last_vote_dt)
self.assertGreaterEqual(user.rollout_group, 0)
self.assertSetEqual(constants.PERMISSIONS.SET_USER, user.permissions)
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testGetOrInsert_UnknownUserError(self):
self.Patch(user_models.users, 'get_current_user', return_value=None)
with self.assertRaises(user_models.UnknownUserError):
user_models.User.GetOrInsert()
@mock.patch.object(user_models.mail_utils, 'Send')
def testSetRoles_RemoveAll(self, mock_send):
with self.LoggedInUser() as user:
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
email_addr = user.email
with self.assertRaises(user_models.NoRolesError):
user_models.User.SetRoles(email_addr, [])
user = user_models.User.GetOrInsert(email_addr=email_addr)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
mock_send.assert_not_called()
self.assertNoBigQueryInsertions()
@mock.patch.object(user_models.mail_utils, 'Send')
def testSetRoles_InvalidUserRole(self, mock_send):
with self.LoggedInUser() as user:
with self.assertRaises(user_models.InvalidUserRoleError):
user_models.User.SetRoles(user.email, ['INVALID_ROLE'])
mock_send.assert_not_called()
@mock.patch.object(user_models.mail_utils, 'Send')
def testSetRoles_NoChanges(self, mock_send):
with self.LoggedInUser() as user:
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
old_vote_weight = user.vote_weight
user_models.User.SetRoles(user.email, [constants.USER_ROLE.USER])
user = user_models.User.GetOrInsert(email_addr=user.email)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertEqual(user.vote_weight, old_vote_weight)
mock_send.assert_not_called()
self.assertNoBigQueryInsertions()
@mock.patch.object(user_models.mail_utils, 'Send')
def testSetRoles_AddRole(self, mock_send):
with self.LoggedInUser() as user:
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.USER], user.vote_weight)
new_roles = [constants.USER_ROLE.SUPERUSER, constants.USER_ROLE.USER]
user_models.User.SetRoles(user.email, new_roles)
user = user_models.User.GetOrInsert(email_addr=user.email)
self.assertListEqual(new_roles, user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.SUPERUSER],
user.vote_weight)
mock_send.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
@mock.patch.object(user_models.mail_utils, 'Send')
def testSetRoles_RemoveRole(self, mock_send):
old_roles = [constants.USER_ROLE.SUPERUSER, constants.USER_ROLE.USER]
user = test_utils.CreateUser(email=_TEST_EMAIL, roles=old_roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.SUPERUSER],
user.vote_weight)
mock_send.reset_mock()
new_roles = [constants.USER_ROLE.USER]
user_models.User.SetRoles(_TEST_EMAIL, new_roles)
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
self.assertListEqual(new_roles, user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.USER],
user.vote_weight)
mock_send.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
@mock.patch.object(user_models.mail_utils, 'Send')
def testUpdateRoles_AddRole(self, mock_send):
with self.LoggedInUser() as user:
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.USER], user.vote_weight)
user_models.User.UpdateRoles(
user.email, add=[constants.USER_ROLE.SUPERUSER])
user = user_models.User.GetOrInsert(email_addr=user.email)
self.assertListEqual(
[constants.USER_ROLE.SUPERUSER, constants.USER_ROLE.USER], user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.SUPERUSER],
user.vote_weight)
mock_send.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
@mock.patch.object(user_models.mail_utils, 'Send')
def testUpdateRoles_RemoveRole(self, mock_send):
old_roles = [constants.USER_ROLE.SUPERUSER, constants.USER_ROLE.USER]
user = test_utils.CreateUser(email=_TEST_EMAIL, roles=old_roles)
with self.LoggedInUser(user=user):
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.SUPERUSER],
user.vote_weight)
mock_send.reset_mock()
user_models.User.UpdateRoles(
_TEST_EMAIL, remove=[constants.USER_ROLE.SUPERUSER])
user = user_models.User.GetOrInsert(email_addr=_TEST_EMAIL)
self.assertListEqual([constants.USER_ROLE.USER], user.roles)
self.assertEqual(
settings.VOTING_WEIGHTS[constants.USER_ROLE.USER], user.vote_weight)
mock_send.assert_called_once()
self.assertBigQueryInsertion(constants.BIGQUERY_TABLE.USER)
def testHighestRole_Default(self):
user = test_utils.CreateUser()
self.assertEqual(constants.USER_ROLE.USER, user.highest_role)
def testHighestRole_Administrator(self):
roles = [
constants.USER_ROLE.USER,
constants.USER_ROLE.TRUSTED_USER,
constants.USER_ROLE.ADMINISTRATOR]
user = test_utils.CreateUser(roles=roles)
self.assertEqual(constants.USER_ROLE.ADMINISTRATOR, user.highest_role)
def testHighestRole_NoRolesError(self):
user = test_utils.CreateUser()
user.roles = []
user.put()
with self.assertRaises(user_models.NoRolesError):
user.highest_role # pylint: disable=pointless-statement
def testIsAdmin_Nope(self):
lowly_peon = test_utils.CreateUser(roles=[constants.USER_ROLE.USER])
self.assertFalse(lowly_peon.is_admin)
def testIsAdmin_HasAdminRole(self):
fancy_admin = test_utils.CreateUser(
roles=[constants.USER_ROLE.ADMINISTRATOR])
self.assertTrue(fancy_admin.is_admin)
def testIsAdmin_IsFailsafe(self):
self.PatchSetting('FAILSAFE_ADMINISTRATORS', [_TEST_EMAIL])
mr_failsafe = test_utils.CreateUser(
email=_TEST_EMAIL, roles=[constants.USER_ROLE.USER])
self.assertTrue(mr_failsafe.is_admin)
def testPermissions_Admin(self):
admin = test_utils.CreateUser(admin=True)
self.assertSetEqual(constants.PERMISSIONS.SET_ALL, admin.permissions)
def testPermissions_User(self):
user = test_utils.CreateUser()
self.assertSetEqual(constants.PERMISSIONS.SET_USER, user.permissions)
def testHasPermission(self):
user = test_utils.CreateUser()
self.assertTrue(user.HasPermission(constants.PERMISSIONS.VOTE))
self.assertFalse(user.HasPermission(constants.PERMISSIONS.EDIT_ALERTS))
def testLacksPermission(self):
user = test_utils.CreateUser()
self.assertFalse(user.LacksPermission(constants.PERMISSIONS.VOTE))
self.assertTrue(user.LacksPermission(constants.PERMISSIONS.EDIT_ALERTS))
if __name__ == '__main__':
basetest.main()
|
StarcoderdataPython
|
3597292
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa
__doc__ = """
This sample demonstrates the coupling of aqueous equilibrium equations generated from pyequion
with a dynamic simulation using the DAE Tools (daetools) framework.
This sample is based on the daetools tutorial `tutorial_adv_2.py`.
"""
import sys
import os
from time import localtime, strftime
import numpy as np
from daetools.pyDAE import *
from daetools.solvers.trilinos import pyTrilinos
# from res_nahco3_cacl2_reduced_T_2 import res as res_speciation
from res_nahco3_cacl2_T_2 import res as res_speciation
import pyequion
import aux_create_dynsim
# import daetools_cm
# import pyequion
# Standard variable types are defined in variable_types.py
from pyUnits import J, K, g, kg, kmol, m, mol, s, um
no_small_positive_t = daeVariableType(
"no_small_positive_t", dimless, 0.0, 1.0, 0.1, 1e-5
)
eq_idx_regular = {
"size": 14,
"Na+": 0,
"HCO3-": 1,
"Ca++": 2,
"OH-": 3,
"H+": 4,
"CO2": 5,
"CaOH+": 6,
"NaOH": 7,
"NaHCO3": 8,
"CO3--": 9,
"CaCO3": 10,
"NaCO3-": 11,
"Na2CO3": 12,
"CaHCO3+": 13,
"H2O": 14,
"Cl-": 15,
}
eq_idx_reduced2 = {
"size": 11, # REDUCEC
"Na+": 0,
"HCO3-": 1,
"Ca++": 2,
"OH-": 3,
"H+": 4,
"CO2": 5,
"NaHCO3": 6,
"CO3--": 7,
"CaCO3": 8,
"Na2CO3": 9, # WRONG, fixing
"CaHCO3+": 10,
"H2O": 11,
"Cl-": 12,
}
# eq_idx = eq_idx_reduced2
eq_idx = eq_idx_regular
iNa = 0
iC = 1
iCa = 2
iCl = 3
sys_eq_aux = aux_create_dynsim.get_caco3_nahco3_equilibrium()
less_factor = 0.1
CONCS = np.array(
[
0.5 * less_factor * 150.0e-3,
0.5 * less_factor * 150.0e-3,
0.5 * less_factor * 50.0e-3,
0.5 * less_factor * 2 * 50.0e-3,
]
)
comps_aux = {
"Na+": CONCS[0] * 1e3,
"HCO3-": CONCS[1] * 1e3,
"Ca++": CONCS[2] * 1e3,
"Cl-": CONCS[3] * 1e3,
}
solution_aux = pyequion.solve_solution(comps_aux, sys_eq_aux)
guess_speciation = 10 ** (solution_aux.x)
# guess_speciation = np.array([7.33051995e-02, 6.43325906e-02, 1.82951191e-02, 7.76126264e-07,
# 2.23776785e-08, 1.90732600e-03, 8.07985599e-08, 2.03684279e-08,
# 1.51672652e-03, 3.62336563e-04, 1.36791361e-03, 1.74388571e-04,
# 1.83235834e-06, 5.33688654e-03])
sol_eq_full = pyequion.solve_solution(comps_aux)
rhoc = 2.709997e3 # kg/m3
kv = 1.0
MW_C = 12.0107
MW_Na = 22.98977
MW_Ca = 40.078
MW_CaCO3 = 100.0869
MW_Cl = 35.453
f_cryst_mol = [0, 1, 1, 0]
def calc_B(S):
# Verdoes 92
Eb = 12.8
Ks = 1.4e18
in_exp = -Eb / ((np.log(S)) ** 2)
# if in_exp < -300.0:
# return 0.0
B = Ks * S * np.exp(in_exp)
# B *= (self.V*1e-6) * 60.0 #to #/min
return B * 1e-3 # : #/m^3/s -> #/kg/s
def calc_G(S):
# Verdoes 92
# Kg = 2.4e-12 #m/s
Kg = 5.6e-10 # ref 22 in Verdoes
g = 1.8
G = Kg * (S - 1.0) ** g
return G
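# Illustrative sanity check for the kinetics above (not part of the original
# script; S = 2.0 is an arbitrary assumed supersaturation):
#   calc_G(2.0) = Kg * (2 - 1)**1.8 = 5.6e-10 m/s
#   calc_B(2.0) = Ks * 2 * exp(-Eb / ln(2)**2) * 1e-3 ≈ 7.5e3 #/kg/s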
class modelCaCO3Precip(daeModel):
def __init__(self, Name, Parent=None, Description=""):
daeModel.__init__(self, Name, Parent, Description)
self.Nmoments = daeDomain(
"Nmoments", self, dimless, "Number of Moments"
)
self.mus = daeVariable(
"mus",
no_t,
self,
"Particle Moment concentrations",
[self.Nmoments],
)
self.lmin = daeParameter("lmin", dimless, self, "Minimal size")
self.G = daeVariable("G", no_t, self, "Growth rate")
self.B0 = daeVariable("B0", no_t, self, "Nucleation rate")
self.NSpecies = daeDomain(
"NSpecies", self, dimless, "Number of Species"
)
self.NElementTotal = daeDomain(
"NElementTotal", self, dimless, "Number of Elements"
)
# self.conc_species = daeVariable("conc_species", no_t, self, "Conc Species", [self.NSpecies])
self.x_species = daeVariable(
"x_species", no_t, self, "Conc Species", [self.NSpecies]
)
self.conc_element_total = daeVariable(
"conc_element_total",
no_t,
self,
"Conc Element Total",
[self.NElementTotal],
)
# self.iap = daeVariable("iap", no_t, self, "Ionic Activity Product")
self.S = daeVariable("S", no_t, self, "Supersaturation")
self.aCapp = daeVariable("aCapp", no_t, self, "Ca++ Activity")
self.aCO3mm = daeVariable("aCO3mm", no_t, self, "CO3-- Activity")
self.pH = daeVariable("pH", no_t, self, "pH")
self.massConcCrystalBulk = daeVariable(
"massConcCrystalBulk", no_t, self, "massConcCrystalBulk"
)
self.L10 = daeVariable("L10", no_t, self, "L10")
self.TK = daeVariable("TK", no_t, self, "T")
def DeclareEquations(self):
daeModel.DeclareEquations(self)
n_species = self.NSpecies.NumberOfPoints
n_elements = self.NElementTotal.NumberOfPoints
cNa = self.conc_element_total(iNa)
cC = self.conc_element_total(iC)
cCa = self.conc_element_total(iCa)
cCl = self.conc_element_total(iCl)
C = [cNa, cC, cCa, cCl]
kappa = 1e-6 # Constant(1e-6 * m) #dimensionless auxiliary param
mu0_ref = 1e20 # dimensionless auxiliary param
# mu_refs = [mu0_ref*kappa**k for k in range(0,4)]
"Element Conservation"
rhoc_mol = rhoc / (MW_CaCO3 * 1e-3) # kg/m^3 -> mol/m^3
for j in range(0, n_elements):
eq = self.CreateEquation("EleConservation({})".format(j), "")
mu2_dim = self.mus(2) * mu0_ref * kappa ** 2
G_dim = self.G() * kappa
eq.Residual = dt(C[j]) - (
-3.0 * f_cryst_mol[j] * kv * rhoc_mol * mu2_dim * G_dim
)
eq.CheckUnitsConsistency = False
"Moment Balance"
eq = self.CreateEquation("Moment({})".format(0), "")
eq.Residual = dt(self.mus(0)) - self.B0()
eq.CheckUnitsConsistency = False
for j in range(1, 4):
eq = self.CreateEquation("Moment({})".format(j), "")
eq.Residual = dt(self.mus(j)) - (
j * self.G() * self.mus(j - 1) # + self.B0()*self.lmin()**j
)
eq.CheckUnitsConsistency = False
"Nucleation Rate"
# S = 2.0 #TO DO.
S = self.S()
eq = self.CreateEquation("NucleationRate", "")
# eq.Residual = self.B0() - calc_B(S)*1e-8
B0_calc = calc_B(S)
eq.Residual = self.B0() - (B0_calc / mu0_ref)
"Growth Rate"
eq = self.CreateEquation("GrowthRate", "")
# eq.Residual = self.G() - calc_G(S)
eq.Residual = self.G() - (calc_G(S) / kappa)
# "Temperature"
# eq = self.CreateEquation("TK", "")
# eq.Residual = self.TK() - (25.0 + 273.15)
"Chemical Speciation"
args_speciation = ([cNa, cC, cCa, cCl], self.TK(), np.nan)
x = [self.x_species(j) for j in range(n_species)]
res_species = res_speciation(x, args_speciation)
for i_sp in range(0, n_species):
eq = self.CreateEquation("c_species({})".format(i_sp))
eq.Residual = res_species[i_sp]
molal_species = [np.log10(self.x_species(j)) for j in range(n_species)]
comps = {
"Na+": cNa,
"HCO3-": cC,
"Ca++": cCa,
"Cl-": cCl,
}
solution = pyequion.get_solution_from_x(sys_eq_aux, x, comps)
"pH"
eq = self.CreateEquation("pH", "")
eq.Residual = self.pH() - solution.pH
"Supersaturation"
aCapp = pyequion.get_activity(solution, "Ca++")
aCO3mm = pyequion.get_activity(solution, "CO3--")
Ksp = 10 ** (solution.log_K_solubility["Calcite"])
iap = solution.ionic_activity_prod["Calcite"]
eq = self.CreateEquation("aCapp", "")
eq.Residual = self.aCapp() - aCapp
eq = self.CreateEquation("aCapp", "")
eq.Residual = self.aCO3mm() - aCO3mm
eq = self.CreateEquation("S", "")
eq.Residual = self.S() - Sqrt(aCapp * aCO3mm / Ksp)
"Mass of Crystal in Bulk"
eq = self.CreateEquation("massConcCrystlBulk", "")
eq.Residual = (
self.massConcCrystalBulk()
- (self.mus(3) * kappa ** 3 * mu0_ref) * rhoc * kv
)
eq = self.CreateEquation("L10", "")
eq.Residual = self.L10() - (self.mus(1) * kappa) / (
self.mus(0) + 1e-20
)
"Disturbing Temperature"
self.IF(Time() < Constant(10 * 600 * s), eventTolerance=1e-5)
eq = self.CreateEquation("TKIni", "")
eq.Residual = self.TK() - (25.0 + 273.15)
self.ELSE()
eq = self.CreateEquation("TKMod", "")
eq.Residual = self.TK() - (50.0 + 273.15)
self.END_IF()
pass
class simTutorial(daeSimulation):
def __init__(self):
daeSimulation.__init__(self)
self.m = modelCaCO3Precip("modelCaCO3Precip")
def SetUpParametersAndDomains(self):
self.m.Nmoments.CreateArray(4)
self.m.NSpecies.CreateArray(eq_idx["size"])
self.m.NElementTotal.CreateArray(4)
self.m.lmin.SetValue(0.0)
pass
def SetUpVariables(self):
nMus = self.m.Nmoments.NumberOfPoints
self.m.mus.SetInitialConditions(np.zeros(nMus))
for i_sp in range(0, self.m.NSpecies.NumberOfPoints):
# self.m.conc_species.SetInitialGuess(i_sp, guess_speciation[i_sp])
self.m.x_species.SetInitialGuess(i_sp, solution_aux.x[i_sp])
self.m.conc_element_total.SetInitialConditions(CONCS)
# self.m.iap.SetInitialGuess(1e-6)
guess_aCapp = pyequion.get_activity(solution_aux, "Ca++")
guess_aCO3mm = pyequion.get_activity(solution_aux, "CO3--")
S = (
solution_aux.ionic_activity_prod["Calcite"]
/ 10 ** (solution_aux.log_K_solubility["Calcite"])
) ** 0.5
self.m.pH.SetInitialGuess(solution_aux.pH)
self.m.aCapp.SetInitialGuess(guess_aCapp)
self.m.aCO3mm.SetInitialGuess(guess_aCO3mm)
self.m.S.SetInitialGuess(S)
self.m.TK.SetInitialGuess(25.0 + 273.15)
pass
def run(**kwargs):
simulation = simTutorial()
print(
"Supported Trilinos solvers: %s"
% pyTrilinos.daeTrilinosSupportedSolvers()
)
# lasolver = pyTrilinos.daeCreateTrilinosSolver("Amesos_Klu", "")
lasolver = pyTrilinos.daeCreateTrilinosSolver("Amesos_Umfpack", "")
# lasolver = pyTrilinos.daeCreateTrilinosSolver("Amesos_Lapack", "")
# lasolver = pyTrilinos.daeCreateTrilinosSolver("'AztecOO_ML", "")
return daeActivity.simulate(
simulation,
reportingInterval=1,
timeHorizon=20 * 60,
lasolver=lasolver,
# calculateSensitivities=True,
**kwargs
)
if __name__ == "__main__":
guiRun = (
False if (len(sys.argv) > 1 and sys.argv[1] == "console") else True
)
# run(guiRun = guiRun)
run(guiRun=False)
|
StarcoderdataPython
|
9768677
|
<filename>ssc_lcd.py
import RPi.GPIO as GPIO
import time
def lcd_init(config):
# Initialise display
lcd_byte(config,0x33,config.getboolean('lcd', 'LCD_CMD')) # 110011 Initialise
lcd_byte(config,0x32,config.getboolean('lcd', 'LCD_CMD')) # 110010 Initialise
lcd_byte(config,0x06,config.getboolean('lcd', 'LCD_CMD')) # 000110 Cursor move direction
lcd_byte(config,0x0C,config.getboolean('lcd', 'LCD_CMD')) # 001100 Display On, Cursor Off, Blink Off
lcd_byte(config,0x28,config.getboolean('lcd', 'LCD_CMD')) # 101000 Data length, number of lines, font size
lcd_byte(config,0x01,config.getboolean('lcd', 'LCD_CMD')) # 000001 Clear display
time.sleep(config.getfloat('lcd', 'E_DELAY'))
def lcd_byte(config,bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO.output(config.getint('lcd', 'LCD_RS'), mode) # RS
# High bits
GPIO.output(config.getint('lcd', 'LCD_D4'), False)
GPIO.output(config.getint('lcd', 'LCD_D5'), False)
GPIO.output(config.getint('lcd', 'LCD_D6'), False)
GPIO.output(config.getint('lcd', 'LCD_D7'), False)
if bits&0x10==0x10:
GPIO.output(config.getint('lcd', 'LCD_D4'), True)
if bits&0x20==0x20:
GPIO.output(config.getint('lcd', 'LCD_D5'), True)
if bits&0x40==0x40:
GPIO.output(config.getint('lcd', 'LCD_D6'), True)
if bits&0x80==0x80:
GPIO.output(config.getint('lcd', 'LCD_D7'), True)
# Toggle 'Enable' pin
lcd_toggle_enable(config)
# Low bits
GPIO.output(config.getint('lcd', 'LCD_D4'), False)
GPIO.output(config.getint('lcd', 'LCD_D5'), False)
GPIO.output(config.getint('lcd', 'LCD_D6'), False)
GPIO.output(config.getint('lcd', 'LCD_D7'), False)
if bits&0x01==0x01:
GPIO.output(config.getint('lcd', 'LCD_D4'), True)
if bits&0x02==0x02:
GPIO.output(config.getint('lcd', 'LCD_D5'), True)
if bits&0x04==0x04:
GPIO.output(config.getint('lcd', 'LCD_D6'), True)
if bits&0x08==0x08:
GPIO.output(config.getint('lcd', 'LCD_D7'), True)
# Toggle 'Enable' pin
lcd_toggle_enable(config)
def lcd_toggle_enable(config):
# Toggle enable
time.sleep(config.getfloat('lcd', 'E_DELAY'))
GPIO.output(config.getint('lcd', 'LCD_E'), True)
time.sleep(config.getfloat('lcd', 'E_PULSE'))
GPIO.output(config.getint('lcd', 'LCD_E'), False)
time.sleep(config.getfloat('lcd', 'E_DELAY'))
def lcd_string(config,message,line):
# Send string to display
message = message.ljust(config.getint('lcd', 'LCD_WIDTH')," ")
lcd_byte(config,line, config.getboolean('lcd', 'LCD_CMD'))
for i in range(config.getint('lcd', 'LCD_WIDTH')):
lcd_byte(config,ord(message[i]),config.getboolean('lcd', 'LCD_CHR'))
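# Minimal usage sketch (illustrative, not part of the original module).
# It assumes a configparser.ConfigParser with an [lcd] section providing the
# pin numbers (LCD_RS, LCD_E, LCD_D4..LCD_D7), LCD_WIDTH, the LCD_CHR/LCD_CMD
# flags and the E_DELAY/E_PULSE timings, and that the caller has already run
# GPIO.setmode()/GPIO.setup() for those pins. 'ssc.conf' is a placeholder name.
#
#   import configparser
#   config = configparser.ConfigParser()
#   config.read('ssc.conf')
#   lcd_init(config)
#   lcd_string(config, "Hello", 0x80)  # 0x80 is the line-1 DDRAM address on HD44780 displays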
|
StarcoderdataPython
|
366495
|
import torch
import numbers
import numpy as np
from typing import Optional
def get_Rbasis(
system: str,
modification: Optional[str],
a: Optional[float],
b: Optional[float],
c: Optional[float],
alpha: Optional[float],
beta: Optional[float],
gamma: Optional[float],
) -> torch.Tensor:
"""Create lattice vectors from lattice system and modification"""
def check_needed(**kwargs):
"""Check if all needed arguments are provided"""
for key, value in kwargs.items():
if value is None:
raise KeyError(system + " lattice system requires parameter " + key)
if not isinstance(value, numbers.Number):
raise TypeError("Lattice paramater " + key + " must be numeric")
if value <= 0.0:
raise ValueError("Lattice paramater " + key + " must be > 0")
def check_spurious(**kwargs):
"""Check if any spurious arguments are provided"""
for key, value in kwargs.items():
if value is not None:
raise KeyError(
system + " lattice system does not require" " parameter " + key
)
def check_modification(allowed_systems):
"""Check compatibility of modification with lattice system"""
if system not in allowed_systems:
raise KeyError(
modification + " modification not allowed for " + system + " lattices"
)
# Check inputs and get a, b, c, alpha, beta, gamma for all cases:
assert isinstance(system, str)
system = system.lower()
if system == "triclinic":
check_needed(a=a, b=b, c=c, alpha=alpha, beta=beta, gamma=gamma)
elif system == "monoclinic":
check_needed(a=a, b=b, c=c, beta=beta)
check_spurious(alpha=alpha, gamma=gamma)
alpha = gamma = 90.0
elif system == "orthorhombic":
check_needed(a=a, b=b, c=c)
check_spurious(alpha=alpha, beta=beta, gamma=gamma)
alpha = beta = gamma = 90.0
elif system == "tetragonal":
check_needed(a=a, c=c)
check_spurious(b=b, alpha=alpha, beta=beta, gamma=gamma)
b = a
alpha = beta = gamma = 90.0
elif system == "rhombohedral":
check_needed(a=a, alpha=alpha)
check_spurious(b=b, c=c, beta=beta, gamma=gamma)
b = c = a
beta = gamma = alpha
elif system == "hexagonal":
check_needed(a=a, c=c)
check_spurious(b=b, alpha=alpha, beta=beta, gamma=gamma)
b = a
alpha = beta = 90.0
gamma = 120.0
elif system == "cubic":
check_needed(a=a)
check_spurious(b=b, c=c, alpha=alpha, beta=beta, gamma=gamma)
b = c = a
alpha = beta = gamma = 90.0
else:
raise KeyError("Unknown lattice system: " + system)
# Confirm that all geometry parameters are now available:
assert isinstance(a, numbers.Number)
assert isinstance(b, numbers.Number)
assert isinstance(c, numbers.Number)
assert isinstance(alpha, numbers.Number)
assert isinstance(beta, numbers.Number)
assert isinstance(gamma, numbers.Number)
# Compute base lattice vectors:
cos_alpha = np.cos(np.deg2rad(alpha))
cos_beta = np.cos(np.deg2rad(beta))
cos_gamma = np.cos(np.deg2rad(gamma))
sin_gamma = np.sin(np.deg2rad(gamma))
v0 = np.array((1.0, 0, 0))
v1 = np.array((cos_gamma, sin_gamma, 0))
v2 = np.array((cos_beta, (cos_alpha - cos_beta * cos_gamma) / sin_gamma, 0))
v2[2] = np.sqrt(1 - (v2 ** 2).sum())
Rbasis = torch.tensor(np.array([a * v0, b * v1, c * v2]).T)
# Apply modifications if any:
if modification is None:
M = torch.eye(3) # transformation from base lattice
else:
assert isinstance(modification, str)
modification = modification.lower()
if modification == "body-centered":
check_modification(["orthorhombic", "tetragonal", "cubic"])
M = 0.5 * torch.tensor([[-1, 1, 1], [1, -1, 1], [1, 1, -1]])
elif modification == "base-centered":
check_modification(["monoclinic"])
M = 0.5 * torch.tensor([[1, -1, 0], [1, 1, 0], [0, 0, 2]])
elif modification == "face-centered":
check_modification(["orthorhombic", "cubic"])
M = 0.5 * torch.tensor([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
else:
raise KeyError("Unknown lattice modification: " + modification)
return Rbasis @ M
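# Usage sketch (illustrative, not part of the original module): a face-centred
# cubic cell with an assumed lattice constant a = 5.43; parameters the cubic
# system does not need are passed explicitly as None.
#
#   Rbasis = get_Rbasis("cubic", "face-centered",
#                       a=5.43, b=None, c=None,
#                       alpha=None, beta=None, gamma=None)
#   # Rbasis is a 3x3 torch.Tensor whose columns are the primitive lattice vectors.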
|
StarcoderdataPython
|
120036
|
<filename>bot.py
# initiating bot
import os
import discord
from dotenv import load_dotenv
from discord.ext import commands
from f.alive import keep_alive
import random
# get .env secrets
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
OWNERS = os.getenv('OWNERS').split(", ")
# import cogs
initial_extensions = [
"jishaku",
"cogs.ai"
]
# rich presence
activity = discord.Activity(
name="100 commits to GitHub? Sure, why not? Meanwhile this bot commits suicide from bugs.",
type=discord.ActivityType.playing
)
# Discord Intents settings
intents = discord.Intents.default()
intents.members = True
intents.presences = True
bot = commands.Bot(
help_command=None,
command_prefix=["c:", "c: "],
activity=activity,
status=discord.Status.online,
afk=False,
intents=intents,
strip_after_prefix=True,
owner_ids=OWNERS,
case_insensitive=True
)
# load extensions
if __name__ == '__main__':
for extension in initial_extensions:
bot.load_extension(extension)
print(f"📥 {extension}")
@bot.event
async def on_ready():
# print the bot's status
randcode = random.randint(1000000, 9999999)
print(f'{bot.user} has connected to Discord! [{randcode}]')
print(f'Successfully logged in and booted...!')
@bot.command(
name="help",
)
async def help(ctx):
await ctx.send(
embed=discord.Embed(
title="How to use this bot",
description="https://github.com/writeblankspace/aichatbot/wiki",
)
)
# run the bot
keep_alive()
bot.run(TOKEN)
|
StarcoderdataPython
|
5062635
|
<reponame>aaroncgw/csci-e88-final-project
import os
import json
from dateutil import parser
from http.client import IncompleteRead
import tweepy
import pykafka
import pandas as pd
from elasticsearch import Elasticsearch
class TweetStreamListener(tweepy.StreamListener):
def __init__(self):
self.client = pykafka.KafkaClient("localhost:9092")
self.kafka_topic = "TweetStreamListener"
self.producer = self.client.topics[
bytes(self.kafka_topic, "ascii")
].get_producer()
self.es = Elasticsearch(
"https://db2cb7cbe8834bb1a48f960a437f461d.us-east-1.aws.found.io:9243",
http_auth=(os.environ["ELASTIC_USERNAME"], os.environ["ELASTIC_PASSWORD"]),
)
print("Connecting...")
def on_connect(self):
print("Connected!")
def on_data(self, data):
try:
tweet = json.loads(data)
tweet["timestamp"] = parser.parse(tweet["created_at"]).strftime(
"%Y-%m-%dT%H:%M:%S.%f%z"
)
self.es.index(
index=self.kafka_topic.lower(), body=tweet
) # send raw data to elasticsearch directly
if "extended_tweet" in tweet:
tweet_text = tweet["extended_tweet"]["full_text"]
else:
tweet_text = tweet["text"]
if tweet_text:
send_data = "{}"
json_send_data = json.loads(send_data)
json_send_data["tweet"] = tweet_text.replace(",", "")
json_send_data["created_at"] = tweet["created_at"]
self.producer.produce(bytes(json.dumps(json_send_data), "ascii"))
print("Published to Kafka: " + str(json_send_data))
except Exception as e:
print("Exception: " + str(e))
print(data)
def on_error(self, status_code):
if status_code == 420:
return False # 420 error occurs when rate limit exceeds
print("Error received in TweetStreamListener: " + str(status_code))
return True # Don't kill the stream
def on_timeout(self):
print("TweetStreamListener timeout!")
return True # Don't kill the stream
def main():
LOCAL_ROOT = os.path.abspath("data") + os.sep
df = pd.read_csv(LOCAL_ROOT + "twitter_userid.csv")
user_ids = (
df["id"].apply(str).to_list()
)  # tweepy can only filter tweets by user ids, not by usernames
auth = tweepy.OAuthHandler(os.environ["KEY"], os.environ["KEY_SECRET"])
auth.set_access_token(os.environ["TOKEN"], os.environ["TOKEN_SECRET"])
api = tweepy.API(auth)
while True:
try:
tweetStreamListener = tweepy.Stream(
auth=api.auth, listener=TweetStreamListener()
)
# tweetStreamListener.filter(languages=['en'], track=['bitcoin'])  # filter by keywords
tweetStreamListener.filter(
follow=user_ids[:50], languages=["en"]
)  # filter by user ids
except IncompleteRead:
# Oh well, reconnect and keep trucking
continue
except KeyboardInterrupt:
# Or however you want to exit this loop
tweetStreamListener.disconnect()
break
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
126719
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_photonic_media_frequency_constraint import TapiPhotonicMediaFrequencyConstraint # noqa: F401,E501
from tapi_server import util
class TapiPhotonicMediaSpectrumBand(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, lower_frequency=None, upper_frequency=None, frequency_constraint=None): # noqa: E501
"""TapiPhotonicMediaSpectrumBand - a model defined in OpenAPI
:param lower_frequency: The lower_frequency of this TapiPhotonicMediaSpectrumBand. # noqa: E501
:type lower_frequency: int
:param upper_frequency: The upper_frequency of this TapiPhotonicMediaSpectrumBand. # noqa: E501
:type upper_frequency: int
:param frequency_constraint: The frequency_constraint of this TapiPhotonicMediaSpectrumBand. # noqa: E501
:type frequency_constraint: TapiPhotonicMediaFrequencyConstraint
"""
self.openapi_types = {
'lower_frequency': int,
'upper_frequency': int,
'frequency_constraint': TapiPhotonicMediaFrequencyConstraint
}
self.attribute_map = {
'lower_frequency': 'lower-frequency',
'upper_frequency': 'upper-frequency',
'frequency_constraint': 'frequency-constraint'
}
self._lower_frequency = lower_frequency
self._upper_frequency = upper_frequency
self._frequency_constraint = frequency_constraint
@classmethod
def from_dict(cls, dikt) -> 'TapiPhotonicMediaSpectrumBand':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.photonic.media.SpectrumBand of this TapiPhotonicMediaSpectrumBand. # noqa: E501
:rtype: TapiPhotonicMediaSpectrumBand
"""
return util.deserialize_model(dikt, cls)
@property
def lower_frequency(self):
"""Gets the lower_frequency of this TapiPhotonicMediaSpectrumBand.
The lower frequency bound of the media channel spectrum specified in MHz # noqa: E501
:return: The lower_frequency of this TapiPhotonicMediaSpectrumBand.
:rtype: int
"""
return self._lower_frequency
@lower_frequency.setter
def lower_frequency(self, lower_frequency):
"""Sets the lower_frequency of this TapiPhotonicMediaSpectrumBand.
The lower frequency bound of the media channel spectrum specified in MHz # noqa: E501
:param lower_frequency: The lower_frequency of this TapiPhotonicMediaSpectrumBand.
:type lower_frequency: int
"""
self._lower_frequency = lower_frequency
@property
def upper_frequency(self):
"""Gets the upper_frequency of this TapiPhotonicMediaSpectrumBand.
The upper frequency bound of the media channel spectrum specified in MHz # noqa: E501
:return: The upper_frequency of this TapiPhotonicMediaSpectrumBand.
:rtype: int
"""
return self._upper_frequency
@upper_frequency.setter
def upper_frequency(self, upper_frequency):
"""Sets the upper_frequency of this TapiPhotonicMediaSpectrumBand.
The upper frequency bound of the media channel spectrum specified in MHz # noqa: E501
:param upper_frequency: The upper_frequency of this TapiPhotonicMediaSpectrumBand.
:type upper_frequency: int
"""
self._upper_frequency = upper_frequency
@property
def frequency_constraint(self):
"""Gets the frequency_constraint of this TapiPhotonicMediaSpectrumBand.
:return: The frequency_constraint of this TapiPhotonicMediaSpectrumBand.
:rtype: TapiPhotonicMediaFrequencyConstraint
"""
return self._frequency_constraint
@frequency_constraint.setter
def frequency_constraint(self, frequency_constraint):
"""Sets the frequency_constraint of this TapiPhotonicMediaSpectrumBand.
:param frequency_constraint: The frequency_constraint of this TapiPhotonicMediaSpectrumBand.
:type frequency_constraint: TapiPhotonicMediaFrequencyConstraint
"""
self._frequency_constraint = frequency_constraint
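# Usage sketch (illustrative, not part of the generated module); the frequency
# values below are arbitrary examples in MHz, per the attribute docstrings above.
#
#   band = TapiPhotonicMediaSpectrumBand(
#       lower_frequency=191325000,
#       upper_frequency=196125000,
#   )
#   band.frequency_constraint = some_constraint  # a TapiPhotonicMediaFrequencyConstraint instance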
|
StarcoderdataPython
|
4939309
|
<reponame>sslivkoff/regression_code
"""use storm api to call other functions
- these translations are less developed than use_other_api translations
"""
import functools
import numpy as np
from .... import huth
from .... import aone
def transform_kwargs(function, kwargs_transformers=None,
output_transformer=None):
"""transforms a wrapped function's kwargs before calling it
Parameters
----------
- function: function to be decorated
- kwargs_transformers: iterable of functions that modify a dict in-place
"""
if kwargs_transformers is None:
kwargs_transformers = []
if output_transformer is None:
output_transformer = lambda outputs: outputs
@functools.wraps(function)
def wrapper(*args, **kwargs):
for transformer in kwargs_transformers:
transformer(kwargs)
outputs = function(*args, **kwargs)
return output_transformer(outputs)
return wrapper
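# Illustrative example (not part of the original module) of how transform_kwargs
# rewrites keyword arguments before the wrapped call; `some_ridge_fn` and the
# key names are hypothetical.
#
#   def _rename_x(kwargs):
#       kwargs['Rstim'] = kwargs.pop('Xtrain')
#
#   fit = transform_kwargs(some_ridge_fn, kwargs_transformers=[_rename_x])
#   fit(Xtrain=X, Rresp=Y)  # some_ridge_fn is called with Rstim=X, Rresp=Y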
def kernelize_kwargs(kwargs):
"""transform X data into kernels"""
if 'Xtrain' in kwargs:
kwargs['Ktrain'] = kwargs['Xtrain'].dot(kwargs['Xtrain'].T)
if 'Xtest' in kwargs:
kwargs['Ktest'] = kwargs['Xtest'].dot(kwargs['Xtrain'].T)
kwargs.pop('Xtest')
kwargs.pop('Xtrain')
def make_huth_kwargs(kwargs):
"""translate kwargs into huth.ridge api"""
kwargs['Rstim'] = kwargs.pop('Xtrain')
kwargs['Pstim'] = kwargs.pop('Xtest')
kwargs['Rresp'] = kwargs.pop('Ytrain')
kwargs['Presp'] = kwargs.pop('Ytest')
kwargs['alphas'] = kwargs.pop('ridges')
if kwargs.get('metric') == 'R^2':
kwargs['use_corr'] = False
if 'n_folds' in kwargs:
kwargs['nboots'] = kwargs.pop('n_folds')
test_fraction = 1 - kwargs.pop('train_fraction', .8)
len_blocks = kwargs.pop('len_blocks', 5)
n_samples = kwargs['Rstim'].shape[0]
kwargs['nchunks'] = int(n_samples * test_fraction / len_blocks)
kwargs['chunklen'] = len_blocks
for key in [
'weights',
'predictions',
'performance',
'verbose',
'locality',
'metric',
'Ytest_zscored',
]:
if key in kwargs:
kwargs.pop(key)
def make_aone_kwargs(kwargs, cv=False):
"""translate kwargs into aone.ridge api"""
if not cv:
if 'Xtest' in kwargs:
kwargs['Xval'] = kwargs.pop('Xtest')
if 'Ytest' in kwargs:
kwargs['Yval'] = kwargs.pop('Ytest')
if 'Ktest' in kwargs:
kwargs['Kval'] = kwargs.pop('Ktest')
if 'n_folds' in kwargs:
kwargs['nfolds'] = kwargs.pop('n_folds')
for key in [
'locality',
'Ytest_zscored',
]:
if key in kwargs:
kwargs.pop(key)
#
# # ridge functions
#
ridge_corr_wrapper = transform_kwargs(
huth.ridge.ridge_corr,
kwargs_transformers=[make_huth_kwargs],
output_transformer=lambda outputs: {
'performance': np.stack(outputs, axis=0),
},
)
solve_l2_primal_wrapper = transform_kwargs(
aone.models.solve_l2_primal,
kwargs_transformers=[make_aone_kwargs],
)
solve_l2_dual_wrapper = transform_kwargs(
aone.models.solve_l2_dual,
kwargs_transformers=[kernelize_kwargs, make_aone_kwargs],
)
#
# # cv functions
#
bootstrap_ridge_wrapper = transform_kwargs(
huth.ridge.bootstrap_ridge,
kwargs_transformers=[make_huth_kwargs],
output_transformer=lambda wt, corrs, valphas, allRcorrs, valinds: {
'weights': wt,
'performance': corrs,
'local_optimal_ridge': valphas,
},
)
cvridge_wrapper = transform_kwargs(
aone.models.cvridge,
kwargs_transformers=[functools.partial(make_aone_kwargs, cv=True)],
)
kernel_cvridge_wrapper = transform_kwargs(
aone.models.kernel_cvridge,
kwargs_transformers=[
kernelize_kwargs,
functools.partial(make_aone_kwargs, cv=True)
],
)
|
StarcoderdataPython
|
1703843
|
<reponame>Virksaabnavjot/MapperReducer<filename>train/Mapper.py
#!/usr/bin/env python
import sys
# Mapper to emit the 10 oldest passengers by age (top-N selection)
# Data source: https://www.kaggle.com/c/titanic/data
# Data header: "PassengerId" "Survived" "Pclass" "Name" "Sex" "Age" "SibSp" "Parch" "Ticket" "Fare" "Cabin" "Embarked"
myList = []
n = 10 # Number of top N records
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split data values into list
data = line.split(",")
# convert age (currently a string) to int
try:
age = int(data[6])
except ValueError:
# ignore/discard this line
continue
# add (age, record) touple to list
myList.append( (age, line) )
# sort list in reverse order
myList.sort(reverse=True)
# keep only first N records
if len(myList) > n:
myList = myList[:n]
# Print top N records
for (k,v) in myList:
print(v)
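# Example invocation (illustrative; the data file name is a placeholder).
# Local smoke test without Hadoop:
#   cat train.csv | python Mapper.py
# or plug it in as the mapper stage of a Hadoop Streaming job alongside a reducer.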
|
StarcoderdataPython
|
3521636
|
<reponame>phanak-sap/requre<filename>requre/postprocessing.py
import logging
from typing import Union, Any, Dict, Optional, List
from .constants import KEY_MINIMAL_MATCH, METATADA_KEY
from .storage import DataMiner, DataStructure, DataTypes
logger = logging.getLogger(__name__)
class DictProcessing:
def __init__(self, requre_dict: dict):
self.requre_dict = requre_dict
def match(self, selector: list, internal_object: Union[dict, list, None] = None):
if internal_object is None:
internal_object = self.requre_dict
if len(selector) == 0:
logger.debug(f"all selectors matched")
yield internal_object
# add return here, to avoid multiple returns
return
if isinstance(internal_object, dict):
for k, v in internal_object.items():
if v is None:
return
if selector and selector[0] == k:
logger.debug(f"selector {k} matched")
yield from self.match(selector=selector[1:], internal_object=v)
else:
yield from self.match(selector=selector, internal_object=v)
elif isinstance(internal_object, list):
for list_item in internal_object:
if list_item is None:
return
yield from self.match(selector=selector, internal_object=list_item)
else:
return
@staticmethod
def replace(obj: Any, key: Any, value: Any) -> None:
if isinstance(obj, dict):
for k, v in obj.items():
if k == key:
logger.debug(f"replacing: {obj[key]} by {value}")
obj[key] = value
else:
DictProcessing.replace(obj=v, key=key, value=value)
if isinstance(obj, list):
for item in obj:
DictProcessing.replace(obj=item, key=key, value=value)
@staticmethod
def minimal_match(dict_obj: Dict, metadata: Dict):
tmp_dict = dict_obj
first_item: Dict = {}
key_name = DataTypes.__name__
ds = DataTypes.List
if key_name in metadata:
ds = DataTypes(metadata.get(key_name))
logger.debug(f"Use datatype: {ds}")
for cntr in range(KEY_MINIMAL_MATCH):
if not isinstance(tmp_dict, dict) or len(tmp_dict.keys()) != 1:
return False
key = list(tmp_dict.keys())[0]
value = tmp_dict[key]
tmp_dict = value
if ds == DataTypes.DictWithList:
if isinstance(tmp_dict, dict):
tmp_first_item = tmp_dict[list(tmp_dict.keys())[0]]
if isinstance(tmp_first_item, list):
first_item = tmp_first_item[0]
if ds == DataTypes.Dict:
if isinstance(tmp_dict, dict):
first_item = tmp_dict[list(tmp_dict.keys())[0]]
if ds == DataTypes.List:
if isinstance(tmp_dict, list):
first_item = tmp_dict[0]
if ds == DataTypes.Value:
if isinstance(tmp_dict, dict):
first_item = tmp_dict
if isinstance(first_item, dict) and DataMiner().LATENCY_KEY in first_item.get(
DataStructure.METADATA_KEY, {}
):
logger.debug(
f"Minimal key path ({KEY_MINIMAL_MATCH}) matched for {first_item.keys()}"
)
return True
return False
def simplify(
self, internal_object: Optional[Dict] = None, ignore_list: Optional[List] = None
):
ignore_list = ignore_list or []
internal_object = (
self.requre_dict if internal_object is None else internal_object
)
if isinstance(internal_object, dict):
if len(internal_object.keys()) == 1:
key = list(internal_object.keys())[0]
if key in [METATADA_KEY] + ignore_list:
return
if self.minimal_match(internal_object, self.requre_dict[METATADA_KEY]):
return
if isinstance(internal_object[key], dict):
value = internal_object.pop(key)
print(f"Removing key: {key} -> {list(value.keys())}")
for k, v in value.items():
internal_object[k] = v
self.simplify(
internal_object=internal_object, ignore_list=ignore_list
)
else:
for v in internal_object.values():
self.simplify(internal_object=v, ignore_list=ignore_list)
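# Usage sketch (illustrative, not part of the original module); the key name
# "latency" and the selector ["requests", "GET"] are hypothetical examples for
# a recorded requre dictionary.
#
#   processing = DictProcessing(requre_dict)
#   DictProcessing.replace(processing.requre_dict, "latency", 0)
#   for matched in processing.match(["requests", "GET"]):
#       print(matched)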
|
StarcoderdataPython
|
1883605
|
<filename>modules/ctcp_reply.py<gh_stars>1-10
#!/usr/bin/env python3
# Goshu IRC Bot
# written by <NAME> <<EMAIL>>
# licensed under the ISC license
from time import strftime, localtime
from gbot.modules import Module
class ctcp_reply(Module):
"""Provides basic CTCP replies."""
core = True
def ctcp_listener(self, event):
"""Responds to CTCP messages
@listen in ctcp highest
"""
if event['ctcp_verb'] == 'version':
ver = 'https://github.com/goshuirc/goshu'
# tell them owner nick(s) if one is online
server_name = event['server'].name
runner_level, online_runners = self.bot.accounts.online_bot_runners(server_name)
if online_runners:
trailing_s = 's' if len(online_runners) > 1 else ''
runner_msg = ("Hi, I'm an IRC bot! Online Contact{}: "
"{}".format(trailing_s, ' '.join(online_runners)))
event['source'].msg(runner_msg)
event['source'].ctcp_reply('VERSION', ver)
elif event['ctcp_verb'] == 'userinfo':
userinfostring = None
# userinfostring = "Please don't kline me, I'll play nice!"
if userinfostring:
event['source'].ctcp_reply('USERINFO', userinfostring)
elif event['ctcp_verb'] == 'clientinfo':
understood = ['CLIENTINFO', 'ERRMSG', 'PING', 'TIME', 'USERINFO', 'VERSION']
msg = 'Understood CTCP Pairs: {}'.format(','.join(understood))
event['source'].ctcp_reply('CLIENTINFO', msg)
elif event['ctcp_verb'] == 'errmsg':
# disabled below. could be bad, errmsg-storm?
# event['source'].ctcp_reply(nm_to_n(event.source, 'ERRMSG '+event.arguments()[1] +
# ':ERRMSG echo, no error has occured')
pass
elif event['ctcp_verb'] == 'time':
event['source'].ctcp_reply('TIME', strftime('%a %b %d, %H:%M:%S %Y', localtime()))
elif event['ctcp_verb'] == 'ping':
event['source'].ctcp_reply('PING', event.ctcp_text)
|
StarcoderdataPython
|
6698961
|
import logging
import os
from typing import TYPE_CHECKING, List, Dict, Any
from abc import ABC, abstractmethod
from checkov.common.graph.graph_builder import Edge
from checkov.common.graph.graph_builder.utils import calculate_hash, run_function_multithreaded
if TYPE_CHECKING:
from checkov.common.graph.graph_builder.local_graph import LocalGraph
class VariableRenderer(ABC):
MAX_NUMBER_OF_LOOPS = 50
def __init__(self, local_graph: "LocalGraph") -> None:
self.local_graph = local_graph
self.run_async = True if os.environ.get("RENDER_VARIABLES_ASYNC") == "True" else False
self.max_workers = int(os.environ.get("RENDER_ASYNC_MAX_WORKERS", 50))
self.done_edges_by_origin_vertex: Dict[int, List[Edge]] = {}
self.replace_cache: List[Dict[str, Any]] = [{}] * len(local_graph.vertices)
def render_variables_from_local_graph(self) -> None:
# find vertices with out-degree = 0 and in-degree > 0
end_vertices_indexes = self.local_graph.get_vertices_with_degrees_conditions(
out_degree_cond=lambda degree: degree == 0, in_degree_cond=lambda degree: degree > 0
)
# all the edges entering `end_vertices`
edges_to_render = self.local_graph.get_in_edges(end_vertices_indexes)
loops = 0
while len(edges_to_render) > 0:
logging.info(f"evaluating {len(edges_to_render)} edges")
# group edges that have the same origin and label together
edges_groups = self.group_edges_by_origin_and_label(edges_to_render)
if self.run_async:
run_function_multithreaded(
func=self._edge_evaluation_task,
data=edges_groups,
max_group_size=1,
num_of_workers=self.max_workers,
)
else:
for edge_group in edges_groups:
self._edge_evaluation_task([edge_group])
for edge in edges_to_render:
origin = edge.origin
self.done_edges_by_origin_vertex.setdefault(origin, []).append(edge)
for edge in edges_to_render:
origin_vertex_index = edge.origin
out_edges = self.local_graph.out_edges.get(origin_vertex_index, [])
if all(e in self.done_edges_by_origin_vertex.get(origin_vertex_index, []) for e in out_edges):
end_vertices_indexes.append(origin_vertex_index)
edges_to_render = self.local_graph.get_in_edges(end_vertices_indexes)
edges_to_render = list(
{
edge
for edge in edges_to_render
if edge not in self.done_edges_by_origin_vertex.get(edge.origin, [])
}
)
loops += 1
if loops >= self.MAX_NUMBER_OF_LOOPS:
logging.warning(f"Reached 50 graph edge iterations, breaking.")
break
self.local_graph.update_vertices_configs()
logging.info("done evaluating edges")
self.evaluate_non_rendered_values()
logging.info("done evaluate_non_rendered_values")
def _edge_evaluation_task(self, edges: List[List[Edge]]) -> List[Edge]:
inner_edges = edges[0]
self.evaluate_vertex_attribute_from_edge(inner_edges)
return inner_edges
@abstractmethod
def evaluate_vertex_attribute_from_edge(self, edge_list: List[Edge]) -> None:
pass
@staticmethod
def group_edges_by_origin_and_label(edges: List[Edge]) -> List[List[Edge]]:
edge_groups: Dict[str, List[Edge]] = {}
for edge in edges:
origin_and_label_hash = calculate_hash(f"{edge.origin}{edge.label}")
edge_groups.setdefault(origin_and_label_hash, []).append(edge)
return list(edge_groups.values())
def evaluate_non_rendered_values(self) -> None:
pass
|
StarcoderdataPython
|
4843930
|
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from ffn.training.import_util import import_symbol
import json
import random
import time
from functools import partial
from absl import flags, logging  # assumed: the ffn codebase uses absl for flags/logging
from train import prepare_ffn, define_data_input, train_eval_size, get_batch, EvalTracker
# NOTE: the code below also uses `mask`, `fixed_offsets`, `max_pred_offsets` and
# `save_flags`, whose imports are not shown in this snippet (they live in the ffn
# training code / the local train module).
import platform
if platform.system() == 'Windows':
DIVSTR = '::'
elif platform.system() in ['Linux', 'Darwin']:
DIVSTR = ':'
else:
DIVSTR = ':'
FLAGS = flags.FLAGS
# Options related to training data.
flags.DEFINE_string('train_coords', None,
'Glob for the TFRecord of training coordinates.') # What's the use of TFRecord
flags.DEFINE_string('data_volumes', None,
'Comma-separated list of <volume_name>:<volume_path>:'
'<dataset>, where volume_name need to match the '
'"label_volume_name" field in the input example, '
'volume_path points to HDF5 volumes containing uint8 '
'image data, and `dataset` is the name of the dataset '
'from which data will be read.')
flags.DEFINE_string('label_volumes', None,
'Comma-separated list of <volume_name>:<volume_path>:'
'<dataset>, where volume_name need to match the '
'"label_volume_name" field in the input example, '
'volume_path points to HDF5 volumes containing int64 '
'label data, and `dataset` is the name of the dataset '
'from which data will be read.')
flags.DEFINE_string('model_name', None,
'Name of the model to train. Format: '
'[<packages>.]<module_name>.<model_class>, if packages is '
'missing "ffn.training.models" is used as default.')
flags.DEFINE_string('model_args', None,
'JSON string with arguments to be passed to the model '
'constructor.')
# Training infra options.
flags.DEFINE_string('train_dir', '/tmp',
'Path where checkpoints and other data will be saved.')
flags.DEFINE_string('master', '', 'Network address of the master.')
flags.DEFINE_integer('batch_size', 4, 'Number of images in a batch.')
flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.')
flags.DEFINE_integer('ps_tasks', 0, 'Number of tasks in the ps job.')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to train for.')
flags.DEFINE_integer('replica_step_delay', 300,
'Require the model to reach step number '
'<replica_step_delay> * '
'<replica_id> before starting training on a given '
'replica.')
flags.DEFINE_integer('summary_rate_secs', 120,
'How often to save summaries (in seconds).')
# FFN training options.
flags.DEFINE_float('seed_pad', 0.05,
'Value to use for the unknown area of the seed.')
flags.DEFINE_float('threshold', 0.9,
'Value to be reached or exceeded at the new center of the '
'field of view in order for the network to inspect it.')
flags.DEFINE_enum('fov_policy', 'fixed', ['fixed', 'max_pred_moves'],
'Policy to determine where to move the field of the '
'network. "fixed" tries predefined offsets specified by '
'"model.shifts". "max_pred_moves" moves to the voxel with '
'maximum mask activation within a plane perpendicular to '
'one of the 6 Cartesian directions, offset by +/- '
'model.deltas from the current FOV position.')
# TODO(mjanusz): Implement fov_moves > 1 for the 'fixed' policy.
flags.DEFINE_integer('fov_moves', 1,
'Number of FOV moves by "model.delta" voxels to execute '
'in every dimension. Currently only works with the '
'"max_pred_moves" policy.')
flags.DEFINE_boolean('shuffle_moves', True,
'Whether to randomize the order of the moves used by the '
'network with the "fixed" policy.')
flags.DEFINE_float('image_mean', None,
'Mean image intensity to use for input normalization.')
flags.DEFINE_float('image_stddev', None,
'Image intensity standard deviation to use for input '
'normalization.')
flags.DEFINE_list('image_offset_scale_map', None,
'Optional per-volume specification of mean and stddev. '
'Every entry in the list is a colon-separated tuple of: '
'volume_label, offset, scale.')
flags.DEFINE_list('permutable_axes', ['1', '2'],
'List of integers equal to a subset of [0, 1, 2] specifying '
'which of the [z, y, x] axes, respectively, may be permuted '
'in order to augment the training data.')
flags.DEFINE_list('reflectable_axes', ['0', '1', '2'],
'List of integers equal to a subset of [0, 1, 2] specifying '
'which of the [z, y, x] axes, respectively, may be reflected '
'in order to augment the training data.')
FLAGS = flags.FLAGS
def run_training_step(sess, model, fetch_summary, feed_dict):
"""Runs one training step for a single FFN FOV."""
ops_to_run = [model.train_op, model.global_step, model.logits] # train_op defined in model.set_up_optimizer()
if fetch_summary is not None:
ops_to_run.append(fetch_summary)
results = sess.run(ops_to_run, feed_dict) # get prediction for the operation
step, prediction = results[1:3]
if fetch_summary is not None:
summ = results[-1]
else:
summ = None
return prediction, step, summ
def train_ffn(model_cls, **model_kwargs):
with tf.Graph().as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks, merge_devices=True)):
# The constructor might define TF ops/placeholders, so it is important
# that the FFN is instantiated within the current context.
# model instantiation!
model = model_cls(**model_kwargs)
eval_shape_zyx = train_eval_size(model).tolist()[::-1]
eval_tracker = EvalTracker(eval_shape_zyx)
load_data_ops = define_data_input(model, queue_batch=1)
prepare_ffn(model)
merge_summaries_op = tf.summary.merge_all()
if FLAGS.task == 0:
save_flags()
# Setup the Higher Order session enviroment!
summary_writer = None
saver = tf.train.Saver(keep_checkpoint_every_n_hours=0.25)
scaffold = tf.train.Scaffold(saver=saver)
with tf.train.MonitoredTrainingSession(
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
save_summaries_steps=None,
save_checkpoint_secs=300,
config=tf.ConfigProto(log_device_placement=True, allow_soft_placement=True),
checkpoint_dir=FLAGS.train_dir,
scaffold=scaffold) as sess:
eval_tracker.sess = sess
step = int(sess.run(model.global_step)) # evaluate the step
# global_step is the TF mechanism that tracks how many batches the model has seen,
# so when a model is revived from a checkpoint, `step` is restored as well
if FLAGS.task > 0:
# To avoid early instabilities when using multiple replicas, we use
# a launch schedule where new replicas are brought online gradually.
logging.info('Delaying replica start.')
while step < FLAGS.replica_step_delay * FLAGS.task:
time.sleep(5.0)
step = int(sess.run(model.global_step))
else:
summary_writer = tf.summary.FileWriterCache.get(FLAGS.train_dir)
summary_writer.add_session_log(
tf.summary.SessionLog(status=tf.summary.SessionLog.START), step)
# Prepare the batch_it object by input shifts parameters into it
fov_shifts = list(model.shifts) # x, y, z vector collection formed by input deltas in 3*3*3-1 directions
if FLAGS.shuffle_moves:
random.shuffle(fov_shifts)
policy_map = {
'fixed': partial(fixed_offsets, fov_shifts=fov_shifts),
'max_pred_moves': max_pred_offsets
} # delta, fov_shift come into policy map to
batch_it = get_batch(lambda: sess.run(load_data_ops),
eval_tracker, model, FLAGS.batch_size,
policy_map[FLAGS.fov_policy])
t_last = time.time()
# Start major loop
while not sess.should_stop() and step < FLAGS.max_steps:
# Run summaries periodically.
t_curr = time.time()
if t_curr - t_last > FLAGS.summary_rate_secs and FLAGS.task == 0:
summ_op = merge_summaries_op
t_last = t_curr # update at summary_rate_secs
else:
summ_op = None
# Core lines to be modified w.r.t. Multi GPU computing
seed, patches, labels, weights = next(batch_it)
updated_seed, step, summ = run_training_step(
sess, model, summ_op,
feed_dict={
model.loss_weights: weights,
model.labels: labels,
model.input_patches: patches,
model.input_seed: seed,
})
# Save prediction results in the original seed array so that
# they can be used in subsequent steps.
mask.update_at(seed, (0, 0, 0), updated_seed)
# Record summaries.
if summ is not None:
logging.info('Saving summaries.')
summ = tf.Summary.FromString(summ)
# Compute a loss over the whole training patch (i.e. more than a
# single-step field of view of the network). This quantifies the
# quality of the final object mask.
summ.value.extend(eval_tracker.get_summaries())
eval_tracker.reset()
assert summary_writer is not None
summary_writer.add_summary(summ, step)
if summary_writer is not None:
summary_writer.flush()
def main(argv=()):
del argv # Unused.
model_class = import_symbol(FLAGS.model_name)
# Multiply the task number by a value large enough that tasks starting at a
# similar time cannot end up with the same seed.
seed = int(time.time() + FLAGS.task * 3600 * 24)
logging.info('Random seed: %r', seed)
random.seed(seed)
train_ffn(model_class, batch_size=FLAGS.batch_size,
**json.loads(FLAGS.model_args))
|
StarcoderdataPython
|
6501849
|
#!/usr/bin/env python
"""
Fix errors in a dataset.
For now, only removing erroneous lines is supported.
usage: %prog input errorsfile output
-x, --ext: dataset extension (type)
-m, --methods=N: comma separated list of repair methods
"""
import pkg_resources; pkg_resources.require( "bx-python" )
from bx.cookbook import doc_optparse
from galaxy import util
def main():
options, args = doc_optparse.parse( __doc__ )
methods = []
try:
if options.methods: methods = options.methods.split(",")
except:
pass
ext = options.ext
in_file = open(args[0], "r")
error_file = open(args[1], "r")
out_file = open(args[2], "w")
# string_to_object errors
error_list = util.string_to_object(error_file.read())
# index by error type and then by line number
error_lines = {}
error_types = {}
for error in error_list:
if error.linenum:
if error.linenum in error_lines:
error_lines[error.linenum].append(error)
else:
error_lines[error.linenum] = [error]
error_type = error.__class__.__name__
if error_type in error_types:
error_types[error_type].append(error)
else:
error_types[error_type] = [error]
linenum = 0
for line in in_file:
linenum += 1
# write unless
if "lines" in methods:
if linenum in error_lines:
line = None
# other processing here?
if line:
out_file.write(line)
if __name__ == "__main__":
main()
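# Example invocation (illustrative), matching the usage string in the module
# docstring; all file names are placeholders:
#   python fix_errors.py input.interval errors.txt output.interval -x interval -m lines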
|
StarcoderdataPython
|
3375978
|
import asyncio
from .engine.fanhuaji import FanhuajiEngine
from .engine.opencc import OpenCC as OpenCCEngine
class Converter():
def __init__(self, engine, converter):
self.engine = engine
self.converter = converter
def convert(self, content: str) -> str:
"""轉換 epub 內文
Args:
content (str): epub 內文字串
Returns:
[str]: 轉換後的內文
"""
converted_content = None
if not content:
return None
# OpenCC conversion
if self.engine == 'opencc':
opencc = OpenCCEngine(self.converter)
converted_content = opencc.convert(content)
payload = {
'text': content,
'converter': self.converter
}
# Fanhuaji synchronous conversion
if self.engine == 'fanhuaji':
fanhuaji = FanhuajiEngine()
converted_content = fanhuaji.convert(**payload)
# Fanhuaji asynchronous conversion
if self.engine == 'fanhuaji_async':
fanhuaji = FanhuajiEngine()
converted_content = asyncio.get_event_loop().run_until_complete(
fanhuaji.async_convert(**payload))
return converted_content
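# Usage sketch (illustrative, not part of the original module). The converter
# string depends on the engine: an OpenCC configuration such as "s2t"
# (simplified -> traditional) or a Fanhuaji converter name; both values below
# are assumptions.
#
#   converter = Converter(engine='opencc', converter='s2t')
#   print(converter.convert('汉字'))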
|
StarcoderdataPython
|
8194094
|
from injector import singleton, Module
from authserver.db.graph_database import AbstractGraphDatabase, Neo4jGraphDatabase
class GraphDatabaseModule(Module):
def configure(self, binder):
binder.bind(AbstractGraphDatabase, to=Neo4jGraphDatabase, scope=singleton)
|
StarcoderdataPython
|
5134309
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 15:32:05 2015 by Florin.Neacsu
Copyright Xcision LLC.
"""
from XcIOCommon import *
def ReadOCPparam(fname):
"""
Reads the file provided as input, assuming the
following format
|Radiation unit type: int
|Outer cup size: int
|The signded distance between the inside bottom of the OC and the
couch reference point in mm: float
|Path of the inside wall: about 5 lines; the last line only contains
the keyword 'closepath'
|Empty line
|Path of the outside wall: about 8 lines; the last line only contains
the keyword 'closepath'
|Empty line
|Fiducial curve: about 10 lines; the last line only contains the
keyword 'closefc'
Parameters
----------
fname: string
A string pointing to a file on the hdd
Returns
-------
RU: int
The radiation unit file
OC: int
The outer cup size
DistanceBottomOCToCouch: float
The signed distance in mm from the bottom of the inner wall to the
couch reference point
OCInsideWallDescription: string
A string containing the description of the inside wall. Each line is
separated by a ';'
OCOutsideWallDescription: string
A string containing the description of the outside wall. Each line is
separated by a ';'
FiducialCurveDescription: string
A string containing the description of the fiducial curve. Each line
is separated by a ';'
Raises
------
IOError:
If the fname is not pointing to an existing file
ValueError:
Whenever we try to parse to an expected format and it fails, or if
there is an inconsistency in the values within the file
IndexError:
Wrong (as in unexpected) number of elements in a vector
"""
try:
fileHandle = open(fname, 'r')
except IOError as e:
e.args += ('Invalid file name',)
raise
with fileHandle:
try:
line = fileHandle.readline()
RU = int(line)
line = fileHandle.readline()
OC = int(line)
line = fileHandle.readline()
DistanceBottomOCToCouch = float(line)
OCInsideWallDescription = GetWallDescription(fileHandle)
#there is an empty line between the wall description
#so read and discard
line = fileHandle.readline()
OCOutsideWallDescription = GetWallDescription(fileHandle)
#empty line again, read and discard
line = fileHandle.readline()
FiducialCurveDescription = GetFiducialDescription(fileHandle)
return (RU,OC,DistanceBottomOCToCouch,OCInsideWallDescription,OCOutsideWallDescription,FiducialCurveDescription)
except ValueError as e:
#raise ValueError('Invalid file format {0}\n{1}'.format(e.args, e.args))
e.args += ('Invalid file format',)
raise
except IndexError as e:
e.args += ('Invalid file format',)
raise
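# Example invocation (illustrative; the file name is a placeholder):
#   ru, oc, dist, inner_wall, outer_wall, fiducial = ReadOCPparam("R1O2.ocpparam")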
|
StarcoderdataPython
|