id (string, 2-8 chars) | text (string, 16-264k chars) | dataset_id (1 distinct value) |
---|---|---|
125307
|
# Each new term in the Fibonacci sequence is generated by adding
# the previous two terms. By starting with 1 and 2, the first 10
# terms will be:
#
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
#
# By considering the terms in the Fibonacci sequence whose values
# do not exceed four million, find the sum of the even-valued terms.
def fib_sum(limit):
return fib_calc(limit,0,1,0)
def fib_calc(limit, a, b, accum):
if b > limit:
return accum
else:
if b % 2 == 0:
accum += b
return fib_calc(limit, b, (a+b), accum)
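# Quick sanity check (illustrative): the even-valued terms not exceeding 100 are 2, 8 and 34,
# so fib_sum(100) should return 44.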
if __name__ == "__main__":
print(fib_sum(4000000))
|
StarcoderdataPython
|
3327878
|
import sys
#import numpy
import pythoncyc
meta = pythoncyc.select_organism('meta')
from pythoncyc.PToolsFrame import PFrame
class CSV2PathwayToolsPlugin:
def input(self, filename):
self.myfile = filename
filestuff = open(self.myfile, 'r')
mapper = dict()
for line in filestuff:
contents = line.split('\t')
mapper[contents[0]] = contents[1].strip()
self.csvfile = mapper['csvfile']
self.compounddb = mapper['compounddb']
def run(self):
filestuff = open(self.csvfile, 'r')
# Find the location of the Group HMDB
# And the COMP ID
for line in filestuff:
if (line.find('Group HMDB') != -1):
headings = line.split(',')
hmdbspot = headings.index('Group HMDB')
compidspot = headings.index('COMP ID')
pubchemspot = headings.index('PUBCHEM')
break
# Create a mapping from Comp ID to Group HMDB
self.comp2hmdb = dict()
for line in filestuff:
contents = line.split(',')
self.comp2hmdb[contents[compidspot]] = (contents[hmdbspot], contents[pubchemspot])
# Create a mapping from HMDB to Pathway ID
self.hmdb2pathway = dict()
self.pubchem2pathway = dict()
filestuff2 = open(self.compounddb, 'r')
for line in filestuff2:
if (not line.startswith('#')):
contents = line.split('\t')
compound = contents[0].strip()
                print(compound)
try:
tmp = PFrame(compound, meta, getFrameData=True).__dict__
                    if ('dblinks' in tmp and '|HMDB|' in tmp['dblinks']):
#print "INSERTING..."
#raw_input()
hmdbid = tmp['dblinks']['|HMDB|'][0]
self.hmdb2pathway[hmdbid] = compound
                    if ('dblinks' in tmp and '|PUBCHEM|' in tmp['dblinks']):
#print "INSERTING..."
#raw_input()
pubchemid = tmp['dblinks']['|PUBCHEM|'][0]
self.pubchem2pathway[pubchemid] = compound
except TypeError:
pass
def output(self, filename):
outputfile = open(filename, 'w')
outputfile.write('Compound\tPathwayToolsID\n')
found = 0
total = 0
#print self.hmdb2pathway
#raw_input()
for compound in self.comp2hmdb.keys():
#print compound, str(self.comp2hmdb[compound]), self.hmdb2pathway.has_key(str(self.comp2hmdb[compound]))
#raw_input()
            if str(self.comp2hmdb[compound][0]) in self.hmdb2pathway:
outputfile.write(compound+"\t"+self.hmdb2pathway[str(self.comp2hmdb[compound][0])]+"\n")
found += 1
            elif str(self.comp2hmdb[compound][1]) in self.pubchem2pathway:
outputfile.write(compound+"\t"+self.pubchem2pathway[str(self.comp2hmdb[compound][1])]+"\n")
found += 1
else:
outputfile.write(compound+"\tNOTFOUND\n")
total += 1
        print(found, "compounds found out of", total)
|
StarcoderdataPython
|
99366
|
import logging
import discord
from discord.ext import commands
class Errors(commands.Cog, name="Error handler"):
def __init__(self, bot):
self.bot = bot
self.logger = logging.getLogger(__name__)
@commands.Cog.listener()
async def on_ready(self):
self.logger.info("I'm ready!")
@commands.Cog.listener()
async def on_command_error(self, ctx, err):
if isinstance(err, commands.ConversionError):
await ctx.send(err)
elif isinstance(err, commands.MissingRequiredArgument):
await ctx.send(f"Missing required argument: `{err.param}`")
elif isinstance(err, commands.CommandInvokeError):
await ctx.send(err)
elif isinstance(err, commands.BadArgument):
await ctx.send(err)
elif isinstance(err, commands.ArgumentParsingError):
await ctx.send(err)
elif isinstance(err, commands.PrivateMessageOnly):
await ctx.send("This command can only be used in PMs.")
elif isinstance(err, commands.NoPrivateMessage):
await ctx.send("This command can only be used in Guilds.")
elif isinstance(err, commands.MissingPermissions):
perms = ", ".join(
f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms
)
await ctx.send(f"You're missing the permissions: {perms}")
elif isinstance(err, commands.BotMissingPermissions):
perms = ", ".join(
f"`{perm.replace('_', ' ').title()}`" for perm in err.missing_perms
)
await ctx.send(f"I'm missing the permissions: {perms}")
elif isinstance(err, commands.DisabledCommand):
await ctx.send(f"`{ctx.command.qualified_name}` is currently disabled.")
elif isinstance(err, discord.HTTPException):
await ctx.send(
"An error occurred while I was trying to execute a task. Are you sure I have the correct permissions?"
)
else:
self.logger.error(err)
def setup(bot):
bot.add_cog(Errors(bot))
|
StarcoderdataPython
|
6457689
|
import os
from posixpath import dirname
from sys import dont_write_bytecode
import time
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def _dict_2_df(record):
    return pd.DataFrame.from_dict(record, orient="index")
def _get_file_path(dir_path, file_name):
    return os.path.join(dir_path, file_name)
def _str_2_float(value):
    return float(value)
def _read_file(file):
file_as_dict = {}
Wt0 = []
# dwt0 = []
# flag = 1
with open(file) as file:
for i, line in enumerate(file.readlines()):
line = line.split(" ")[0]
if i == 0:
file_as_dict["lamd"] = _str_2_float(line)
elif i == 1:
file_as_dict["nT"] = _str_2_float(line)
else:
if "=" in line:
if i > 3:
break
continue
# if flag:
Wt0.append(line)
# else:
# dwt0.append(line)
file_as_dict["Wt0"] = list(map(_str_2_float, Wt0))
# file_as_dict["dwt0"] = list(map(_str_2_float, dwt0))
return file_as_dict
def read_files(file_dir):
dataset = []
for _, _, files in os.walk(file_dir):
for file in files:
file_path = _get_file_path(file_dir, file)
if _read_file(file_path):
dataset.append(_read_file(file_path))
return list(map(_dict_2_df, dataset))
def _try_2_create_directory(dir):
try:
os.makedirs(dir)
except FileExistsError:
return dir
else:
return dir
def _move_data(old_dir, new_dir, start=0, end=0):
file_list = os.listdir(old_dir)
end = len(file_list) if end == 0 else end
for file in file_list[start:end]:
src = os.path.join(old_dir, file)
dst = os.path.join(new_dir, file)
shutil.copy(src, dst)
return None
def divide_data(src_dir, dest_dir):
# Try to create folders.
train_dir = _try_2_create_directory(os.path.join(dest_dir, "train"))
validation_dir = _try_2_create_directory(
os.path.join(dest_dir, "validation"))
test_dir = _try_2_create_directory(os.path.join(dest_dir, "test"))
# Move data
_move_data(src_dir, train_dir, start=0, end=1800)
_move_data(src_dir, validation_dir, start=1800, end=2400)
_move_data(src_dir, test_dir, start=2400)
return None
def make_dir_for_current_time(target_dir, dir_name=None):
    current_time = time.strftime("%Y_%m_%d_%H_%M", time.localtime())
    # os.path.join cannot take None, so fall back to just the timestamped directory
    dir = os.path.join(target_dir, current_time, dir_name or "")
return _try_2_create_directory(dir)
def visualize(input_tensor):
x = np.asarray([i/500 for i in range(50001)]).reshape(50001, 1)
data = input_tensor.numpy().reshape(50001, 1)
plt.plot(x, data)
# Set axis.
plt.title("Prediction")
plt.xlabel("t")
plt.ylabel("W(t)")
plt.axis([0, 100, -1.5e-1, 2e-1])
plt.show()
input()
return None
# Split up dataset.
# src_dir = "D:\wt0_data\POM_Galerkin_Lizy_data"
# dest_dir = "D:\wt0_data"
# divide_data(src_dir,dest_dir)
|
StarcoderdataPython
|
11306820
|
# Create your tasks here
from __future__ import absolute_import, unicode_literals
import time
from celery import shared_task
@shared_task
def add(x, y):
time.sleep(10)
    print('the sum is:', x + y)
return x + y
@shared_task
def mul(x, y):
return x * y
@shared_task
def xsum(numbers):
return sum(numbers)
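# Illustrative usage (assumes a Celery app and broker are configured elsewhere in the
# project); .delay() enqueues the task on a worker and returns an AsyncResult:
#   add.delay(2, 3)   # the worker sleeps 10s, prints the sum, returns 5
#   mul.delay(4, 5)   # returns 20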
|
StarcoderdataPython
|
1648249
|
<gh_stars>1-10
import tensorflow as tf
from keras.models import load_model
from FeatureExtraction import stft_matrix, get_random_samples
from FeaturePreprocess import prep_full_test, keras_img_prep
def get_predictions_from_cnn(audio):
data = get_random_samples(stft_matrix(audio),46,125)
data = prep_full_test(data)
# 513x125x1 for spectrogram with crop size of 125 pixels
img_rows, img_cols, img_depth = data.shape[1], data.shape[2], 1
# reshape image input for Keras
# used Theano dim_ordering (th), (# chans, # images, # rows, # cols)
data, input_shape = keras_img_prep(data, img_depth, img_rows, img_cols)
model = load_model('cnn_5100.h5')
model.pop()
model.pop()
model.pop()
model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
return model.predict(data)
|
StarcoderdataPython
|
9794371
|
<reponame>woolpeeker/Face_Pytorch
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import warnings
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import List, Tuple, Optional
import torch
import torch.nn as nn
def _with_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances.
Example::
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
>>> foo_instance1 = foo_builder()
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1) == id(foo_instance2)
False
"""
class _PartialWrapper(object):
def __init__(self, p):
self.p = p
def __call__(self, *args, **keywords):
return self.p(*args, **keywords)
def __repr__(self):
return self.p.__repr__()
with_args = _with_args
r = _PartialWrapper(partial(cls_or_self, **kwargs))
return r
ABC = ABCMeta(str("ABC"), (object,), {}) # compatible with Python 2 *and* 3:
class ObserverBase(ABC, nn.Module):
r"""Base observer Module.
Any observer implementation should derive from this class.
Concrete observers should follow the same API. In forward, they will update
the statistics of the observed Tensor. And they should provide a
`calculate_qparams` function that computes the quantization parameters given
the collected statistics.
Args:
dtype: Quantized data type
"""
def __init__(self):
super(ObserverBase, self).__init__()
@abstractmethod
def forward(self, x):
pass
@abstractmethod
def calculate_qparams(self, **kwargs):
pass
with_args = classmethod(_with_args)
def calculate_qparams_symmetric(min_val, max_val, qmin, qmax):
"""quant_x = int(float_x / scale)
scale is the unit size
zero_point is always 0"""
if min_val == 0 or max_val == 0:
raise ValueError(
"must run observer before calling calculate_qparams.\
Returning default scale and zero point "
)
    assert qmin < 0 and qmax > 0, "qmin must be negative and qmax positive"
    assert min_val < 0 and max_val > 0, "observed min must be negative and max positive"
max_val = max(abs(min_val), abs(max_val))
scale = max_val / max(abs(qmin), abs(qmax))
scale = max(scale, 1e-8)
scale = 0.5 ** math.floor(math.log(scale, 0.5))
zero_point = 0
if scale == 0:
raise ValueError('scale is 0')
return scale, zero_point
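# Illustrative only (not part of the original file): a minimal concrete observer built on the
# pieces above. The int8-style range used below is an assumption for demonstration.
class MinMaxObserver(ObserverBase):
    def __init__(self, qmin=-128, qmax=127):
        super(MinMaxObserver, self).__init__()
        self.qmin = qmin
        self.qmax = qmax
        self.min_val = 0.0
        self.max_val = 0.0
    def forward(self, x):
        # update running statistics of the observed tensor
        self.min_val = min(self.min_val, x.min().item())
        self.max_val = max(self.max_val, x.max().item())
        return x
    def calculate_qparams(self, **kwargs):
        return calculate_qparams_symmetric(self.min_val, self.max_val, self.qmin, self.qmax)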
|
StarcoderdataPython
|
204479
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Unlimited developers
"""
Tests the cashaccount features of the Electrum server.
"""
from test_framework.util import waitFor, assert_equal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.electrumutil import compare, bitcoind_electrum_args, ElectrumConnection
from test_framework.nodemessages import FromHex, CTransaction, CTxOut, ToHex
from test_framework.blocktools import create_transaction
from test_framework.cashaddr import decode as decode_addr
from test_framework.script import *
import asyncio
CASHACCONT_PREFIX = bytes.fromhex("01010101")
DATATYPE_KEYHASH = bytes.fromhex("01")
class ElectrumCashaccountTests(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [bitcoind_electrum_args()]
def create_cashaccount_tx(self, n, spend, name, address):
fee = 500
tx = create_transaction(
FromHex(CTransaction(), n.getrawtransaction(spend['txid'])),
spend['vout'], b"", spend['satoshi'] - fee)
_, _, keyhash = decode_addr(address)
name = bytes(name, 'ascii')
keyhash = DATATYPE_KEYHASH + keyhash
cashaccount_script = CScript([OP_RETURN, CASHACCONT_PREFIX, name, keyhash])
tx.vout.append(CTxOut(0, cashaccount_script))
tx.rehash()
return n.signrawtransaction(ToHex(tx))['hex']
def sync_electrs(self, n):
waitFor(10, lambda: compare(n, "index_height", n.getblockcount()))
async def test_basic(self, n, spends, cli):
# Tests adding one cashaccount registration
tx = self.create_cashaccount_tx(n, spends.pop(), "satoshi", n.getnewaddress())
n.sendrawtransaction(tx)
n.generate(1)
self.sync_electrs(n)
res = await cli.call("cashaccount.query.name", "satoshi", n.getblockcount())
assert_equal(1, len(res))
account = res[0]
assert_equal(tx, account['tx'])
assert_equal(n.getblockcount(), account['height'])
assert_equal(n.getbestblockhash(), account['blockhash'])
async def test_duplicate_registration(self, n, spends, cli):
# If a block has multiple transactions registering the same name, all
# should be returned
tx1 = self.create_cashaccount_tx(n, spends.pop(), "nakamoto", n.getnewaddress())
tx2 = self.create_cashaccount_tx(n, spends.pop(), "nakamoto", n.getnewaddress())
# Case of name should be ignored when retrieving accounts
tx3 = self.create_cashaccount_tx(n, spends.pop(), "NakaMOTO", n.getnewaddress())
for tx in [tx1, tx2, tx3]:
n.sendrawtransaction(tx)
n.generate(1)
self.sync_electrs(n)
res = await cli.call("cashaccount.query.name",
"nakamoto",
n.getblockcount())
txs = list(map(lambda r: r['tx'], res))
assert_equal(3, len(txs))
assert(tx1 in txs)
assert(tx2 in txs)
assert(tx3 in txs)
for account in res:
assert_equal(n.getblockcount(), account['height'])
assert_equal(n.getbestblockhash(), account['blockhash'])
def run_test(self):
n = self.nodes[0]
n.generate(200)
async def async_tests():
cli = ElectrumConnection()
await cli.connect()
spends = n.listunspent()
await self.test_basic(n, spends, cli)
await self.test_duplicate_registration(n, spends, cli)
loop = asyncio.get_event_loop()
loop.run_until_complete(async_tests())
if __name__ == '__main__':
ElectrumCashaccountTests().main()
|
StarcoderdataPython
|
11261162
|
<reponame>contek-io/contek-tusk
from __future__ import annotations
from typing import Optional, Dict
DATABASE = 'database'
TABLE = 'table'
class Table:
def __init__(
self,
database: Optional[str],
table_name: str,
time_column: Optional[str] = None,
) -> None:
self._database = database
self._table_name = table_name
self._time_column = time_column
@classmethod
def from_str(cls, str_value: str) -> Table:
split = str_value.split('.')
if len(split) == 2:
return cls(split[0], split[1])
if len(split) == 1:
return cls(None, split[0])
raise ValueError(str_value)
@classmethod
def from_dict(cls, key_values: Dict[str, str]) -> Table:
database = key_values.get(DATABASE)
table = key_values.get(TABLE)
if table is None:
raise ValueError('Table not specified')
return cls(database, table)
def get_full_name(self) -> str:
if self._database is None:
return self._table_name
return f"{self._database}.{self._table_name}"
def get_time_column(self) -> Optional[str]:
return self._time_column
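# Illustrative usage (the names below are made up, not from the original project):
#   Table.from_str("analytics.events").get_full_name()   -> "analytics.events"
#   Table.from_str("events").get_full_name()              -> "events"
#   Table.from_dict({"database": "analytics", "table": "events"})  # equivalent to the first form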
|
StarcoderdataPython
|
8140941
|
import os
import torch
import tabulate
import torch.nn as nn
import pandas as pd
import matplotlib
import numpy as np
matplotlib.use('svg')
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 24})
class Hook_record_input():
def __init__(self, module):
self.hook = module.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.input = input
def close(self):
self.hook.remove()
class Hook_record_grad():
def __init__(self, module):
self.hook = module.register_backward_hook(self.hook_fn)
def hook_fn(self, module, grad_input, grad_output):
self.grad_output = grad_output
def close(self):
self.hook.remove()
class Hook_sparsify_grad_input():
def __init__(self, module, gamma=0.5):
self.hook = module.register_backward_hook(self.hook_fn)
self.gamma = gamma
def hook_fn(self, module, grad_input, grad_output):
num_grad_input_to_keep = int(grad_input[0].numel() * self.gamma) # grad_input contains grad for input, weight and bias
threshold, _ = torch.kthvalue(abs(grad_input[0]).view(-1), num_grad_input_to_keep)
grad_input_new = grad_input[0]
grad_input_new[abs(grad_input[0]) < threshold] = 0
#self.grad_input = grad_input_new
return (grad_input_new, grad_input[1], grad_input[2])
def close(self):
self.hook.remove()
def add_input_record_Hook(model, name_as_key=False):
Hooks = {}
if name_as_key:
for name,module in model.named_modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
Hooks[name] = Hook_record_input(module)
else:
for k,module in enumerate(model.modules()):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
Hooks[k] = Hook_record_input(module)
return Hooks
def add_grad_record_Hook(model, name_as_key=False):
Hooks = {}
if name_as_key:
for name,module in model.named_modules():
Hooks[name] = Hook_record_grad(module)
else:
for k,module in enumerate(model.modules()):
Hooks[k] = Hook_record_grad(module)
return Hooks
def add_sparsify_grad_input_Hook(model, gamma=0.5, name_as_key=False):
Hooks = {}
if name_as_key:
for name,module in model.named_modules():
if isinstance(module, nn.BatchNorm2d): # only sparsify grad_input of batchnorm, which is grad_output of conv2d
Hooks[name] = Hook_sparsify_grad_input(module, gamma)
elif isinstance(module, nn.Conv2d):
Hooks[name] = Hook_record_grad(module)
else:
for k,module in enumerate(model.modules()):
if isinstance(module, nn.BatchNorm2d): # only sparsify grad_input of batchnorm, which is grad_output of conv2d
Hooks[k] = Hook_sparsify_grad_input(module, gamma)
elif isinstance(module, nn.Conv2d):
Hooks[k] = Hook_record_grad(module)
return Hooks
def remove_hooks(Hooks):
for k in Hooks.keys():
Hooks[k].close()
def getBatchNormBiasRegularizer(model):
Loss = 0
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
Loss += torch.exp(m.bias).sum()
return Loss
def set_BN_bias(model, init_bias):
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.bias.data = init_bias * torch.ones_like(m.bias.data)
def get_weight_sparsity(model):
total = 0.
nonzeros = 0.
for m in model.modules():
if hasattr(m, 'mask_keep_original'):
nonzeros += m.mask_keep_original.sum()
total += m.mask_keep_original.numel()
return 0 if total == 0 else ((total - nonzeros) / total).cpu().numpy().item()
def get_activation_sparsity(Hooks):
total = 0.
nonzeros = 0.
for k in Hooks.keys():
input = Hooks[k].input[0]
input_mask = (input != 0).float()
nonzeros += input_mask.sum()
total += input_mask.numel()
input_sparsity = (total - nonzeros) / total
return input_sparsity
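# Illustrative sketch only (not part of the original script): how the helpers above fit
# together. The toy model and input size below are assumptions for demonstration.
def _example_activation_sparsity():
    toy_model = nn.Sequential(
        nn.Conv2d(3, 8, 3),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(8 * 30 * 30, 10),
    )
    hooks = add_input_record_Hook(toy_model)    # record inputs of Conv2d/Linear layers
    toy_model(torch.randn(1, 3, 32, 32))        # one forward pass populates the hooks
    sparsity = get_activation_sparsity(hooks)   # fraction of zero-valued activations
    remove_hooks(hooks)
    return sparsity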
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def print_table(values, columns, epoch, logger):
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f')
if epoch == 0:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
logger.info(table)
#@profile
def run_epoch(loader, model, criterion, optimizer=None,
phase="train", loss_scaling=1.0, lambda_BN=0.0):
assert phase in ["train", "val", "test"], "invalid running phase"
loss_sum = 0.0
correct = 0.0
if phase=="train": model.train()
elif phase=="val" or phase=="test": model.eval()
ttl = 0
#Hooks = add_input_record_Hook(model)
#Hooks_grad = add_grad_record_Hook(model)
with torch.autograd.set_grad_enabled(phase=="train"):
for i, (input, target) in enumerate(loader):
input = input.cuda()
target = target.cuda()
output = model(input)
loss = criterion(output, target)
if lambda_BN > 0:
loss += lambda_BN * getBatchNormBiasRegularizer(model)
loss_sum += loss * input.size(0)
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
ttl += input.size()[0]
if phase=="train":
optimizer.zero_grad()
loss = loss * loss_scaling # grad scaling
loss.backward()
optimizer.step()
correct = correct.cpu().item()
return {
'loss': loss_sum.cpu().item() / float(ttl),
'accuracy': correct / float(ttl) * 100.0,
}
def log2df(log_file_name):
'''
return a pandas dataframe from a log file
'''
with open(log_file_name, 'r') as f:
lines = f.readlines()
# search backward to find table header
num_lines = len(lines)
for i in range(num_lines):
if lines[num_lines-1-i].startswith('---'):
break
header_line = lines[num_lines-2-i]
num_epochs = i
columns = header_line.split()
df = pd.DataFrame(columns=columns)
for i in range(num_epochs):
df.loc[i] = [float(x) for x in lines[num_lines-num_epochs+i].split()]
return df
def plot_data_dict(data_dict_list, result_file_name, xlabel='x', ylabel='y', yscale='auto', xlim=None, ylim=None):
# change figure file type
#filetype = result_file_name.split('.')[-1]
#matplotlib.use(filetype)
# define matplotlib parameters
markers = ['s','D','X','v','^','P','X', 'p', 'o']
fig = plt.figure(figsize=(10,8), dpi=300)
ax = fig.add_subplot(1,1,1)
ymin = float('Inf')
ymax = float('-Inf')
if isinstance(data_dict_list, dict):
data_dict_list = [data_dict_list]
k = 0
for data_dict in data_dict_list:
marker = markers[k % 8]
y = np.asarray(data_dict['y'])
if 'x' in data_dict.keys():
x = np.asarray(data_dict['x'])
else:
x = np.array([x for x in range(len(y))])
if 'label' not in data_dict:
data_dict['label'] = 'exp %d' % k
# check how many non-nan values
count = 0
for y_value in y:
if np.isnan(y_value) == False:
count += 1
markevery = int(np.ceil(count / 40.))
ax.plot(x, y, marker=marker, label=data_dict['label'], alpha=0.7,
markevery=markevery)
if y.max() > ymax:
ymax = y.max()
if y.min() < ymin:
ymin = y.min()
k += 1
if len(data_dict_list) > 10:
ax.legend(loc='center left', bbox_to_anchor=(1.04,0.5), fontsize=16)
else:
ax.legend(loc='best')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if yscale == 'log' or (yscale == 'auto' and (abs(ymax/(ymin+1e-8)) >= 100 and ymin > 0)):
ax.set_yscale('log')
plt.grid()
plt.savefig(result_file_name, bbox_inches='tight')
if __name__ == "__main__":
log2df('logs/VGG16BN_FP8_TD_4_0.0_0.0_0.9375_0.99_5.0.log')
|
StarcoderdataPython
|
248115
|
from jsonschema import validate
from pkg_trainmote import libInstaller
import os.path
import json
class Validator:
def validateDict(self, json, name: str):
schema = self.get_schema(name)
if schema is not None:
try:
validate(instance=json, schema=schema)
return True
except Exception as e:
print(e)
return False
def get_schema(self, name: str):
path = "{}/schemes/{}.json".format(os.path.dirname(libInstaller.__file__), name)
try:
with open(path, 'r') as file:
schema = json.load(file)
return schema
except Exception as e:
print(e)
return None
|
StarcoderdataPython
|
3359088
|
from numpy import *
from nn.base import NNBase
from nn.math import softmax, make_onehot
from misc import random_weight_matrix
##
# Evaluation code; do not change this
##
from sklearn import metrics
def full_report(y_true, y_pred, tagnames):
cr = metrics.classification_report(y_true, y_pred, target_names=tagnames)
    print(cr)
def eval_performance(y_true, y_pred, tagnames):
pre, rec, f1, support = metrics.precision_recall_fscore_support(y_true, y_pred)
print "=== Performance (omitting 'O' class) ==="
print "Mean precision: %.02f%%" % (100*sum(pre[1:] * support[1:])/sum(support[1:]))
print "Mean recall: %.02f%%" % (100*sum(rec[1:] * support[1:])/sum(support[1:]))
print "Mean F1: %.02f%%" % (100*sum(f1[1:] * support[1:])/sum(support[1:]))
def compute_f1(y_true, y_pred, tagnames):
_, _, f1, support = metrics.precision_recall_fscore_support(y_true, y_pred)
return 100*sum(f1[1:] * support[1:])/sum(support[1:])
##
# Implement this!
##
class WindowMLP(NNBase):
"""Single hidden layer, plus representation learning."""
def __init__(self, wv, windowsize=3, dims=[None, 100, 5], reg=0.001, alpha=0.01, rseed=10):
"""
Initialize classifier model.
Arguments:
wv : initial word vectors (array |V| x n)
note that this is the transpose of the n x |V| matrix L
described in the handout; you'll want to keep it in
this |V| x n form for efficiency reasons, since numpy
            stores matrix rows contiguously.
windowsize : int, size of context window
dims : dimensions of [input, hidden, output]
input dimension can be computed from wv.shape
reg : regularization strength (lambda)
alpha : default learning rate
rseed : random initialization seed
"""
# Set regularization
self.lreg = float(reg)
self.alpha = alpha # default training rate
self.nclass = dims[2] # number of output classes
self.windowsize = windowsize # size of context window
self.n = wv.shape[1] # dimension of word vectors
dims[0] = windowsize * wv.shape[1] # input dimension
param_dims = dict(
W=(dims[1], dims[0]),
b1=(dims[1],),
U=(dims[2], dims[1]),
b2=(dims[2],),
)
param_dims_sparse = dict(L=wv.shape)
# initialize parameters: don't change this line
NNBase.__init__(self, param_dims, param_dims_sparse)
random.seed(rseed) # be sure to seed this for repeatability!
#### YOUR CODE HERE ####
self.sparams.L = wv.copy() # store own representations
self.params.W = random_weight_matrix(*self.params.W.shape)
self.params.U = random_weight_matrix(*self.params.U.shape)
# self.params.b1 = zeros((dims[1],1)) # done automatically!
# self.params.b2 = zeros((self.nclass,1)) # done automatically!
#### END YOUR CODE ####
def _acc_grads(self, window, label):
"""
Accumulate gradients, given a training point
(window, label) of the format
window = [x_{i-1} x_{i} x_{i+1}] # three ints
label = {0,1,2,3,4} # single int, gives class
Your code should update self.grads and self.sgrads,
in order for gradient_check and training to work.
So, for example:
self.grads.U += (your gradient dJ/dU)
self.sgrads.L[i] = (gradient dJ/dL[i]) # this adds an update for that index
"""
#### YOUR CODE HERE ####
##
# Forward propagation
# build input context
x = self.build_input_context(window)
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
a2 = softmax(z2)
##
# Backpropagation
# second hidden layer
delta2 = a2 - make_onehot(label, self.nclass)
self.grads.b2 += delta2
self.grads.U += outer(delta2, a1) + self.lreg * self.params.U
# first hidden layer
delta1 = (1.0 - a1**2) * self.params.U.T.dot(delta2)
self.grads.b1 += delta1
self.grads.W += outer(delta1, x) + self.lreg * self.params.W
for j, idx in enumerate(window):
start = j * self.n
stop = (j + 1) * self.n
self.sgrads.L[idx] = self.params.W[:,start:stop].T.dot(delta1)
#### END YOUR CODE ####
def build_input_context(self, window):
x = zeros((self.windowsize * self.n,))
for j, idx in enumerate(window):
start = j * self.n
stop = (j + 1) * self.n
x[start:stop] = self.sparams.L[idx]
return x
def predict_proba(self, windows):
"""
Predict class probabilities.
Should return a matrix P of probabilities,
with each row corresponding to a row of X.
windows = array (n x windowsize),
each row is a window of indices
"""
# handle singleton input by making sure we have
# a list-of-lists
if not hasattr(windows[0], "__iter__"):
windows = [windows]
#### YOUR CODE HERE ####
# doing this first as a loop
n_windows = len(windows)
P = zeros((n_windows,self.nclass))
for i in range(n_windows):
x = self.build_input_context(windows[i])
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
P[i,:] = softmax(z2)
'''
x = np.zeros((n_windows,self.windowsize * self.n))
for i in range(n):
x[i,:] = self.build_input_context(window[i])
# first hidden layer
z1 = self.params.W.dot(x) + self.params.b1
a1 = np.tanh(z1)
# second hidden layer
z2 = self.params.U.dot(a1) + self.params.b2
a2 = softmax(z2)
'''
#### END YOUR CODE ####
return P # rows are output for each input
def predict(self, windows):
"""
Predict most likely class.
Returns a list of predicted class indices;
input is same as to predict_proba
"""
#### YOUR CODE HERE ####
P = self.predict_proba(windows)
c = argmax(P, axis=1)
#### END YOUR CODE ####
return c # list of predicted classes
def compute_loss(self, windows, labels):
"""
Compute the loss for a given dataset.
windows = same as for predict_proba
labels = list of class labels, for each row of windows
"""
#### YOUR CODE HERE ####
P = self.predict_proba(windows)
N = P.shape[0]
J = -1.0 * sum(log(P[range(N),labels]))
J += (self.lreg / 2.0) * (sum(self.params.W**2.0) + sum(self.params.U**2.0))
#### END YOUR CODE ####
return J
|
StarcoderdataPython
|
228871
|
"""
Hackerrank Problem: https://www.hackerrank.com/challenges/iterables-and-iterators/problem
"""
import itertools
# Read in the inputs which consists of three lines:
# The first line contains the integer N, denoting the length of the list. The next line consists of N space-separated
# lowercase English letters, denoting the elements of the list. The third and the last line of input contains the
# integer K, denoting the number of indices to be selected.
n = int(input())
letters = map(str, input().split(" "))
k = int(input())
# Find the probability that the letter 'a' is in at least one of the K indices
combos = list(itertools.combinations(letters, k))
count = len([i for i in combos if "a" in i])
print(count/len(combos))
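# Worked example (sample values as given in the problem statement, quoted from memory):
# for N=4, letters "a a c d" and K=2 there are C(4,2)=6 index pairs, 5 of which contain
# an 'a', so the script prints 5/6 = 0.8333333333333334.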
|
StarcoderdataPython
|
3440098
|
<gh_stars>0
# -*- coding: utf-8 -*-
__version__ = '0.5.2'
import logging
import requests
#import lxml
import re
import datetime
from bs4 import BeautifulSoup as bs
from parser_exceptions import *
from parser_abc import Parser, Restaurant, Day, Food
from restaurant_urls import UNICA_RESTAURANTS as unica_urls
__foodmenu_list__ = "#content .pad .menu-list"
__foodlists__ = "#content .pad .menu-list .accord"
__opening_times__ = "#content .pad.mod .threecol"
__restaurant_infos__ = "div#maplist ul.append-bottom li.color"
__week_number__ = "#content .pad .head2"
class Unica(Parser):
# @abstractmethod
def __init__(self):
super(Unica, self).__init__("Unica", __version__)
self.logger = logging.getLogger(" {0}".format(__name__))
# @abstractmethod
def parse(self):
parse_results = []
for url in unica_urls:
page = self.load_page(url["url_fi"])
if page == 1:
# page could not be loaded, move on to next url
continue
soup = bs(page.text, 'html.parser')
restaurant, error = self.parse_page(soup, url["url_fi"])
restaurant.restaurant_info["name"] = url["name"]
restaurant.restaurant_info["id"] = url["id"]
restaurant.restaurant_info["chain"] = "unica"
if error:
self.logger.debug("Restaurant foods were not found")
parse_results.append(restaurant)
parse_date = str(datetime.date.today())
return {
"restaurants": parse_results,
"parser_version": self.version,
"parser_name": self.name,
"parse_date": parse_date
}
# @abstractmethod
def parse_page(self, soup, link):
parse_year = datetime.date.today().year
if self.assert_foodlist_exists(soup):
week_number = self.parse_week_number(soup)
weekly_foods = self.parse_foods(soup)
restaurant_info = self.parse_restaurant_info(soup, link)
restaurant = Restaurant(restaurant_info,
weekly_foods,
week_number,
parse_year)
return restaurant, False
else:
week_number = datetime.date.today().isocalendar()[1]
restaurant_info = self.parse_restaurant_info(soup, link)
restaurant = Restaurant(restaurant_info,
[],
week_number,
parse_year)
return restaurant, True
def parse_foods(self, soup):
weekly_foods = {}
week_days = soup.select(__foodlists__)
for index, day in enumerate(week_days):
try:
day_name = self.encode_remove_eol(day.h4.getText())
day_number = index
lunch_elements = day.table.select(".lunch")
diet_elements = day.table.select(".limitations")
price_elements = day.table.select(".price")
try:
alert_element = self.encode_remove_eol(day.table.find(
"span", {"class": "alert"}).getText())
                except AttributeError as e:
# alert element not found
alert_element = ""
daily_lunches = [self.encode_remove_eol(x.getText())
for x in lunch_elements]
daily_diets = [self.encode_remove_eol(x.getText())
for x in diet_elements]
daily_prices = [re.findall(r"\d\,\d\d", self.encode_remove_eol(
x.getText())) for x in price_elements]
daily_foods = [Food(name, diets, prices)
for name, diets, prices in zip(daily_lunches,
daily_diets,
daily_prices)]
weekly_foods[str(day_number)] = Day(
day_name, day_number, daily_foods, alert_element)
            except Exception as e:
self.logger.exception(e)
return weekly_foods
def parse_opening_times(self, soup):
# contains opening hours
opening_hours_elements = soup.select(__opening_times__)
if len(opening_hours_elements) == 0:
return {}
weekdays = ['ma', 'ti', 'ke', 'to', 'pe', 'la', 'su']
if len(opening_hours_elements) > 1:
for section in opening_hours_elements:
section_title = str(
self.encode_remove_eol(section.h3.get_text()))
if section_title.lower() == 'lounas':
opening_times_element = section
else:
opening_times_element = opening_hours_elements[0]
# sanitize and split the initial string
days_hours = self.parse_opening_data(
opening_times_element.p.get_text())
days_hours = self.encode_split_newline(days_hours)
# apply hotfixes to the data here, as needed
days_hours = map(self.patch_data, days_hours)
self.logger.debug(days_hours)
opening_dates = {}
for elem in days_hours:
elem_days = elem.split(' ')[0]
elem_hours = elem.split(' ')[1]
if len(elem_days) and len(elem_hours):
days = []
if '-' in elem_days:
start_index = weekdays.index(
elem_days.split('-')[0].lower())
end_index = weekdays.index(
elem_days.split('-')[1].lower()) + 1
days.append(weekdays[start_index:end_index])
else:
if '-' in elem_hours:
days.append([elem_days.lower()])
else:
break
elem_hours = self.sanitize_opening_hour(elem_hours)
elem_hours = map(self.parse_hours, elem_hours.split('-'))
for day in days[0]:
if len(day) == 2:
opening_dates[day] = (elem_hours[0], elem_hours[1])
self.logger.debug(opening_dates)
return opening_dates
def parse_opening_data(self, data):
sanitized = data
if len(sanitized):
if data[-1] == ',' or data[-1] == ' ':
sanitized = sanitized[:-1] + '\n'
sanitized = sanitized.replace(' -', '-').replace(', ', '\n')
return sanitized
def parse_hours(self, hours):
parsed = hours
if len(parsed):
if "." not in str(hours):
parsed = hours + ".00"
return parsed
def sanitize_opening_hour(self, data):
sanitized = data
if len(sanitized):
if ' -' in sanitized:
sanitized = sanitized.replace(' -', '-')
if '.-' in sanitized:
sanitized = sanitized.replace('.-', '.00-')
if sanitized[-1] == '.' and sanitized[-1] != '00.':
sanitized = sanitized[:-1]
if ',' in sanitized:
sanitized = sanitized.replace(',', '')
return sanitized
def patch_data(self, data):
sanitized = data
if len(sanitized):
# Macciavelli fix
if 'Lunch' in sanitized:
sanitized = sanitized.replace('Lunch', '').strip()
# NBSP fix
sanitized = sanitized.replace(
'\xc2\xa0', '')
# remove all extra space
sanitized = " ".join(sanitized.split())
return sanitized
def parse_restaurant_info(self, soup, url):
restaurant_elements = soup.select(__restaurant_infos__)
try:
for restaurant in restaurant_elements:
restaurant_url = self.encode_remove_eol(
restaurant.attrs['data-uri'])
if restaurant_url not in url:
pass
else:
address = self.encode_remove_eol(
restaurant.attrs['data-address'])
zip_code = self.encode_remove_eol(
restaurant.attrs['data-zip'])
post_office = self.encode_remove_eol(
restaurant.attrs['data-city'])
longitude = self.encode_remove_eol(
restaurant.attrs['data-longitude'])
latitude = self.encode_remove_eol(
restaurant.attrs['data-latitude'])
opening_times = self.parse_opening_times(
soup)
restaurant_info = {
"address": address,
"zip_code": zip_code,
"post_office": post_office,
"longitude": longitude,
"latitude": latitude,
"opening_times": opening_times
}
return restaurant_info
        except Exception as e:
self.logger.exception(e)
def parse_week_number(self, soup):
head_element = soup.select(
__week_number__)[0].getText().encode("utf-8", "ignore")
week_number = int(re.findall(r"\d\d", head_element)[0])
self.logger.debug("week number: " + str(week_number))
return week_number
def assert_foodlist_exists(self, soup):
menu_list = soup.select(__foodmenu_list__)
lunches = soup.select(__foodmenu_list__ + " .lunch")
menu_isnt_empty = len(menu_list) != 0
lunches_arent_empty = len(lunches) != 0
return (menu_isnt_empty and lunches_arent_empty)
def encode_remove_eol(self, text):
try:
return text.encode('utf-8', 'ignore').strip().replace(
'\n', '').replace('\t', '').replace('\r', '')
        except UnicodeEncodeError as e:
self.logger.exception(e)
return text
def encode_split_newline(self, text):
try:
return text.encode('utf-8', 'ignore').strip().replace(
'\t', '').replace('\r', '').split('\n')
        except UnicodeEncodeError as e:
self.logger.exception(e)
return text
def load_page(self, link):
try:
self.logger.debug(" Loading page " + link + "...")
html = requests.get(link)
self.logger.debug(" Done.")
return html
        except RequestException as e:
self.logger.exception(e)
return 1
def __repr__(self):
return "{0} version {1}".format(self.name, __version__)
|
StarcoderdataPython
|
6455047
|
import abc
from enum import Enum
import json
import typing
from typing import List, Dict
# The status of the request.
class RequestStatus(Enum):
# The request failed for any reason, see the response message.
Failed = "Failed"
    # The request was successful but the token being used on the incoming call is NOT valid.  # noqa: E501
TokenInvalid = "TokenInvalid"
# All's well!
Successful = "Successful"
# Event response class that houses attributes returned from the authentication events trigger. # noqa: E501
class _IEventResponse(abc.ABC):
def __init__(self, schema: str = None, body: str = None):
# The schema the of expected response.
self.schema = schema
# A template of the body of the expected response.
self.body = body
if body is not None:
# A JSON representation of the body.
self.jsonBody = json.loads(body)
# A class representing an action for an event.
class _IEventAction(abc.ABC):
def __init__(self, actionType: str):
# Must be overridden, this will be the 'Name' of the action in the JSON. # noqa: E501
self.actionType = actionType
action_type = typing.TypeVar("action_type", bound=_IEventAction)
# Class that binds a response that has actions
class _IActionableResponse(_IEventResponse, typing.Generic[action_type]):
def __init__(
self, actions: List[action_type], schema: str = None, body: str = None
):
super().__init__(schema, body)
# Collections of actions pertaining to the event.
self.actions = actions
# Event data class for the expected payload; it houses the common attributes shared by data events.  # noqa: E501
class _IEventData(abc.ABC):
def __init__(
self,
eventListenerId: str = None,
customExtensionId: str = None,
):
# Unique Id for the event.
self.eventListenerId = eventListenerId
# The unique internal Id of the registered custom extension.
self.customExtensionId = customExtensionId
response_type = typing.TypeVar("response_type", bound=_IEventResponse) # noqa: E501
payload_type = typing.TypeVar("payload_type", bound=_IEventData)
# Abstract base event class to house common event request attributes.
class _IEventRequest(abc.ABC, typing.Generic[response_type, payload_type]):
def __init__(
self,
requestStatus: RequestStatus,
response: response_type,
payload: payload_type,
statusMessage: str = None,
queryParameters: Dict[str, str] = None,
):
# A user friendly message (containing errors), that the authentication event returns. # noqa: E501
self.statusMessage = statusMessage
# The status of the current request, see RequestStatus.
self.requestStatus = requestStatus
# Related IEventResponse
self.response = response
# Related IEventData
self.payload = payload
# Related Query Parameters
self.queryParameter = queryParameters
@staticmethod
@abc.abstractmethod
def create_instance(result: dict):
pass
class _ICloudEventRequest(
_IEventRequest,
typing.Generic[response_type, payload_type]):
def __init__(
self,
requestStatus: RequestStatus,
response: response_type,
payload: payload_type,
statusMessage: str = None,
queryParameters: Dict[str, str] = None,
type: str = None,
source: str = None,
time: str = None,
oDataType: str = None,
):
self.type = type
self.source = source
self.time = time
self.oDataType = oDataType
super().__init__(
requestStatus, response, payload, statusMessage, queryParameters
)
# base class extended to ensure objects are serializable.
class _Serializable(abc.ABC):
# method used to create json dict from object.
@abc.abstractmethod
def to_dict(self) -> dict:
pass
# method used to create json string from object.
@abc.abstractmethod
def to_json(self) -> str:
pass
# Constructs a FailedRequest .
class FailedRequest(_IActionableResponse, _Serializable):
    # Initialize the failed request with an error message.
def __init__(self, error: str):
self.error = error
    # Build a FailedRequest from an exception.
@staticmethod
def handle(error: Exception):
return FailedRequest(str(error))
# Converts to object to a dictionary
def to_dict(self) -> dict:
return {"error": self.error}
# Returns a string representation of the failed request.
def to_json(self) -> str:
return json.dumps(self.to_dict())
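# Illustrative usage (not part of the original module):
#   FailedRequest.handle(ValueError("boom")).to_json()   ->  '{"error": "boom"}'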
|
StarcoderdataPython
|
12843738
|
<reponame>guilhermebaos/Advent-of-Code-Solutions
# Puzzle Input ----------
with open('Day06-Input.txt', 'r') as file:
puzzle = list(map(int, file.read().split(',')))
with open('Day06-Test01.txt', 'r') as file:
test01 = list(map(int, file.read().split(',')))
# Main Code ----------
# Count the first few fish and organize them by age in a dictionary
def count_first_fish(fish_list: list):
fish_per_age = dict()
for fish in fish_list:
fish_per_age[fish] = fish_per_age.get(fish, 0) + 1
return fish_per_age
# See how many fish there are after n days at sea
def fish_in_n_days(fish_list: list, n: int):
fish_per_age = count_first_fish(fish_list)
# Simulate each day
for _ in range(n):
new_fish_per_age = dict()
for age in fish_per_age:
# Make fish reproduce and create new fish
if age == 0:
new_fish_per_age[8] = fish_per_age[age]
new_fish_per_age[6] = new_fish_per_age.get(6, 0) + fish_per_age[age]
# Decrease the timer in fish by 1
else:
new_fish_per_age[age - 1] = new_fish_per_age.get(age - 1, 0) + fish_per_age[age]
fish_per_age = new_fish_per_age.copy()
# Return the total number of fish
return sum(fish_per_age.values())
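# Sanity check against the worked example in the puzzle text (initial school 3,4,3,1,2):
# fish_in_n_days([3, 4, 3, 1, 2], 18) -> 26 and fish_in_n_days([3, 4, 3, 1, 2], 80) -> 5934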
# Tests and Solution ----------
print(fish_in_n_days(test01, 256))
print(fish_in_n_days(puzzle, 256))
|
StarcoderdataPython
|
9779021
|
<reponame>pbierkortte/Example-Data-Driven-Webapp
import json
import unittest
from src.crawler import Crawler
file_input_intercepted = open("test_data/input_intercepted.json")
input_intercepted = json.load(file_input_intercepted)
file_input_intercepted.close()
file_daily_clicks_by_country = open("test_data/output_avg_daily_clicks_by_country.json")
expected_daily_clicks_by_country = json.load(file_daily_clicks_by_country)
file_daily_clicks_by_country.close()
class MockCrawler(Crawler):
def __init__(self):
super().__init__()
def get_data(self, url, params: dict) -> dict:
request = {
"params": params,
"url": url,
}
response = list(filter(lambda rec: str(rec["request"]) == str(request), input_intercepted))[0][
"response"]
return response
class TestCrawler(unittest.TestCase):
def test_avg_daily_clicks_by_country(self):
crawler = MockCrawler()
result = crawler.avg_daily_clicks_by_country()
self.assertEqual(result, expected_daily_clicks_by_country)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11358808
|
<reponame>zzzDavid/heterocl<filename>python/heterocl/platforms.py
import os, subprocess, json, time, sys
from .devices import Platform, CPU, FPGA, PIM, Project
from .devices import HBM, PLRAM, LUTRAM, BRAM, URAM
from .tools import *
class AWS_F1(Platform):
def __init__(self):
name = "aws_f1"
devs = [
CPU("intel", "e5"),
FPGA("xilinx", "xcvu19p")
]
host = devs[0].set_backend("xocl")
xcel = devs[1].set_backend("vhls")
tool = Tool.vitis
self.AMI_ID = "ami-0a7b98fdb062be15f"
self.XPFM = "xilinx_aws-vu9p-f1_shell-v04261818_201920_2.xpfm"
self.cache = None
self.tool = tool
# attach supported memory modules
off_chip_mem = {
"HBM": HBM,
"PLRAM": PLRAM
}
for memory, memory_class in off_chip_mem.items():
host.storage[memory] = memory_class()
xcel.storage[memory] = memory_class()
on_chip_mem = {
"URAM": URAM,
"BRAM": BRAM,
"LUTRAM": LUTRAM
}
for memory, memory_class in on_chip_mem.items():
xcel.storage[memory] = memory_class()
super(AWS_F1, self).__init__(name, devs, host, xcel, tool)
class XILINX_ZC706(Platform):
def __init__(self):
name = "zc706"
devs = [
CPU("arm", "a9"),
FPGA("xilinx", "xc7z045")
]
host = devs[0].set_backend("vhls")
xcel = devs[1].set_backend("vhls")
tool = Tool.vivado_hls
on_chip_mem = {
"URAM": URAM,
"BRAM": BRAM,
"LUTRAM": LUTRAM
}
for memory, memory_class in on_chip_mem.items():
xcel.storage[memory] = memory_class()
super(XILINX_ZC706, self).__init__(name, devs, host, xcel, tool)
class INTEL_VLAB(Platform):
def __init__(self):
name = "vlab"
devs = [
CPU("intel", "e5"),
FPGA("intel", "arria10")
]
host = devs[0].set_backend("aocl")
xcel = devs[1].set_backend("aocl")
tool = Tool.aocl
super(INTEL_VLAB, self).__init__(name, devs, host, xcel, tool)
class CADENCE_STRATUS(Platform):
# TODO(Niansong): check details for stratus platform
def __init__(self):
name = "cadence_stratus"
devs = [
CPU("intel", "i7"),
FPGA("xilinx", "xc7z045")
] # placeholders
host = devs[0].set_backend("shls")
xcel = devs[1].set_backend("shls")
tool = Tool.stratus_hls
super(CADENCE_STRATUS, self).__init__(
name, devs, host, xcel, tool
)
Platform.aws_f1 = AWS_F1()
Platform.xilinx_zc706 = XILINX_ZC706()
Platform.intel_vlab = INTEL_VLAB()
Platform.cadence_stratus = CADENCE_STRATUS()
|
StarcoderdataPython
|
9655062
|
from typing import Iterable, Set
ALL_SEGS = set(("a", "b", "c", "d", "e", "f", "g"))
class Display:
"""
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
"""
a: Set[str]
b: Set[str]
c: Set[str]
d: Set[str]
e: Set[str]
f: Set[str]
g: Set[str]
def __init__(self) -> None:
self.a = set()
self.b = set()
self.c = set()
self.d = set()
self.e = set()
self.f = set()
self.g = set()
def refine(self, digit: str) -> None:
"""
aaaa
b c
b c
dddd
e f
e f
gggg
"""
numseg = len(digit)
if numseg == 5:
return
chars = sorted(digit)
charset = set(chars)
if numseg == 2:
self.c |= charset
self.f |= charset
elif numseg == 3:
for char in chars:
if char not in self.c and char not in self.f:
self.a.add(char)
break
elif numseg == 4:
for char in chars:
if char not in self.c and char not in self.f:
self.b.add(char)
self.d.add(char)
elif numseg == 6:
missing = (ALL_SEGS - charset).pop()
if missing in self.c:
# 6
self.f.remove(missing)
self.c -= self.f
elif missing in self.d:
# 0
self.b.remove(missing)
self.d -= self.b
else:
self.e.add(missing)
elif numseg == 7:
self.g = ALL_SEGS - (self.a | self.b | self.c | self.d | self.e | self.f)
def classify(self, digit: str) -> int:
numseg = len(digit)
charset = set(digit)
if numseg == 2:
return 1
elif numseg == 3:
return 7
elif numseg == 4:
return 4
elif numseg == 5:
BASE = self.a | self.d | self.g
TWO = BASE | self.c | self.e
if charset == TWO:
return 2
THREE = BASE | self.c | self.f
if charset == THREE:
return 3
FIVE = BASE | self.b | self.f
if charset == FIVE:
return 5
elif numseg == 6:
BASE = self.a | self.b | self.f | self.g
ZERO = BASE | self.c | self.e
if charset == ZERO:
return 0
SIX = BASE | self.d | self.e
if charset == SIX:
return 6
NINE = BASE | self.c | self.d
if charset == NINE:
return 9
else:
return 8
raise ValueError("oh no")
def __str__(self) -> str:
return (
f"a: {self.a} b: {self.b} c: {self.c}"
f" d: {self.d} e: {self.e} f: {self.f} g: {self.g}"
)
def main(data: Iterable[str]):
res = 0
for line in data:
display = Display()
unique, output = line.split(" | ")
for digit in sorted(unique.split(" "), key=lambda a: len(a)):
display.refine(digit)
out_digits = ""
for digit in output.split(" "):
out_digits += str(display.classify(digit))
print(out_digits)
res += int(out_digits)
print(res)
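# Example invocation (the file name is assumed): each entry is "ten unique patterns | four output digits",
# and any iterable of such lines works since main() only splits each line on " | ".
#   main(open("day08_input.txt"))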
|
StarcoderdataPython
|
1847205
|
<filename>libs/svn/nxpy/svn/_test/test_svnadmin.py
# nxpy_svn --------------------------------------------------------------------
# Copyright <NAME> 2010 - 2018
# Use, modification, and distribution are subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# See https://github.com/nmusatti/nxpy/tree/master/libs/svn. ------------------
r"""
test_svnadmin.py - tests for the svnadmin module
"""
# Python 2.5 compatibility
from __future__ import with_statement
from __future__ import absolute_import
import os.path
import nxpy.core.temp_file
import nxpy.svn.svnadmin
import nxpy.svn.svn
import nxpy.test.test
class SvnAdminTest(nxpy.test.test.TestCase):
def test_pass(self):
with nxpy.core.temp_file.TempDir(prefix="test_svnadmin_") as d:
path = os.path.join(d.name, "repo")
nxpy.svn.svnadmin.SvnAdmin().create(path)
url = "file:///" + path.replace(os.sep, "/").lstrip("/")
info = nxpy.svn.svn.Svn().info(url)
self.assertEqual(url.lower(), info.url.lower())
|
StarcoderdataPython
|
3292687
|
<filename>tests/orca_unit_testing/test_combining_merge.py
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
pdf_csv_left = None
pdf_csv_right = None
odf_csv_left = None
odf_csv_right = None
class MergeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
left_fileName = 'test_merge_left_table.csv'
right_fileName = 'test_merge_right_table.csv'
data_left = os.path.join(DATA_DIR, left_fileName)
data_left = data_left.replace('\\', '/')
data_right = os.path.join(DATA_DIR, right_fileName)
data_right = data_right.replace('\\', '/')
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
# import
Csv.odf_csv_left = orca.read_csv(data_left)
Csv.pdf_csv_left = pd.read_csv(data_left, parse_dates=[0, 1])
Csv.odf_csv_right = orca.read_csv(data_right)
Csv.pdf_csv_right = pd.read_csv(data_right)
@property
def odf_csv_left(self):
return Csv.odf_csv_left
@property
def odf_csv_right(self):
return Csv.odf_csv_right
@property
def pdf_csv_left(self):
return Csv.pdf_csv_left
@property
def pdf_csv_right(self):
return Csv.pdf_csv_right
@property
def odf_csv_left_index(self):
return Csv.odf_csv_left.set_index("type")
@property
def odf_csv_right_index(self):
return Csv.odf_csv_right.set_index("type")
@property
def pdf_csv_left_index(self):
return Csv.pdf_csv_left.set_index("type")
@property
def pdf_csv_right_index(self):
return Csv.pdf_csv_right.set_index("type")
@property
def pdf_series_right(self):
return Csv.pdf_series_left
@property
def odf_left_small(self):
return orca.DataFrame({"type": [1, 2], "val": [1, 2]}, index=[1, 2])
@property
def pdf_left_small(self):
return pd.DataFrame({"type": [1, 2], "val": [1, 2]}, index=[1, 2])
@property
def odf_left_small_index(self):
return self.odf_left_small.set_index("type")
@property
def pdf_left_small_index(self):
return self.pdf_left_small.set_index("type")
@property
def odf_right_small(self):
return orca.DataFrame({"type": [2, 3], "vol": [3, 4]}, index=[1, 2])
@property
def pdf_right_small(self):
return pd.DataFrame({"type": [2, 3], "vol": [3, 4]}, index=[1, 2])
@property
def odf_right_small_index(self):
return self.odf_right_small.set_index("type")
@property
def pdf_right_small_index(self):
return self.pdf_right_small.set_index("type")
def test_assert_original_dataframe_equal(self):
assert_frame_equal(self.odf_csv_left.to_pandas(), self.pdf_csv_left, check_dtype=False)
assert_frame_equal(self.odf_csv_right.to_pandas(), self.pdf_csv_right, check_dtype=False)
assert_frame_equal(self.odf_csv_left_index.to_pandas(), self.pdf_csv_left_index, check_dtype=False)
assert_frame_equal(self.odf_csv_right_index.to_pandas(), self.pdf_csv_right_index, check_dtype=False)
def test_merge_from_csv_param_suffix(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.sort_values("date").to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_how(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", on="type")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="inner", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="inner", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_on(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_leftonrighton(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_index(self):
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_index_param_suffix(self):
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_index_param_on(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right_index, left_on="type", right_index=True)
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right_index, left_on="type", right_index=True)
assert_frame_equal(odf_merge.sort_values(by=["id", "value"]).to_pandas(),
pdf_merge.sort_values(by=["id", "value"]), check_dtype=False, check_like=False)
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right, right_on="type", left_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right, right_on="type", left_index=True)
assert_frame_equal(odf_merge.sort_values(by=["id", "value"]).to_pandas(),
pdf_merge.sort_values(by=["id", "value"]), check_dtype=False, check_like=False)
def test_merge_small_param_on(self):
odf_merge = self.odf_left_small.merge(self.odf_right_small_index, left_on="type", right_index=True)
pdf_merge = self.pdf_left_small.merge(self.pdf_right_small_index, left_on="type", right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# odf_merge = self.odf_left_small.merge(self.odf_right_small, left_on="type", right_index=True)
# pdf_merge = self.pdf_left_small.merge(self.pdf_right_small, left_on="type", right_index=True)
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
odf_merge = self.odf_left_small_index.merge(self.odf_right_small, right_on="type", left_index=True)
pdf_merge = self.pdf_left_small_index.merge(self.pdf_right_small, right_on="type", left_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_index_param_how(self):
# how = left
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="left", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="left", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="right", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="right", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# default how = inner
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="outer", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="outer", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_param_suffix_param_how(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", on="type", suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_how_param_leftonrighton(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", left_on="type", right_on="type")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_index_param_suffix_param_how(self):
# how = left
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="left", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="left", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=True)
# how = right
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="right", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="right", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="outer", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="outer", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_dataframe_param_suffix(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
odf_merge = odf.merge(odf_other, on="key", suffixes=('_left', '_right'))
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf_merge = pdf.merge(pdf_other, on="key", suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_dataframe_param_leftonrighton(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
odf_merge = odf.merge(odf_other, left_on="key", right_on="key")
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf_merge = pdf.merge(pdf_other, left_on="key", right_on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_dataframe_how(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
# how = left
odf_merge = odf.merge(odf_other, how="left", on='key')
pdf_merge = pdf.merge(pdf_other, how="left", on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = odf.merge(odf_other, how="right", on='key')
pdf_merge = pdf.merge(pdf_other, how="right", on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = inner
odf_merge = odf.merge(odf_other, how="inner", on='key')
pdf_merge = pdf.merge(pdf_other, how="inner", on='key')
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = odf.merge(odf_other, how="outer", on='key')
pdf_merge = pdf.merge(pdf_other, how="outer", on='key')
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_dataframe_index(self):
orca_left = orca.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
orca_right = orca.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
odf_merge = orca_left.merge(orca_right, left_index=True, right_index=True)
pd_left = pd.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
pd_right = pd.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
pdf_merge = pd_left.merge(pd_right, left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_dataframe_index_param_how(self):
orca_left = orca.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
orca_right = orca.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
pd_left = pd.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
pd_right = pd.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
# default how = inner (already covered by test_merge_from_dataframe_index above)
# how = right
odf_merge = orca_left.merge(orca_right, how="right", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="right", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = inner
odf_merge = orca_left.merge(orca_right, how="inner", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="inner", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = orca_left.merge(orca_right, how="outer", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="outer", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11259014
|
<reponame>sorhus/tensorflow<filename>tensorflow/python/kernel_tests/accumulate_n_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for new version of accumulate_n op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class AccumulateNV2Test(test_util.TensorFlowTestCase):
"""Tests of the new, differentiable version of accumulate_n."""
def testFloat(self):
np.random.seed(12345)
x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllClose(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllClose(x[0] * 5,
math_ops.accumulate_n([tf_x[0]] * 5).eval())
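# Hedged extra check (not in the original test): accumulate_n should agree with a
# plain add_n over the same tensors, so inside the session above one could also do:
# self.assertAllClose(math_ops.add_n(tf_x).eval(),
#                     math_ops.accumulate_n(tf_x).eval())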
def testInt(self):
np.random.seed(54321)
x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
tf_x = ops.convert_n_to_tensor(x)
with self.test_session(use_gpu=True):
self.assertAllEqual(sum(x), math_ops.accumulate_n(tf_x).eval())
self.assertAllEqual(x[0] * 6,
math_ops.accumulate_n([tf_x[0]] * 6).eval())
def testGrad(self):
np.random.seed(42)
for num_inputs in range(1, 10):
with self.test_session(use_gpu=True) as sess:
input_vars = [
variables.Variable(10.0 * np.random.random())
for _ in range(0, num_inputs)
]
accum_n = math_ops.accumulate_n(input_vars)
sess.run(variables.global_variables_initializer())
accum_n_grad = gradients.gradients(accum_n, input_vars)
self.assertAllEqual(
np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
[g.eval() for g in accum_n_grad])
# The tests below used to be in a separate class under cwise_ops_test.py,
# which did not run in the default test target.
# Putting them here so that everything that exercises AccumulateNV2 is in
# one place and the default build runs all unit tests.
def testSimple(self):
with self.test_session():
random_arrays = [
np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
]
random_tensors = [
ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
for x in random_arrays
]
tf_val = math_ops.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, tf_val.eval())
def testZeroArgs(self):
with self.test_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
tf_val.eval()
def testWrongShape(self):
with self.test_session():
with self.assertRaises(ValueError):
a = variables.Variable(0.2)
b = variables.Variable(0.1)
math_ops.accumulate_n([a, b], shape=[2, 2]) # Should be shape=[]
def testIncompatibleShapes(self):
with self.test_session():
with self.assertRaises(ValueError):
a = variables.Variable(np.array([0.1, 0.2]))
b = variables.Variable(np.array([[0.3], [0.4]]))
math_ops.accumulate_n([a, b])
def testWrongType(self):
with self.test_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
b = variables.Variable(0.1, dtype=np.float32)
math_ops.accumulate_n([a, b], tensor_dtype=np.int32)
def testWrongTypeOneInput(self):
# Scenario that used to trigger a bug, even when testWrongType() worked
with self.test_session():
with self.assertRaises(TypeError):
a = variables.Variable(0.2, dtype=np.float32)
math_ops.accumulate_n([a], tensor_dtype=np.int32)
if __name__ == "__main__":
googletest.main()
|
StarcoderdataPython
|
12823255
|
# coding=utf-8
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
import struct
from binaryninja.architecture import Architecture
from binaryninja.types import Symbol
from binaryninja.function import InstructionInfo, InstructionTextTokenType, InstructionTextToken
try:
from m68k import M68000, OpImmediate
except ModuleNotFoundError:
import sys
import os
import binaryninja
sys.path.append(os.path.join(binaryninja.user_plugin_path(), "..", "repositories", "community", "plugins"))
from wrigjl_binaryninjam68k import M68000, OpImmediate
COPPER_INSTRUCTIONS = [ 'CMOVE', 'CSKIP', 'CWAIT', 'CEND' ]
CEND = 0xFFFFFFFE
#class A500(M68000):
class A500(Architecture):
name = "A500"
# Sizes
SIZE_BYTE = 0
SIZE_WORD = 1
SIZE_LONG = 2
# BROKEN
def perform_get_instruction_info(self, data, addr):
instr, length, _size, _source, dest, _third = self.decode_instruction(data)
if instr == 'unimplemented':
return None
result = InstructionInfo()
result.length = length
if instr in COPPER_INSTRUCTIONS:
conditional = False
branch_dest = None
return result
else:
return None
def perform_get_instruction_low_level_il(self, data, addr, il):
instr, length, size, source, dest, third = self.decode_instruction(data)
if instr is not None:
if source is not None:
pre_il = source.get_pre_il(il)
if pre_il is not None:
il.append(pre_il)
self.generate_instruction_il(il, instr, length, size, source, dest, third)
if source is not None:
post_il = source.get_post_il(il)
if post_il is not None:
il.append(post_il)
else:
il.append(il.unimplemented())
return length
def generate_instruction_il(self, il, instr, length, size, source, dest, third):
size_bytes = None
if size is not None:
size_bytes = 1 << size
if instr == 'CWAIT':
if source is not None:
il.append(source.get_source_il(il))
elif instr == 'CSKIP':
if source is not None:
il.append(source.get_source_il(il))
elif instr == 'CEND':
if source is not None:
il.append(source.get_source_il(il))
elif instr == 'CMOVE':
if source is not None:
il.append(source.get_source_il(il))
else:
il.append(il.unimplemented())
# BROKEN
def perform_get_instruction_text(self, data, addr):
instr, length, _size, source, dest, third = self.decode_instruction(data)
#print("perform_get_instruction_text: %s" % instr)
if instr == 'unimplemented':
return None
if instr in COPPER_INSTRUCTIONS:
#if size is not None:
# instr += SizeSuffix[size]
tokens = [InstructionTextToken(InstructionTextTokenType.InstructionToken, "%-10s" % instr)]
if source is not None:
tokens += source.format(addr)
if dest is not None:
if source is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += dest.format(addr)
if third is not None:
if source is not None or dest is not None:
tokens += [InstructionTextToken(InstructionTextTokenType.OperandSeparatorToken, ',')]
tokens += third.format(addr)
return tokens, length
else:
return None, None
# Yay, fixed!
def decode_instruction(self, data):
error_value = ('unimplemented', len(data), None, None, None, None)
instr = None
length = None
size = None
source = None
dest = None
third = None
if len(data) < 4:
return error_value
instruction = struct.unpack_from('>L', data)[0]
if instruction == CEND:
instr = 'CEND'
size = 4
length = 4
return instr, length, size, source, dest, third
#msb = instruction >> 8
#opcode = msb >> 4
instr_type = instruction & 0x00010001
if instr_type == 0x00010000:
comment = "CWAIT"
#comment += disassemble_wait(value)
_source = struct.unpack_from(">H", data, 0)[0]
src = OpImmediate(2, _source)
instr = comment
size = 4
length = 4
source = src
elif instr_type == 0x00010001:
comment = "CSKIP"
instr = comment
size = 4
length = 4
#mask = ((1 << 0x10) - 1) << 0x10
#_source = instruction & 0xFFFF0000
_source = struct.unpack_from(">H", data, 0)[0]
src = OpImmediate(2, _source)
source = src
#comment += disassemble_wait(value)
elif instr_type == 0x00000000 or instr_type == 0x00000001:
comment = "CMOVE"
_source = struct.unpack_from(">H", data, 0)[0]
src = OpImmediate(2, _source)
instr = comment
size = 4
length = 4
source = src
else:
print("NOT RECOGNIZED")
return instr, length, size, source, dest, third
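# Hedged usage sketch (not part of the original plugin): decode_instruction()
# classifies a 32-bit Copper word purely from CEND and (instruction & 0x00010001),
# so a quick manual check could look like the commented lines below (A500 normally
# only runs inside Binary Ninja, hence kept commented out).
# arch = A500()
# print(arch.decode_instruction(b'\xff\xff\xff\xfe'))  # CEND
# print(arch.decode_instruction(b'\x00\x01\x00\x00'))  # CWAIT
# print(arch.decode_instruction(b'\x00\x01\x00\x01'))  # CSKIP
# print(arch.decode_instruction(b'\x00\x00\x00\x00'))  # CMOVE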
|
StarcoderdataPython
|
3495694
|
<gh_stars>1-10
#---------------------------------------------------------------------
# Debug notification hook test
#
# This script start the executable and steps through the first five
# instructions. Each instruction is disassembled after execution.
#
# Original Author: <NAME> <<EMAIL>>
#
# Maintained By: IDAPython Team
#
#---------------------------------------------------------------------
import idc
from idaapi import *
import binascii
import struct
from capstone import *
md=Cs(CS_ARCH_X86,CS_MODE_32)
class MyDbgHook(DBG_Hooks):
""" Own debug hook class that implementd the callback functions """
def dbg_process_start(self, pid, tid, ea, name, base, size):
print("Process started, pid=%d tid=%d name=%s" % (pid, tid, name))
self.asmfile=open('dump.asm','wb') # file used to save the recorded instruction bytes
self.record_count=0
def dbg_process_exit(self, pid, tid, ea, code):
print("Process exited pid=%d tid=%d ea=0x%x code=%d" % (pid, tid, ea, code))
if not self.asmfile ==None:
self.asmfile.close()
# def dbg_library_unload(self, pid, tid, ea, info):
# print("Library unloaded: pid=%d tid=%d ea=0x%x info=%s" % (pid, tid, ea, info))
# return 0
# def dbg_process_attach(self, pid, tid, ea, name, base, size):
# print("Process attach pid=%d tid=%d ea=0x%x name=%s base=%x size=%x" % (pid, tid, ea, name, base, size))
# def dbg_process_detach(self, pid, tid, ea):
# print("Process detached, pid=%d tid=%d ea=0x%x" % (pid, tid, ea))
# return 0
# def dbg_library_load(self, pid, tid, ea, name, base, size):
# print ("Library loaded: pid=%d tid=%d name=%s base=%x" % (pid, tid, name, base))
def dbg_bpt(self, tid, ea):
print("0x%x %s" % (ea, GetDisasm(ea)))
codelen=get_item_size(ea)
self.record_count=self.record_count+codelen
b=get_bytes(ea,codelen)
self.asmfile.write(b)
self.asmfile.flush()
# return values:
# -1 - to display a breakpoint warning dialog
# if the process is suspended.
# 0 - to never display a breakpoint warning dialog.
# 1 - to always display a breakpoint warning dialog.
return 0
# def dbg_suspend_process(self):
# print ("Process suspended")
# def dbg_exception(self, pid, tid, ea, exc_code, exc_can_cont, exc_ea, exc_info):
# print("Exception: pid=%d tid=%d ea=0x%x exc_code=0x%x can_continue=%d exc_ea=0x%x exc_info=%s" % (
# pid, tid, ea, exc_code & idaapi.BADADDR, exc_can_cont, exc_ea, exc_info))
# # return values:
# # -1 - to display an exception warning dialog
# # if the process is suspended.
# # 0 - to never display an exception warning dialog.
# # 1 - to always display an exception warning dialog.
# return 0
def dbg_trace(self, tid, ea):
print("0x%x %s" % (ea, GetDisasm(ea)))
if idc.print_insn_mnem(ea).startswith('j'): # skip all jump instructions (they are not recorded)
return 0
if idc.print_insn_mnem(ea) == 'retn': # replace retn with lea esp,[esp+4] (adjust the stack without branching)
code=b'\x8D\x64\x24\x04' #lea esp,[esp+4]
self.asmfile.write(code)
self.asmfile.flush()
self.record_count=self.record_count+len(code)
return 0
if idc.print_insn_mnem(ea) == 'call': # replace call with a call to the next instruction ("call +5")
fix_addr=0
mnemonic=struct.pack('B',idc.get_wide_byte(ea))
op=struct.pack('i',fix_addr)
call_asm=mnemonic+op
self.asmfile.write(call_asm)
self.asmfile.flush()
self.record_count=self.record_count+get_item_size(ea)
return 0
for addr in range(ea,idc.next_head(ea)):
b=struct.pack('B',idc.get_wide_byte(addr))
self.asmfile.write(b)
self.asmfile.flush()
self.record_count=self.record_count+get_item_size(ea)
# eip = get_reg_value("EIP")
# print("0x%x %s" % (eip, GetDisasm(eip)))
# print("Trace tid=%d ea=0x%x" % (tid, ea))
# return values:
# 1 - do not log this trace event;
# 0 - log it
return 0
# def dbg_step_into(self):
# eip = get_reg_value("EIP")
# print("0x%x %s" % (eip, GetDisasm(eip)))
# def dbg_run_to(self, pid, tid=0, ea=0):
# print ("Runto: tid=%d" % tid)
# idaapi.continue_process()
# def dbg_step_over(self):
# eip = get_reg_value("EIP")
# print("0x%x %s" % (eip, GetDisasm(eip)))
# self.steps += 1
# if self.steps >= 5:
# request_exit_process()
# else:
# request_step_over()
# Remove an existing debug hook
try:
if debughook:
print("Removing previous hook ...")
debughook.unhook()
except:
pass
# Install the debug hook
debughook = MyDbgHook()
debughook.hook()
debughook.steps = 0
# Stop at the entry point
ep = get_inf_attr(INF_START_IP)
request_run_to(ep)
# Step one instruction
request_step_over()
# Start debugging
run_requests()
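# Hedged post-processing sketch (not part of the original script): the capstone
# disassembler `md` created above is otherwise unused; after a recording run the
# bytes written to dump.asm could be inspected with it, e.g.:
# with open('dump.asm', 'rb') as f:
#     for insn in md.disasm(f.read(), 0x0):
#         print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))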
|
StarcoderdataPython
|
3286527
|
<filename>genoome/payments/urls.py
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
import payments.signals # noqa
from . import views
urlpatterns = [
url(r'^paypal-callback/', include('paypal.standard.ipn.urls'),
name='paypal_callback'),
url(r'^redeem_coupon/$', login_required(views.CouponPaymentView.as_view()), name='redeem_coupon'),
]
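# Hedged usage note (assumption, not taken from this repo): an app-level urlconf
# like this is typically mounted from the project urls.py, for example:
# urlpatterns = [
#     url(r'^payments/', include('payments.urls')),
# ]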
|
StarcoderdataPython
|
1954229
|
<filename>epytope/test/external/TestExternalEpitopePrediction.py
"""
Unittest for external epitope prediction methods
"""
import unittest
import os
from epytope.Core import Allele, CombinedAllele
from epytope.Core import Peptide
from epytope.Core import Transcript
from epytope.EpitopePrediction import EpitopePredictorFactory
from epytope.EpitopePrediction import AExternalEpitopePrediction
from epytope.EpitopePrediction import NetMHC_3_4
#only for internal testing
class NetMHC_0_1(NetMHC_3_4):
__version = "0.1"
@property
def version(self):
return self.__version
class TestExternalEpitopePredictionClass(unittest.TestCase):
def setUp(self):
self.peptides_mhcI = [Peptide("SYFPEITHI"), Peptide("IHTIEPFYS")]
self.peptides_mhcII = [Peptide("AAAAAASYFPEITHI"), Peptide("IHTIEPFYSAAAAAA")]
self.mhcI = [Allele("HLA-B*07:02"), Allele("HLA-A*02:01")]
self.mhcII = [Allele("HLA-DRB1*07:01"), Allele("HLA-DRB1*15:01")]
self.mhcII_combined_alleles = [CombinedAllele("DPA1*01:03-DPB1*01:01"), CombinedAllele("DQA1*06:02-DQB1*06:31")]
self.transcript = Transcript("")
def test_multiple_inputs(self):
for m in EpitopePredictorFactory.available_methods():
for v in EpitopePredictorFactory.available_methods()[m]:
mo = EpitopePredictorFactory(m, version=v)
if isinstance(mo, AExternalEpitopePrediction) and not (mo.version=="0.1" and mo.name=="netmhc"):
print("Testing", mo.name, "version", mo.version)
try:
if any(a.name in mo.supportedAlleles for a in self.mhcII):
mo.predict(self.peptides_mhcII, alleles=self.mhcII)
if any(a.name in mo.supportedAlleles for a in self.mhcII_combined_alleles):
mo.predict(self.peptides_mhcII, alleles=self.mhcII_combined_alleles)
if any(a.name in mo.supportedAlleles for a in self.mhcI):
mo.predict(self.peptides_mhcI, alleles=self.mhcI)
print("Success")
except RuntimeError as e: #catch only those stemming from binary unavailability
if "could not be found in PATH" not in e.message:
raise e #all others do not except
else:
print(mo.name, "not available")
def test_single_epitope_input(self):
for m in EpitopePredictorFactory.available_methods():
for v in EpitopePredictorFactory.available_methods()[m]:
mo = EpitopePredictorFactory(m, version=v)
if isinstance(mo, AExternalEpitopePrediction) and not (mo.version=="0.1" and mo.name=="netmhc"):
print("Testing", mo.name, "version", mo.version)
try:
if any(a.name in mo.supportedAlleles for a in self.mhcII):
mo.predict(self.peptides_mhcII[0], alleles=self.mhcII)
if any(a.name in mo.supportedAlleles for a in self.mhcII_combined_alleles):
mo.predict(self.peptides_mhcII[0], alleles=self.mhcII_combined_alleles)
if any(a.name in mo.supportedAlleles for a in self.mhcI):
mo.predict(self.peptides_mhcI[0], alleles=self.mhcI)
print("Success")
except RuntimeError as e: #catch only those stemming from binary unavailability
if "could not be found in PATH" not in e.message:
raise e #all others do not except
else:
print(mo.name, "not available")
def test_single_allele_input(self):
for m in EpitopePredictorFactory.available_methods():
for v in EpitopePredictorFactory.available_methods()[m]:
mo = EpitopePredictorFactory(m, version=v)
if isinstance(mo, AExternalEpitopePrediction) and not (mo.version=="0.1" and mo.name=="netmhc"):
print("Testing", mo.name, "version", mo.version)
try:
if any(a.name in mo.supportedAlleles for a in self.mhcII):
mo.predict(self.peptides_mhcII, alleles=self.mhcII[0])
if any(a.name in mo.supportedAlleles for a in self.mhcII_combined_alleles):
mo.predict(self.peptides_mhcII, alleles=self.mhcII_combined_alleles[0])
if any(a.name in mo.supportedAlleles for a in self.mhcI):
mo.predict(self.peptides_mhcI, alleles=self.mhcI[0])
print("Success")
except RuntimeError as e: #catch only those stemming from binary unavailability
if "could not be found in PATH" not in e.message:
raise e #all others do not except
else:
print(mo.name, "not available")
def test_wrong_epitope_input(self):
with self.assertRaises(ValueError):
EpitopePredictorFactory("NetMHC").predict(self.transcript, alleles=self.mhcI)
def test_wrong_allele_input(self):
with self.assertRaises(ValueError):
EpitopePredictorFactory("NetMHC").predict(self.mhcI, alleles=self.transcript)
def test_wrong_internal_to_external_version(self):
with self.assertRaises(RuntimeError):
EpitopePredictorFactory("NetMHC", version="0.1").predict(self.peptides_mhcI, alleles=self.mhcI)
#--sort flag not supported by newer versions
# def test_path_option_and_optional_parameters_netmhc(self):
# netmhc = EpitopePredictorFactory("NetMHC")
# exe = netmhc.command.split()[0]
# for try_path in os.environ["PATH"].split(os.pathsep):
# try_path = try_path.strip('"')
# exe_try = os.path.join(try_path, exe).strip()
# if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK):
# r = netmhc.predict(self.peptides_mhcI, alleles=self.mhcI, command=exe_try, options="--sort", chunksize=1)
# self.assertTrue(len(r) == len(self.peptides_mhcI))
# self.assertAlmostEqual(r["A*02:01"]["SYFPEITHI"]["netmhc"], 0.150579105869, places=7, msg=None, delta=None)
# self.assertAlmostEqual(r["A*02:01"]["IHTIEPFYS"]["netmhc"], 0.0619540879359, places=7, msg=None, delta=None)
def test_path_and_optional_parameters_netctl(self):
netctlpan = EpitopePredictorFactory("NetCTLpan")
exe = netctlpan.command.split()[0]
for try_path in os.environ["PATH"].split(os.pathsep):
try_path = try_path.strip('"')
exe_try = os.path.join(try_path, exe).strip()
if os.path.isfile(exe_try) and os.access(exe_try, os.X_OK):
print(netctlpan.predict(self.peptides_mhcI, alleles=self.mhcI,
command=exe_try,
options="-wt 0.05 -wc 0.225 -ethr 0.5"))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11216075
|
<reponame>felix-walter/pydtnsim<gh_stars>1-10
import math
import pytest
from pydtnsim.routing import cgr_anchor
from pydtnsim.routing import cgr_basic
from pydtnsim.routing import scgr
from pydtnsim.backend import QSim
from pydtnsim.routing.cgr_basic import Route, Neighbor
from pydtnsim import Contact, ContactPlan, ContactGraph, Packet
from pydtnsim import ContactIdentifier
from pydtnsim.monitors import MonitorNotifier
testdata = [(cgr_basic), (cgr_anchor)]
testdata_routing = [(cgr_basic), (cgr_anchor), (scgr)]
class DummySimulator():
def __init__(self):
self.env = QSim()
self.notifier = MonitorNotifier(self.env)
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list(mod):
"""Test function that tests the route-finding capabilities of the
load_route_list function and tests the correctness.
"""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 4 |
# [20;30]+--+---+--+[70:80]
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [10;20]+--+---+--+[40:50]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 10, 20)
contact_plan.add_contact('node1', 'node3', 40, 50)
contact_plan.add_contact('node2', 'node4', 20, 30)
contact_plan.add_contact('node3', 'node4', 70, 80)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node14 = mod.load_route_list(contact_graph, 'node1', 'node4', 0)
# Make sure that two routes were found
assert len(route_list_node14) == 2
# Assert characteristics of the found routes
route1 = route_list_node14[0]
assert route1.transmission_plan == ([('node1', 'node2', 10, 20, 1000, 0),
('node2', 'node4', 20, 30, 1000, 0)])
assert route1.edt == 20
assert route1.capacity == 10000
assert route1.to_time == 20
route2 = route_list_node14[1]
assert route2.transmission_plan == ([('node1', 'node3', 40, 50, 1000, 0),
('node3', 'node4', 70, 80, 1000, 0)])
assert route2.edt == 70
assert route2.capacity == 10000
assert route2.to_time == 50
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_unavailable_route(mod):
"""Test function that tests that no route is found.
"""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+ +---+ +---+ +---+
# | 1 +---------+ 2 +---------+ 3 +---------+ 4 |
# +---+ 35:40 +---+ 20:40 +---+ 20:25 +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 35, 40)
contact_plan.add_contact('node2', 'node3', 20, 40)
contact_plan.add_contact('node3', 'node4', 20, 25)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node14 = mod.load_route_list(contact_graph, 'node1', 'node4', 0)
# Make sure that two routes were found
assert len(route_list_node14) == 0
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_no_route(mod):
"""Test function that tests the route-finding capabilities of the
load_route_list function and tests that no route is found if contacts on
route do not add up.
"""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 5 |
# [20;30]+--+---+--+[70:80]
# | |
# +-+-+ +-+-+
# | 3 | | 4 |
# +-+-+ +-+-+
# | |
# [10;20]+--+---+--+[40:50]
# | 2 |
# +-+-+
# |
# |[50:60]
# |
# +-+-+
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 50, 60)
contact_plan.add_contact('node2', 'node3', 10, 20)
contact_plan.add_contact('node2', 'node4', 40, 50)
contact_plan.add_contact('node3', 'node5', 20, 30)
contact_plan.add_contact('node4', 'node5', 70, 80)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node15 = mod.load_route_list(contact_graph, 'node1', 'node5', 0)
# Make sure that two routes were found
assert len(route_list_node15) == 0
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_anchoring_first_contact(mod):
"""Test function that tests the route-finding capabilities, in particular
the correct behaviour when the anchoring mechanism is involved and the
limiting contact is the first contact of the route.
"""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 5 |
# [0:100]+--+---+--+[0:100]
# | |
# +-+-+ +-+-+
# | 3 | | 4 |
# +-+-+ +-+-+
# | |
# [0:100]+--+---+--+[0:100]
# | 2 |
# +-+-+
# |
# |[30:70]
# |
# +-+-+
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 30, 70)
contact_plan.add_contact('node2', 'node3', 0, 100)
contact_plan.add_contact('node2', 'node4', 0, 100)
contact_plan.add_contact('node3', 'node5', 0, 100)
contact_plan.add_contact('node4', 'node5', 0, 100)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node15 = mod.load_route_list(contact_graph, 'node1', 'node5', 0)
# Make sure that only one route is found (as both possible routes run through
# the identical first limiting contact and thus only one route suffices)
assert len(route_list_node15) == 1
# Check that the route is correct
route = route_list_node15[0]
assert route[0] == [('node1', 'node2', 30, 70, 1000, 0),
('node2', 'node3', 0, 100, 1000, 0),
('node3', 'node5', 0, 100, 1000, 0)] \
or route[0] == [('node1', 'node2', 30, 70, 1000, 0),
('node2', 'node4', 0, 100, 1000, 0),
('node4', 'node5', 0, 100, 1000, 0)]
assert route.edt == 30
assert route.capacity == 40000
assert route.to_time == 70
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_anchoring_intermediate_contact(mod):
"""Test function that tests the route-finding capabilities, in particular
the correct behaviour when the anchoring mechanism is involved and the
limiting contact is an intermediate contact of the route.
"""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 8 |
# [0:100]+--+---+--+[30:90]
# | |
# +-+-+ +-+-+
# | 6 | | 7 |
# +-+-+ +-+-+
# | |
# [0:100]+--+---+--+[30:90]
# | 5 |
# +-+-+
# |
# |[30:70]
# |
# +-+-+
# | 4 |
# [30:90]+--+---+--+[0:100]
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [30:90]+--+---+--+[0:100]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 30, 90)
contact_plan.add_contact('node1', 'node3', 0, 100)
contact_plan.add_contact('node2', 'node4', 30, 90)
contact_plan.add_contact('node3', 'node4', 0, 100)
contact_plan.add_contact('node4', 'node5', 30, 70)
contact_plan.add_contact('node5', 'node6', 0, 100)
contact_plan.add_contact('node5', 'node7', 30, 90)
contact_plan.add_contact('node6', 'node8', 0, 100)
contact_plan.add_contact('node7', 'node8', 30, 90)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node18 = mod.load_route_list(contact_graph, 'node1', 'node8', 0)
# Make sure that only one route is found (as both possible routes run through
# the identical intermediate limiting contact and thus only one route is
# returned)
assert len(route_list_node18) == 1
# Check that the route is correct
route = route_list_node18[0]
assert route.edt == 30
assert route.capacity == 40000
assert route.to_time == 70
def generate_test_graph(remove_edge26=False):
"""Helper function to generate a contact graph for many testcases."""
# The following topology is tested in this test case:
# +---+
# | 8 |
# [0:100]+--+---+--+[30:90]
# | |
# +-+-+ +-+-+
# | 6 | | 7 |
# +-+-+ +-+-+
# | |
# | |
# [10:40]| |[40:80]
# | |
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [30:90]+--+---+--+[0:100]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Create list of all nodes
contact_plan.add_contact('node1', 'node2', 30, 90)
contact_plan.add_contact('node1', 'node3', 0, 100)
contact_plan.add_contact('node3', 'node7', 40, 80)
contact_plan.add_contact('node6', 'node8', 0, 100)
contact_plan.add_contact('node7', 'node8', 30, 90)
# Only add edge between node2 and node6 if required
if not remove_edge26:
contact_plan.add_contact('node2', 'node6', 10, 40)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
return contact_graph
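# Hedged usage sketch (mirrors what the test cases below do, nothing new):
# graph = generate_test_graph()
# routes = cgr_basic.load_route_list(graph, 'node1', 'node8', 0)
# for route in routes:
#     print(route.transmission_plan, route.edt, route.hops)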
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_base(mod):
"""Test function that tests the identify_proximate_node_list."""
# First, create a contact plan that is then converted to the contact graph
# representation and later processed by identify_proximate_node_list
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that two routes are found
assert len(proximate_nodes) == 2
# Check individual EDT and hops
node1 = proximate_nodes[0]
assert node1.route.edt == 30
assert node1.route.hops == 3
node2 = proximate_nodes[1]
assert node2.route.edt == 40
assert node2.route.hops == 3
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_past_route(mod):
"""Test function that verifys that identify_proximate_node_list() ignores
routes thats' feasibility ended in the past.
"""
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list with current time set to 50
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 50, contact_list)
# Make sure that only one route is found, (1->2->6->8) has already expired
# at t=50
assert len(proximate_nodes) == 1
# Assert that the correct proximate node (and route) is returned
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == (('node1', 'node3', 0, 100, 1000, 0))
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_edt_after_deadline(mod):
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to 35
bundle = Packet('node1', 'node8', 1, 35)
# Route bundle from node1 to node 8 with size 1 and deadline set to 30
bundle2 = Packet('node1', 'node8', 1, 30)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list with current time set to 0
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that only one route is found, (1->3->7->8) will not reach the
# target within the deadline
assert len(proximate_nodes) == 1
# Assert that the correct proximate node (and route) is returned
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node2',
30, 90, 1000, 0),
('node2', 'node6',
10, 40, 1000, 0),
('node6', 'node8',
0, 100, 1000, 0)])
assert proximate_nodes[0].contact == (('node1', 'node2', 30, 90, 1000, 0))
# Now generate a proximate node list with current time set to 0
proximate_nodes2 = mod.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
# Make sure that only one route is found, (1->3->7->8) will not reach the
# target within the deadline
assert not proximate_nodes2
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_route_capacity(mod):
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Create bundle from node1 to node 8 with size 40 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 40000, math.inf)
# Create bundle from node1 to node 8 with size 41 and deadline set to
# infinity
bundle2 = Packet('node1', 'node8', 41000, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list for 'bundle'
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are found as the bundle is not exceeding the
# capacities of the routes' contacts
assert len(proximate_nodes) == 1
# Now generate a proximate node list for 'bundle2'
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are not found as the bundle's size is larger than
# the capacities of all available routes
assert len(proximate_nodes) == 0
# Enqueue that packet to trigger the capacity recalculation
contact_list[('node1', 'node2', 30, 90, 1000, 0)].cap_rem = 10000
# Enqueue that packet to trigger the capacity recalculation
contact_list[('node1', 'node3', 0, 100, 1000, 0)].cap_rem = 40000
# Now generate a proximate node list for 'bundle'
proximate_nodes = cgr_basic.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that one route is found as the bundle is still fitting into
# the queue of the 1->3 contact
assert len(proximate_nodes) == 1
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 1000, 0)
# Now generate a proximate node list for 'bundle2'
proximate_nodes = cgr_basic.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are not found as the bundle's size is larger than
# the remaining capacities of all feasible contacts to neighbors
assert not proximate_nodes
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_excluded_nodes(mod):
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list with ('node1', 'node2', 30, 90) being
# in the excluded node list
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, ['node2'], 0, contact_list)
# Make sure that only one route is found, (1->3->7->8) will not reach the
# target within the deadline
assert len(proximate_nodes) == 1
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 1000, 0)
def create_route(old_route):
plan = list()
for contact in old_route[0]:
plan.append(
ContactIdentifier(
from_node=contact[0],
to_node=contact[1],
from_time=contact[2],
to_time=contact[3],
datarate=contact[4],
delay=contact[5]))
new_route = Route(
transmission_plan=plan,
edt=old_route[1][0],
capacity=old_route[1][1],
to_time=old_route[1][2],
hops=len(old_route[0]),
next_hop=plan[0])
return new_route
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_optimize_proximate_node(mod):
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create some fake routes that can later be used for optimizing the
# proximate node values
route_list['node8'] = []
# First, add route with worse EDT (50)
route_list['node8'].append(
create_route(([('node1', 'node3', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (50, 40, 70))))
# Then add route with better EDT (30)
route_list['node8'].append(
create_route(([('node1', 'node3', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
# First, add route with 5 hops
route_list['node8'].append(
create_route(([('node1', 'node2', 30, 90, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
# Then add route with only 4 hops
route_list['node8'].append(
create_route(([('node1', 'node2', 30, 90, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 10, 0)] = Contact(
30, 90, 10, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 10, 0)] = Contact(
0, 100, 10, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, 10, 0)] = Contact(
0, math.inf, 10, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 10, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 10, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 10,
0)].register_simulator(dummy)
# Now generate a proximate node list with ('node1', 'node2', 30, 90) being
# in the excluded node list
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that two feasible proximate nodes are found
assert len(proximate_nodes) == 2
# Assert that the proximate nodes are returned with the correct, optimized
# characteristics
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 10, 0)
assert proximate_nodes[0].route.edt == 30
assert proximate_nodes[0].route.hops == 3
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 10, 0),
('node3', 'node4',
0, 100, 10, 0),
('node4', 'node5',
30, 70, 10, 0)])
assert proximate_nodes[1].contact == ('node1', 'node2', 30, 90, 10, 0)
assert proximate_nodes[1].route.edt == 30
assert proximate_nodes[1].route.hops == 4
assert proximate_nodes[1].route.transmission_plan == ([
('node1', 'node2', 30, 90, 10, 0), ('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0), ('node4', 'node5', 30, 70, 10, 0)
])
# Test function that tests the cgr function
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_base(mod):
# First, create a contact plan that is then converted to the contact
# graph representation and later processed by cgr()
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2', debug=True)
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
# Make sure that the bundle is enqueued for the correct contact
assert len(contact_list[('node1', 'node2', 30, 90, 1000, 0)] \
.packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.packet_queue) == 0
assert contact_list[('node1', 'node2', 30, 90, 1000, 0)] \
.packet_queue[0] == bundle
# Test function that tests the cgr function
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_base_no_route(mod):
# First, create a contact plan that is then converted to the contact
# graph representation and later processed by cgr()
# The following topology is tested in this test case:
# +---+ +---+ +---+ +---+
# | 1 +---------+ 2 +---------+ 3 +---------+ 4 |
# +---+ 35:40 +---+ 20:40 +---+ 20:25 +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 35, 40)
contact_plan.add_contact('node2', 'node3', 20, 40)
contact_plan.add_contact('node3', 'node4', 20, 25)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node4', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 35, 40, 1000, 0)] = Contact(
35, 40, 1000, 'node1', 'node2', debug=True)
contact_list[('node2', 'node3', 20, 40, 1000, 0)] = Contact(
20, 40, 1000, 'node2', 'node3', debug=True)
contact_list[('node3', 'node4', 20, 25, 1000, 0)] = Contact(
20, 25, 1000, 'node3', 'node4', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 35, 40, 1000, 0)].register_simulator(dummy)
contact_list[('node2', 'node3', 20, 40, 1000, 0)].register_simulator(dummy)
contact_list[('node3', 'node4', 20, 25, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
# Make sure that the bundle ended up in the limbo list and was not enqueued for any contact
assert len(limbo) == 1
assert len(contact_list[('node1', 'node2', 35, 40, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node2', 'node3', 20, 40, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node3', 'node4', 20, 25, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.packet_queue) == 0
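# Helper that converts the tuple-based fake proximate-node entries used in the
# tests below into Neighbor objects backed by Route/ContactIdentifier instances.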
def generate_neighbors(old_neighbors):
new_neighbors = list()
    for neighbor in old_neighbors:
tp = list()
for hops in neighbor[3]:
tp.append(
ContactIdentifier(
from_node=hops[0],
to_node=hops[1],
from_time=hops[2],
to_time=hops[3],
datarate=hops[4],
delay=hops[5]))
route = Route(
transmission_plan=tp,
edt=neighbor[1],
capacity=1000,
to_time=10000,
hops=neighbor[2],
next_hop=ContactIdentifier(*neighbor[0]))
new_neighbor = Neighbor(
contact=route.next_hop,
node_id=route.next_hop.to_node,
route=route)
new_neighbors.append(new_neighbor)
return new_neighbors
@pytest.mark.parametrize("mod", testdata)
def test_cgr_optimization(mod):
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 10, 100, 1000, 0)] = Contact(
10, 100, 1000, 'node1', 'node2', debug=True)
contact_list[('node1', 'node3', 10, 100, 1000, 0)] = Contact(
        10, 100, 1000, 'node1', 'node3', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.register_simulator(dummy)
contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.register_simulator(dummy)
# Create a fake proximate node list to isolate the cgr() function's
# behaviour and test it
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 10, 2,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 2,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
# Make sure that the bundle is enqueued in the queue with the best edt
assert len(contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.packet_queue) == 1
assert not contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.packet_queue
assert contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.packet_queue[0] == bundle
# Alter proximate node list so that edt is equal and hops is the relevant
# value
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 20, 3,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 2,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
    # Make sure that the bundle is enqueued in the queue with the fewest hops (edt is equal)
assert len(contact_list[('node1', 'node2', 10, 100, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 10, 100, 1000,
0)].packet_queue) == 1
assert contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.packet_queue[0] == bundle
# Alter proximate node list so that edt and hops are equal and hash is the
# deciding value
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 20, 4,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 4,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
if hash('node2') > hash('node3'):
node_a = ('node1', 'node2', 10, 100, 1000, 0)
node_b = ('node1', 'node3', 10, 100, 1000, 0)
else:
node_b = ('node1', 'node2', 10, 100, 1000, 0)
node_a = ('node1', 'node3', 10, 100, 1000, 0)
    # Make sure that the bundle is enqueued in the queue selected by the hash-based tie-break
if len(contact_list[node_a].packet_queue) == 1:
assert len(contact_list[node_b].packet_queue) == 2
assert contact_list[node_b].packet_queue[1] == bundle
elif len(contact_list[node_a].packet_queue) == 2:
assert len(contact_list[node_b].packet_queue) == 1
assert contact_list[node_a].packet_queue[1] == bundle
# Testcase function that verifies the insertion of bundles that are not
# routable at the moment of routing into the limbo list
@pytest.mark.parametrize("mod", testdata)
def test_cgr_limbo(mod):
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
# Create a fake proximate node list to isolate the cgr() function's
    # behaviour and test it, no entries this time to force an insertion into
# limbo
proximate_nodes = []
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
    # Make sure that the bundle ended up in the limbo list
assert len(limbo) == 1
assert limbo[0] == bundle
# Test function that verifies the "flooding" mechanism for critical
# bundles in the cgr() function
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_critical_bundle(mod):
    # First, create a contact plan that is then converted to the contact graph
    # representation and later processed by cgr()
# Create contact graph of test topology
contact_graph = generate_test_graph()
    # Route bundle from node1 to node8 with size 1000 and no deadline, but
    # marked as critical
bundle = Packet('node1', 'node8', 1000, math.inf, False, True)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2', debug=True)
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
# Make sure that the bundle is enqueued in all feasible contacts to
# neighbors
assert len(contact_list[('node1', 'node2', 30, 90, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000,
0)].packet_queue) == 1
contact_graph2 = generate_test_graph(True)
    # Route bundle from node1 to node8 with size 1 and no deadline, but
    # marked as critical
bundle2 = Packet('node1', 'node8', 1, math.inf, False, True)
# Reset route list
route_list = dict()
mod.cgr(bundle2, 'node1', contact_graph2, route_list, contact_list, 0,
limbo)
    # Make sure that only neighbors that can still reach the destination
    # (based on the contact graph knowledge) are considered
assert len(contact_list[('node1', 'node2', 30, 90, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000,
0)].packet_queue) == 2
|
StarcoderdataPython
|
8184910
|
<filename>adventure_game/__init__.py<gh_stars>1-10
from .contracts import *
from .factories import *
from .models import *
from .providers import *
|
StarcoderdataPython
|
3568952
|
#from nltk.corpus import words
from nltk import ngrams
import nltk
import enchant
from nltk.sentiment.vader import SentimentIntensityAnalyzer
d = enchant.Dict("en_US")
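# Feature extractor for a single essay: aggregates sentence-level VADER
# sentiment scores, adjective/adverb counts, sentence count and bigram
# diversity, among other counts.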
class Features:
def __init__(self,essay):
# To Do: Incorporate Google snippets match
# self.google_snippets_match = 0
self.neg_sentiment = 0
self.pos_sentiment = 0
self.neu_sentiment = 0
# self.compound_sentiment = 0
#POS counts
# self.noun_count = 0
self.adj_count = 0
# self.verb_count = 0
self.adv_count = 0
# self.fw_count = 0
#form counts
# self.essay_length = 0
self.long_word = 0
# self.spelling_errors = 0
self.sentence_count = 0
# self.avg_sentence_len = 0
#language model counts
# self.unigrams_count = 0
self.bigrams_count = 0
# self.trigrams_count = 0
self.initialize_features(essay)
def sentiment_tagger(self,sid,sentence):
ss = sid.polarity_scores(sentence)
for k in sorted(ss):
if k == 'compound':
pass
# self.compound_sentiment += ss[k]
elif k == 'neg':
self.neg_sentiment += ss[k]
elif k == 'neu':
self.neu_sentiment += ss[k]
elif k == 'pos':
self.pos_sentiment += ss[k]
def tokenize_sentences(self, essay):
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_detector.tokenize(essay.strip())
self.sentence_count = len(sents)
sid = SentimentIntensityAnalyzer()
for i in sents:
self.sentiment_tagger(sid,i)
# self.lexical_diversity(i.lower())
self.lexical_diversity(essay.lower())
def word_counts(self,essay):
word = nltk.word_tokenize(essay.strip())
self.essay_length = len(word)
# corpus_words = words.words()
# for i in word:
# try:
# if not d.check(i.encode('utf8')):
# self.spelling_errors += 1
# except:
# self.spelling_errors += 1
# if not d.check(i):
# self.spelling_errors += 1
# if len(i) >= 7:
# self.long_word += 1
# if self.sentence_count != 0:
# self.avg_sentence_len = self.essay_length/self.sentence_count
# else:
# self.avg_sentence_len = 0
self.pos_counts(word)
def pos_counts(self,tokens):
tags = nltk.pos_tag(tokens)
for tag in tags:
if tag[1].startswith("NN"):
# self.noun_count += 1
pass
elif tag[1].startswith("JJ"):
self.adj_count += 1
elif tag[1].startswith("RB"):
self.adv_count += 1
elif tag[1].startswith("VB"):
# self.verb_count += 1
pass
elif tag[1].startswith("FW"):
pass
# self.fw_count += 1
def lexical_diversity(self,sentence):
sents = " ".join(nltk.word_tokenize(sentence))
unigrams = [ grams for grams in ngrams(sents.split(), 1)]
bigrams = [ grams for grams in ngrams(sents.split(), 2)]
trigram = [ grams for grams in ngrams(sents.split(), 3)]
# self.unigrams_count = len([(item[0], unigrams.count(item)) for item in sorted(set(unigrams))])
self.bigrams_count = len([(item, bigrams.count(item)) for item in sorted(set(bigrams))])
# self.trigrams_count = len([(item, trigram.count(item)) for item in sorted(set(trigram))])
def initialize_features(self, essay):
self.tokenize_sentences(essay)
self.word_counts(essay)
#self.lexical_diversity(essay)
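# Minimal usage sketch (assumes the required NLTK data -- punkt, vader_lexicon,
# averaged_perceptron_tagger -- and a working enchant backend are installed):
#   features = Features("I really enjoyed this essay. It was well written.")
#   print(features.pos_sentiment, features.adj_count, features.bigrams_count)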
|
StarcoderdataPython
|
8065653
|
<reponame>edwinfeener/monolithe
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from builtins import object
import importlib
from .utils import load_language_plugins
class SDKUtils(object):
""" SDK Utilities
"""
idiomatic_methods_cache = {}
type_methods_cache = {}
@classmethod
def massage_type_name(cls, type_name):
""" Returns a readable type according to a java type
"""
if type_name.lower() in ("enum", "enumeration"):
return "enum"
if type_name.lower() in ("str", "string"):
return "string"
if type_name.lower() in ("boolean", "bool"):
return "boolean"
if type_name.lower() in ("int", "integer"):
return "integer"
if type_name.lower() in ("date", "datetime", "time"):
return "time"
if type_name.lower() in ("double", "float", "long"):
return "float"
if type_name.lower() in ("list", "array"):
return "list"
if type_name.lower() in ("object", "dict"):
return "object"
if "array" in type_name.lower():
return "list"
return "string"
@classmethod
def get_plural(cls, singular_name):
""" Returns the plural name of the singular name
Certain words are invariant.
Args:
singular_name (string): the singular name to pluralize
Returns:
The pluralized version of the singular name
"""
if singular_name[-1] == "y" and singular_name[-2] not in ["a", "e", "i", "o", "u", "y"]:
return singular_name[:-1] + "ies"
if singular_name[-1] != "s":
return singular_name + "s"
return singular_name
@classmethod
def get_string_version(cls, version):
""" Get the sdk version according to the given version
Args:
version (float): the version
Returns:
version as string
Example:
get_string_version(3.1)
>>> v3_1
"""
if version == "master":
return version
return ("v%s" % version).replace(".", "_")
@classmethod
def get_float_version(cls, string_version):
""" Get the sdk version as float according to the given string version
Args:
string_version (stirng): the version
Returns:
version as float
Example:
get_float_version("v3_1")
>>> 3.1
"""
if string_version == "master":
return string_version
return float(string_version.replace("v", "").replace("_", "."))
# Commons language conversion
@classmethod
def get_idiomatic_name_in_language(cls, name, language):
""" Get the name for the given language
Args:
name (str): the name to convert
language (str): the language to use
Returns:
a name in the given language
Example:
get_idiomatic_name_in_language("EnterpriseNetwork", "python")
>>> enterprise_network
"""
if language in cls.idiomatic_methods_cache:
m = cls.idiomatic_methods_cache[language]
if not m:
return name
return m(name)
found, method = load_language_plugins(language, 'get_idiomatic_name')
if found:
cls.idiomatic_methods_cache[language] = method
if method:
return method(name)
else:
return name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_idiomatic_name'):
cls.idiomatic_methods_cache[language] = None
return name
method = getattr(module, 'get_idiomatic_name')
cls.idiomatic_methods_cache[language] = method
return method(name)
@classmethod
def get_type_name_in_language(cls, type_name, sub_type, language):
""" Get the type for the given language
Args:
type_name (str): the type to convert
language (str): the language to use
Returns:
a type name in the given language
Example:
get_type_name_in_language("Varchar", "python")
>>> str
"""
if language in cls.type_methods_cache:
m = cls.type_methods_cache[language]
if not m:
return type_name
return m(type_name)
found, method = load_language_plugins(language, 'get_type_name')
if found:
cls.type_methods_cache[language] = method
if method:
return method(type_name, sub_type)
else:
return type_name
module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")
if not hasattr(module, 'get_type_name'):
cls.type_methods_cache[language] = None
return type_name
method = getattr(module, 'get_type_name')
cls.type_methods_cache[language] = method
return method(type_name, sub_type)
|
StarcoderdataPython
|
3458803
|
import numpy as np
import os
import time
import tensorflow as tf
from shark.shark_trainer import SharkTrainer
from shark.parser import parser
from urllib import request
parser.add_argument(
"--download_mlir_path",
type=str,
default="bert_tf_training.mlir",
help="Specifies path to target mlir file that will be loaded.",
)
load_args, unknown = parser.parse_known_args()
tf.random.set_seed(0)
vocab_size = 100
NUM_CLASSES = 5
SEQUENCE_LENGTH = 512
BATCH_SIZE = 1
# Download BERT model from tank and train.
if __name__ == "__main__":
predict_sample_input = [
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
]
file_link = "https://storage.googleapis.com/shark_tank/users/stanley/bert_tf_training.mlir"
response = request.urlretrieve(file_link, load_args.download_mlir_path)
sample_input_tensors = [
tf.convert_to_tensor(val, dtype=tf.int32)
for val in predict_sample_input
]
num_iter = 10
if not os.path.isfile(load_args.download_mlir_path):
raise ValueError(
f"Tried looking for target mlir in {load_args.download_mlir_path}, but cannot be found."
)
with open(load_args.download_mlir_path, "rb") as input_file:
bert_mlir = input_file.read()
shark_module = SharkTrainer(
bert_mlir,
(
sample_input_tensors,
tf.convert_to_tensor(
np.random.randint(5, size=(BATCH_SIZE)), dtype=tf.int32
),
),
)
shark_module.set_frontend("mhlo")
shark_module.compile()
start = time.time()
print(shark_module.train(num_iter))
end = time.time()
total_time = end - start
print("time: " + str(total_time))
print("time/iter: " + str(total_time / num_iter))
|
StarcoderdataPython
|
21453
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def get_const_mode(val):
# Heuristics to determine if a val should be file value or immediate
# value.
if isinstance(val, (str, bool, int)):
return "immediate_value"
if isinstance(val, (np.generic, np.ndarray)):
if val.size > 10:
return "file_value"
return "immediate_value"
raise ValueError("val {} not recognized.".format(val))
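# Walks a block (and any nested blocks) and, for every non-const op output whose
# value is already known at compile time, materializes an explicit const op and
# rewires all uses to it; ops whose outputs are all const are then removed.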
def const_elimination_block(block):
# shallow copy hides changes on f.operations during the loop
for op in list(block.operations):
if op.op_type == "const":
continue
for b in op.blocks:
const_elimination_block(b)
all_outputs_are_const = True
for i, o in enumerate(op.outputs):
if o.val is not None:
with block:
res = mb.const(
val=o.val,
mode=get_const_mode(o.val),
before_op=op,
# same var name, but different python
# instance does not violate SSA property.
name=o.name,
)
op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=op, old_var=o, new_var=res
)
# rename the const output
o.set_name(o.name+'_ignored')
else:
all_outputs_are_const = False
if all_outputs_are_const:
op.remove_from_block()
@register_pass(namespace="common")
def const_elimination(prog):
"""
prog: Program
# Replace non-const ops that have const Var
# outputs replaced with const op. Example:
#
# Given:
# %2, %3 = non_const_op(...) # %2 is const, %3 isn't const
# %4 = other_op(%2, %3)
#
# Result:
# _, %3 = non_const_op(...) # _ is the ignored output
# %2_const = const(mode=m) # %2_const name is for illustration only
# %4 = other_op(%2_const, %3)
#
# where m is 'file_value' / 'immediate_value' depending on heuristics
# in get_const_mode.
"""
for f_name, f in prog.functions.items():
const_elimination_block(f)
|
StarcoderdataPython
|
255979
|
<reponame>samj1912/python-libcnb
import sys
from pathlib import Path
import pytest
import toml
import libcnb
@pytest.fixture
def mock_layers(tmp_path):
return tmp_path / "layers"
@pytest.fixture
def mock_build_context(
mock_platform_path, mock_layers, mock_plan, monkeypatch, mock_buildpack_path
):
monkeypatch.setattr(
"sys.argv", ["build", str(mock_layers), str(mock_platform_path), str(mock_plan)]
)
monkeypatch.setenv("CNB_BUILDPACK_DIR", str(mock_buildpack_path))
monkeypatch.setenv("CNB_STACK_ID", "test")
yield mock_layers, mock_platform_path, mock_plan, mock_buildpack_path, "test"
def test_build_values(mock_build_context):
# GIVEN
layers_path, platform_path, plan, buildpack_path, stack_id = mock_build_context
def builder(context: libcnb.BuildContext):
assert context.application_dir == Path(".").absolute()
assert context.layers.path == layers_path
assert context.platform.path == platform_path.absolute()
assert context.buildpack.path == buildpack_path.absolute()
assert context.stack_id == stack_id
layer = context.layers.get("test")
another_layer = context.layers.get("test-another")
another_layer.dump()
assert another_layer.metadata_file.exists()
layer.metadata["test"] = "1"
layer.launch = True
layer.build = True
layer.launch_env.append("test", "test", ":")
return libcnb.BuildResult(
layers=[layer],
store=libcnb.Store(metadata={"test_store": 1}),
launch_metadata=libcnb.LaunchMetadata(
labels=[libcnb.Label(key="test_label", value="test")],
processes=[libcnb.Process(type_="test", command="test")],
slices=[libcnb.Slice(paths=[Path("."), Path("*")])],
bom=[libcnb.BOMEntry(name="test", metadata={"test": 1})],
),
build_metadata=libcnb.BuildMetadata(
bom=[libcnb.BOMEntry(name="test", metadata={"test": 1})],
unmet=[libcnb.UnmetPlanEntry(name="unmet")],
),
)
# WHEN
libcnb.build(builder)
# THEN
assert (layers_path / "test").exists()
assert not (layers_path / "test-another.toml").exists()
assert toml.loads((layers_path / "test.toml").read_text()) == {
"types": {"build": True, "cache": False, "launch": True},
"metadata": {"test": "1"},
}
assert toml.loads((layers_path / "launch.toml").read_text()) == {
"labels": [{"key": "test_label", "value": "test"}],
"processes": [
{"type": "test", "command": "test", "args": [], "direct": False, "default": False}
],
"slices": [{"paths": [".", "*"]}],
"bom": [{"name": "test", "metadata": {"test": 1}}],
}
assert toml.loads((layers_path / "build.toml").read_text()) == {
"bom": [{"name": "test", "metadata": {"test": 1}}],
"unmet": [{"name": "unmet"}],
}
assert toml.loads((layers_path / "store.toml").read_text()) == {"metadata": {"test_store": 1}}
def test_detect_errors_on_missing_stack(mock_build_context, monkeypatch):
# GIVEN
builder = None
# WHEN
monkeypatch.delenv("CNB_STACK_ID")
# THEN
with pytest.raises(Exception, match="CNB_STACK_ID is not set"):
libcnb.build(builder)
def test_build_errors_on_missing_buildpack_path(mock_build_context, monkeypatch):
# GIVEN
builder = None
# WHEN
monkeypatch.delenv("CNB_BUILDPACK_DIR")
# THEN
with pytest.raises(Exception, match="CNB_BUILDPACK_DIR is not set"):
libcnb.build(builder)
def test_build_errors_on_old_api(mock_build_context, monkeypatch, mock_old_buildpack_path):
# GIVEN
builder = None
# WHEN
monkeypatch.setenv("CNB_BUILDPACK_DIR", str(mock_old_buildpack_path))
print(sys.argv)
# THEN
with pytest.raises(
Exception,
match="This version of libcnb is only compatible with buildpack API .* or greater",
):
libcnb.build(builder)
def test_build_context(mock_buildpack_path, mock_platform_path, tmp_path):
# GIVEN
context_input = {
"application_dir": Path(".").absolute(),
"layers": libcnb.Layers(path=(tmp_path / "layers")),
"store": libcnb.Store(),
"plan": libcnb.BuildpackPlan(),
"buildpack": libcnb.Buildpack.from_path(mock_buildpack_path),
"platform": libcnb.Platform.from_path(mock_platform_path),
"stack_id": "test",
}
# WHEN
context: libcnb.BuildContext = libcnb.BuildContext.parse_obj(context_input)
# THEN
assert context.application_dir == Path(".").absolute()
assert context.buildpack.path == mock_buildpack_path.absolute()
assert context.platform.path == mock_platform_path.absolute()
assert context.stack_id == "test"
def test_build_context_error(mock_buildpack_path, mock_platform_path, tmp_path):
# GIVEN
context_input = {
"application_dir": Path("."),
"layers": libcnb.Layers(path=(tmp_path / "layers")),
"store": libcnb.Store(),
"plan": libcnb.BuildpackPlan(),
"buildpack": 1,
"platform": libcnb.Platform.from_path(mock_platform_path),
"stack_id": "test",
}
# THEN
with pytest.raises(ValueError, match="Invalid type"):
libcnb.BuildContext.parse_obj(context_input)
|
StarcoderdataPython
|
313902
|
from .abstractanalyzer import AbstractAnalyzer
from vaderSentiment import vaderSentiment
class VaderAnalyzer(AbstractAnalyzer):
def __init__(self):
pass
def analyze(self, text_content):
pass
|
StarcoderdataPython
|
8173447
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
# External imports
from flaky import flaky
from tornado.ioloop import IOLoop
# Bokeh imports
from bokeh.util.serialization import make_id
# Module under test
from bokeh.util.tornado import _CallbackGroup # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
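# Returns a callable that counts its own invocations and stops the given loop
# once it has been called `stop_after` times; the count is exposed via func.count().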
def _make_invocation_counter(loop, stop_after=1):
from types import MethodType
counter = { 'count' : 0 }
def func():
counter['count'] += 1
if stop_after is not None and counter['count'] >= stop_after:
loop.stop()
def count(self):
return self.counter['count']
func.count = MethodType(count, func)
func.counter = counter
return func
# this is so ctrl+c out of the tests will show the actual
# error, which pytest otherwise won't do by default
def run(loop):
try:
loop.start()
except KeyboardInterrupt:
print("Keyboard interrupt")
class LoopAndGroup:
def __init__(self, quit_after=None) -> None:
self.io_loop = IOLoop()
self.io_loop.make_current()
self.group = _CallbackGroup(self.io_loop)
if quit_after is not None:
self.io_loop.call_later(quit_after / 1000.0,
lambda: self.io_loop.stop())
def __exit__(self, type, value, traceback):
run(self.io_loop)
self.io_loop.close()
def __enter__(self):
return self
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class TestCallbackGroup:
@flaky(max_runs=10)
def test_next_tick_runs(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop)
assert 0 == len(ctx.group._next_tick_callback_removers)
ctx.group.add_next_tick_callback(callback=func, callback_id=make_id())
assert 1 == len(ctx.group._next_tick_callback_removers)
assert 1 == func.count()
# check for leaks
assert 0 == len(ctx.group._next_tick_callback_removers)
@flaky(max_runs=10)
def test_timeout_runs(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop)
assert 0 == len(ctx.group._timeout_callback_removers)
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=make_id())
assert 1 == len(ctx.group._timeout_callback_removers)
assert 1 == func.count()
# check for leaks
assert 0 == len(ctx.group._timeout_callback_removers)
@flaky(max_runs=10)
def test_periodic_runs(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=5)
assert 0 == len(ctx.group._periodic_callback_removers)
cb_id = make_id()
ctx.group.add_periodic_callback(callback=func, period_milliseconds=1, callback_id=cb_id)
assert 1 == len(ctx.group._periodic_callback_removers)
assert 5 == func.count()
# check for leaks... periodic doesn't self-remove though
assert 1 == len(ctx.group._periodic_callback_removers)
ctx.group.remove_periodic_callback(cb_id)
assert 0 == len(ctx.group._periodic_callback_removers)
@flaky(max_runs=10)
def test_next_tick_does_not_run_if_removed_immediately(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop)
cb_id = make_id()
ctx.group.add_next_tick_callback(callback=func, callback_id=cb_id)
ctx.group.remove_next_tick_callback(cb_id)
assert 0 == func.count()
@flaky(max_runs=10)
def test_timeout_does_not_run_if_removed_immediately(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop)
cb_id = ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=make_id())
ctx.group.remove_timeout_callback(cb_id)
assert 0 == func.count()
@flaky(max_runs=10)
def test_periodic_does_not_run_if_removed_immediately(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=5)
cb_id = make_id()
ctx.group.add_periodic_callback(callback=func, period_milliseconds=1, callback_id=cb_id)
ctx.group.remove_periodic_callback(cb_id)
assert 0 == func.count()
@flaky(max_runs=10)
def test_same_callback_as_all_three_types(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=5)
# we want the timeout and next_tick to run before the periodic
ctx.group.add_periodic_callback(callback=func, period_milliseconds=2, callback_id=make_id())
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=make_id())
ctx.group.add_next_tick_callback(callback=func, callback_id=make_id())
assert 5 == func.count()
@flaky(max_runs=10)
def test_adding_next_tick_twice(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=2)
ctx.group.add_next_tick_callback(callback=func, callback_id=make_id())
ctx.group.add_next_tick_callback(callback=func, callback_id=make_id())
assert 2 == func.count()
@flaky(max_runs=10)
def test_adding_timeout_twice(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=2)
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=make_id())
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=2, callback_id=make_id())
assert 2 == func.count()
@flaky(max_runs=10)
def test_adding_periodic_twice(self) -> None:
with (LoopAndGroup()) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=2)
ctx.group.add_periodic_callback(callback=func, period_milliseconds=3, callback_id=make_id())
ctx.group.add_periodic_callback(callback=func, period_milliseconds=2, callback_id=make_id())
assert 2 == func.count()
@flaky(max_runs=10)
def test_remove_all_callbacks(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
# add a callback that will remove all the others
def remove_all():
ctx.group.remove_all_callbacks()
ctx.group.add_next_tick_callback(callback=remove_all, callback_id=make_id())
# none of these should run
func = _make_invocation_counter(ctx.io_loop, stop_after=5)
ctx.group.add_periodic_callback(callback=func, period_milliseconds=2, callback_id=make_id())
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=make_id())
ctx.group.add_next_tick_callback(callback=func, callback_id=make_id())
assert 0 == func.count()
@flaky(max_runs=10)
def test_removing_next_tick_twice(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop)
cb_id = make_id()
ctx.group.add_next_tick_callback(callback=func, callback_id=cb_id)
ctx.group.remove_next_tick_callback(cb_id)
with pytest.raises(ValueError) as exc:
ctx.group.remove_next_tick_callback(cb_id)
assert 0 == func.count()
assert "twice" in repr(exc.value)
@flaky(max_runs=10)
def test_removing_timeout_twice(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop)
cb_id = make_id()
ctx.group.add_timeout_callback(callback=func, timeout_milliseconds=1, callback_id=cb_id)
ctx.group.remove_timeout_callback(cb_id)
with pytest.raises(ValueError) as exc:
ctx.group.remove_timeout_callback(cb_id)
assert 0 == func.count()
assert "twice" in repr(exc.value)
@flaky(max_runs=10)
def test_removing_periodic_twice(self) -> None:
with (LoopAndGroup(quit_after=15)) as ctx:
func = _make_invocation_counter(ctx.io_loop, stop_after=5)
cb_id = make_id()
ctx.group.add_periodic_callback(callback=func, period_milliseconds=1, callback_id=cb_id)
ctx.group.remove_periodic_callback(cb_id)
with pytest.raises(ValueError) as exc:
ctx.group.remove_periodic_callback(cb_id)
assert 0 == func.count()
assert "twice" in repr(exc.value)
@flaky(max_runs=10)
def test_adding_next_tick_from_another_thread(self) -> None:
        # The test is probabilistic in nature - there's a slight chance it'll give a false negative
with LoopAndGroup(quit_after=15) as ctx:
n = 1000
func = _make_invocation_counter(ctx.io_loop, stop_after=n)
tpe = ThreadPoolExecutor(n)
def make_cb(cb):
return ctx.group.add_next_tick_callback(cb, callback_id=make_id())
list(tpe.map(make_cb, repeat(func, n)))
assert n == func.count()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
StarcoderdataPython
|
11264921
|
import datetime
import json
import logging
import util
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
def timedelta_to_sql_time(delta):
# https://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects
days = delta.days
hours, h_remainder = divmod(delta.seconds, 3600)
minutes, seconds = divmod(h_remainder, 60)
if days > 0 or hours >= 839 or hours <= -839:
# https://dev.mysql.com/doc/refman/5.7/en/time.html
raise Exception(
"Timedelta too long for SQL format. Days: {}, Hours: {}"
.format(days, hours)
)
return "{}:{}:{}".format(hours, minutes, seconds)
def dumps_default(obj):
# Tells json.dumps() how to handle special values.
if isinstance(obj, datetime.timedelta):
return timedelta_to_sql_time(obj)
return util.json_dumps_default(obj)
class BigQueryApi(object):
"""Use the BigQuery REST API."""
scopes = [
'https://www.googleapis.com/auth/bigquery',
]
def __init__(self):
"""Set project-based constants."""
self.projectId = app_identity.get_application_id()
self.auth_token = None
self.url_prefix = (
'https://www.googleapis.com/bigquery/v2/projects/{}'
.format(self.projectId)
)
self.headers = {}
def __enter__(self):
"""Get access token, which will only live until __exit__()."""
self.auth_token, _ = app_identity.get_access_token(self.scopes)
self.headers = {
'Authorization': 'Bearer {}'.format(self.auth_token),
}
return self
def __exit__(self, type, value, traceback):
self.auth_token = None
def fetch(self, method, path, headers=None, body=None):
"""Convenience wrapper for urlfetch.fetch()."""
if headers is None:
headers = {}
if body is not None:
headers.update({'Content-Type': 'application/json'})
response = urlfetch.fetch(
self.url_prefix + path,
method=getattr(urlfetch, method),
headers=dict(self.headers, **headers),
payload=json.dumps(body, default=dumps_default),
)
logging.info(
'Call complete. {} {} {}\n\n Body {}'
.format(method, path, response.status_code, response.content)
)
return (response.status_code, json.loads(response.content))
def list_datasets(self):
status, response_body = self.fetch('GET', '/datasets')
if status != 200:
raise Exception(
"BigQueryApi.list_datasets() failed: {} {}"
.format(status, response_body)
)
return response_body
def ensure_dataset(self, datasetId):
"""Create a dataset if not already present."""
status, response_body = self.fetch(
'POST',
'/datasets',
body={
"datasetReference": {
"datasetId": datasetId,
"projectId": self.projectId,
},
},
)
err_status = response_body.get('error', {}).get('status', None)
if (status == 409 and err_status == 'ALREADY_EXISTS') or status == 200:
# Duplicate errors are fine.
return
raise Exception(
"BigQueryApi.ensure_dataset() failed: {} {}"
.format(status, response_body)
)
def ensure_table(self, datasetId, tableId, schema=None):
"""Create a table if not already present."""
status, response_body = self.fetch(
'POST',
'/datasets/{}/tables'.format(datasetId),
body={
"tableReference": {
"datasetId": datasetId,
"projectId": self.projectId,
"tableId": tableId,
},
"schema": schema or {},
},
)
        err_status = response_body.get('error', {}).get('status', None)
        if (status == 409 and err_status == 'ALREADY_EXISTS') or status == 200:
            # Duplicate errors are fine.
            return status, response_body
        raise Exception(
            "BigQueryApi.ensure_table() failed: {} {}"
            .format(status, response_body)
        )
def insert_data(self, datasetId, tableId, row_dicts, insert_id_field=None):
"""Stream rows to table. Doesn't start a job.
Args:
datasetId - str
tableId - str
row_dicts - list of dicts
insert_id_field - str, optional. If provided, will be passed to
BigQuery, which will attempt to avoid duplication based on its
value, kind of like a unique index, but they say "best effort".
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
"""
rows = []
for r in row_dicts:
insert_row = {"json": r}
if insert_id_field and insert_id_field in r:
insert_row["insertId"] = r[insert_id_field]
rows.append(insert_row)
status, response_body = self.fetch(
'POST',
'/datasets/{}/tables/{}/insertAll'.format(datasetId, tableId),
body={
"skipInvalidRows": True,
"ignoreUnknownValues": True,
# "templateSuffix": string,
"rows": rows,
},
)
if status != 200:
raise Exception(
"BigQueryApi.insert_data() failed: {} {}"
.format(status, response_body)
)
return status, response_body
|
StarcoderdataPython
|
3375646
|
import nextcord
from nextcord.ext import commands
class say(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
    async def say(self, ctx, *, message=None):
        if message is None:
await ctx.reply('Give me a word to say!')
else:
e=nextcord.Embed(description=message)
e.set_footer(text=f"Requested by {ctx.author.name}")
await ctx.send(embed=e)
def setup(client):
client.add_cog(say(client))
|
StarcoderdataPython
|
1738180
|
<filename>projecteuler/problems/problem_1.py
"""Problem one of https://projecteuler.net"""
def problem_1():
"""Solution to problem one."""
answer = sum([x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0])
return answer
|
StarcoderdataPython
|
1818004
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file based on https://github.com/kennethreitz/setup.py/blob/master/setup.py
# From https://packaging.python.org/discussions/install-requires-vs-requirements/#requirements-files :
#
# Whereas install_requires defines the dependencies for a single project,
# requirements files are often used to define the requirements for a complete Python environment.
# Whereas install_requires requirements are minimal,
# requirements files often contain an exhaustive listing of pinned versions for the purpose of achieving
# repeatable installations of a complete environment.
#
import os
from setuptools import setup, find_packages
# Package meta-data.
NAME = 'twenty_questions'
DESCRIPTION = 'A fun project to teach python'
# URL = 'https://github.com/me/myproject'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
# What packages are required for this module to be executed?
REQUIRED = []
# You can install using eg. `pip install twenty-questions[dev]==1.0.1`.
EXTRAS = {
'dev': ['pytest-cov', 'pytest', 'mypy', 'radon', 'pycodestyle'],
}
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
# with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about: dict = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
# long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
# url=URL,
packages=find_packages(exclude=('scripts', 'test_utilities')),
# packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
package_data={'twenty-questions': ['LICENSE.txt',]},
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: Other/Proprietary License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython' # Haven't tried others.
],
# $ setup.py publish support.
# cmdclass={
# 'upload': UploadCommand,
# },
)
|
StarcoderdataPython
|
5103657
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Written by <NAME> <<EMAIL>>
#
# Changed by Christian 'Tiran' Heimes <<EMAIL>> for the placeless
# translation service (PTS) of zope
#
# Slightly updated by <NAME> <<EMAIL>>
#
# Included by Ingeniweb from PlacelessTranslationService 1.4.8
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
This file was taken from Python-2.3.2/Tools/i18n and altered in several ways.
Now you can simply use it from another python module:
from msgfmt import Msgfmt
mo = Msgfmt(po).get()
where po is path to a po file as string, an opened po file ready for reading or
a list of strings (readlines of a po file) and mo is the compiled mo
file as binary string.
Exceptions:
* IOError if the file couldn't be read
* msgfmt.PoSyntaxError if the po file has syntax errors
"""
import struct
import array
import types
from cStringIO import StringIO
__version__ = "1.1pts"
class PoSyntaxError(Exception):
""" Syntax error in a po file """
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'Po file syntax error: %s' % self.msg
class Msgfmt:
""" """
def __init__(self, po, name='unknown'):
self.po = po
self.name = name
self.messages = {}
def readPoData(self):
""" read po data from self.po and store it in self.poLines """
output = []
if isinstance(self.po, types.FileType):
self.po.seek(0)
output = self.po.readlines()
if isinstance(self.po, list):
output = self.po
if isinstance(self.po, str):
output = open(self.po, 'rb').readlines()
if not output:
raise ValueError("self.po is invalid! %s" % type(self.po))
return output
def add(self, id, str, fuzzy):
"Add a non-empty and non-fuzzy translation to the dictionary."
if str and not fuzzy:
self.messages[id] = str
def generate(self):
"Return the generated output."
keys = self.messages.keys()
# the keys are sorted in the .mo file
keys.sort()
offsets = []
ids = strs = ''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(self.messages[id])))
ids += id + '\0'
strs += self.messages[id] + '\0'
output = ''
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
        # the keys start right after the index tables, and the translated
        # strings follow the keys.
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
def get(self):
""" """
ID = 1
STR = 2
section = None
fuzzy = 0
lines = self.readPoData()
# Parse the catalog
lno = 0
for l in lines:
lno += 1
# If we get a comment line after a msgstr or a line starting with
# msgid, this is a new entry
# XXX: l.startswith('msgid') is needed because not all msgid/msgstr
# pairs in the plone pos have a leading comment
if (l[0] == '#' or l.startswith('msgid')) and section == STR:
self.add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid'):
section = ID
l = l[5:]
msgid = msgstr = ''
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
# XXX: Does this always follow Python escape semantics?
# XXX: eval is evil because it could be abused
try:
l = eval(l, globals())
except Exception as msg:
raise PoSyntaxError('%s (line %d of po file %s): \n%s' % (msg, lno, self.name, l))
if section == ID:
msgid += l
elif section == STR:
msgstr += l
else:
raise PoSyntaxError('error in line %d of po file %s' % (lno, self.name))
# Add last entry
if section == STR:
self.add(msgid, msgstr, fuzzy)
# Compute output
return self.generate()
def getAsFile(self):
return StringIO(self.get())
def __call__(self):
return self.getAsFile()
|
StarcoderdataPython
|
371830
|
# Generated by Django 2.0.5 on 2018-07-25 11:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("core", "0010_notification")]
operations = [
migrations.AlterModelOptions(
name="notification", options={"ordering": ["-pk"]}
),
migrations.AlterField(
model_name="solution",
name="summary",
field=models.TextField(max_length=3000, null=True),
),
]
|
StarcoderdataPython
|
9711300
|
import os
import sys
import json
current_path = os.path.dirname(os.path.abspath(__file__))
if current_path not in sys.path:
sys.path.append(current_path)
import launcher_log
root_path = os.path.abspath( os.path.join(current_path, os.pardir))
data_path = os.path.join(root_path, 'data')
config_path = os.path.join(root_path, 'config.json')
LOGIN_USERNAME = "login_username"
LOGIN_PASSWORD = "<PASSWORD>"
ACCESS_ID = "access_id"
ACCESS_SECRET = "access_secret"
BUCKET_NAME = "bucket_name"
HOME_DIR = "home_dir"
config = {}
def load():
global config, config_path
try:
config = json.load(open(config_path, 'r'))
#print json.dumps(config, sort_keys=True, separators=(',',':'), indent=4)
except Exception as exc:
print("Error in configuration file:", exc)
def save():
global config, config_path
try:
json.dump(config, open(config_path, "w"), sort_keys=True, separators=(',',':'), indent=2)
except Exception as e:
launcher_log.warn("save config %s fail %s", config_path, e)
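# Walks the nested config dict along `path` (a sequence of keys) and returns
# default_val if any key along the way is missing.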
def get(path, default_val=""):
global config
try:
value = default_val
cmd = "config"
for p in path:
cmd += '["%s"]' % p
value = eval(cmd)
return value
except:
return default_val
def _set(m, k_list, v):
k0 = k_list[0]
if len(k_list) == 1:
m[k0] = v
return
if k0 not in m:
m[k0] = {}
_set(m[k0], k_list[1:], v)
def set(path, val):
global config
_set(config, path, val)
def recheck_module_path():
global config
need_save_config = False
modules = ["ossftp", "launcher"]
if get(["modules", "ossftp", "address"], -1) == -1:
need_save_config = True
set(["modules", "ossftp", "address"], "127.0.0.1")
if get(["modules", "ossftp", "port"], -1) == -1:
need_save_config = True
set(["modules", "ossftp", "port"], 2048)
if get(["modules", "launcher", "control_port"], 0) == 0:
need_save_config = True
set(["modules", "launcher", "control_port"], 8192)
if get(["modules", "launcher", "language"], "") == "":
need_save_config = True
set(["modules", "launcher", "language"], "cn")
accounts = get(["modules", "accounts"], [])
if len(accounts) == 0:
account = {}
account[LOGIN_USERNAME] = ''
account[LOGIN_PASSWORD] = ''
account[ACCESS_ID] = ''
account[ACCESS_SECRET] = ''
account[BUCKET_NAME] = ''
account[HOME_DIR] = ''
accounts.append(account)
need_save_config = True
set(["modules", "accounts"], accounts)
return need_save_config
def create_data_path():
if not os.path.isdir(data_path):
os.mkdir(data_path)
data_launcher_path = os.path.join(data_path, 'launcher')
if not os.path.isdir(data_launcher_path):
os.mkdir(data_launcher_path)
data_ossftp_path = os.path.join(data_path, 'ossftp')
if not os.path.isdir(data_ossftp_path):
os.mkdir(data_ossftp_path)
def get_account_info(login_username, login_password):
if login_username is None or login_username == "":
return None
global config
account_list = config['modules']['accounts']
for item in account_list:
if login_username == item['login_username'] and login_password == item['login_password']:
return item
return None
def main():
create_data_path()
if os.path.isfile(config_path):
load()
if recheck_module_path():
save()
main()
|
StarcoderdataPython
|
8149499
|
<reponame>DanielVenturini/BCDetect<filename>NodeManager.py<gh_stars>1-10
# -*- coding:ISO-8859-1 -*-
import subprocess
import re
'''
This file contains the implementations of the functions that are
responsible for changing the node version.
'''
# 0.11.16 -> 2015-01-14
# 1.8.2 -> 2015-05-04
# 2.5.0 -> 2015-08-04
# 3.3.1 -> 2015-09-08
# 4.2.2 -> 2015-10-29
# 5.11.1 -> 2016-04-26
# 6.9.2 -> 2016-10-25
# 7.10.1 -> 2017-05-30
# 8.9.0 -> 2017-10-31
# 9.11.2 -> 2018-04-24
# 10.12.0 -> 2018-10-10
# 12.18.3 -> 2020-04-21
nodeDates = ['2015-01-14', '2015-05-04', '2015-08-04', '2015-09-08', '2015-10-29',
'2016-04-26', '2016-10-25', '2017-05-30', '2017-10-31', '2018-04-24', '2018-10-10', '2020-04-21']
nodeVersions = {'2015-01-14':'0.11.16', '2015-05-04':'1.8.2', '2015-08-04':'2.5.0', '2015-09-08':'3.3.1', '2015-10-29':'4.2.2',
'2016-04-26':'5.11.1', '2016-10-25':'6.9.2', '2017-05-30':'7.10.1', '2017-10-31':'8.9.0', '2018-04-24':'9.11.2', '2018-10-10': '10.12.0', '2020-04-21': '12.18.3'}
# get the next listed version above the given version (or the latest one)
def getVersionOnVersion(version):
versions = list(nodeVersions.values()) # get all versions
versions.sort() # sort the version
for i, lVersion in enumerate(versions): # in each version
        if lVersion > version: # if this listed version is greater than the given one
            return versions[i] # return the first such version
return versions[-1] # return the last version
# check if version is installed
def isInstalled(version):
if subprocess.getstatusoutput('bash nvm.sh version {0}'.format(version))[1].__eq__('N/A'):
return False
else:
return True
# install in local machine the specify version of node
def installVersion(version):
subprocess.getstatusoutput('bash nvm.sh install {0}'.format(version))
# format the output
def printLine(num, version):
if num < 10:
line = str(num) + ' - '
else:
line = str(num) + ' - '
line += version
i = 8 - len(version)
line += (' ' * i)
line += ': '
print(line, end='', flush=True)
# install all required versions of node js
def installAllVersions():
    print('\nInstall all - 12 - required versions of NodeJs')
for i, date in enumerate(nodeDates):
version = nodeVersions[date]
printLine(i+1, version)
if not isInstalled(version):
installVersion(version)
print("OK")
print('')
# based on the date, get the latest version
# of Node before this date
def getVersionOnDate(date):
for dateNode in nodeDates:
        if date < dateNode: # if the release date is earlier than this node date
return nodeVersions[dateNode] # get the node version in this date
return nodeVersions[nodeDates[-1]] # latest version
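# e.g. getVersionOnDate('2016-01-01') returns '5.11.1' with the table above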
# return the version if it is specified in the package
# if the developer put the version in 'engines'->'node', use that
def getVersionOnPackage(package):
engines = package.get('engines') # get the map engines, if exists, or raise KeyError
version = engines['node'] # get the version of node
version = re.search('[\d]+', version).group(0)
    version = getVersionOnVersion(version) # map the major version to a full listed version
return version
# return the version of NodeJs from package.json if present, otherwise based on the release date
# and install that version of node if it isn't installed yet
def getVersion(package, date):
# first, try to get the version on the package.json
# after, get based in the date of release
version = ' '
try:
version = getVersionOnPackage(package)
if not isInstalled(version):
installVersion(version) # try install
except (KeyError, IndexError, AttributeError, TypeError):
version = getVersionOnDate(date)
return version
|
StarcoderdataPython
|
11375160
|
#! /usr/bin/python3
import sys
import pennylane as qml
from pennylane import numpy as np
# DO NOT MODIFY any of these parameters
a = 0.7
b = -0.3
dev = qml.device("default.qubit", wires=3)
def natural_gradient(params):
"""Calculate the natural gradient of the qnode() cost function.
The code you write for this challenge should be completely contained within this function
between the # QHACK # comment markers.
You should evaluate the metric tensor and the gradient of the QNode, and then combine these
together using the natural gradient definition. The natural gradient should be returned as a
NumPy array.
The metric tensor should be evaluated using the equation provided in the problem text. Hint:
you will need to define a new QNode that returns the quantum state before measurement.
Args:
params (np.ndarray): Input parameters, of dimension 6
Returns:
np.ndarray: The natural gradient evaluated at the input parameters, of dimension 6
"""
natural_grad = np.zeros(6)
# QHACK #
num_qubits = 3
num_params = len(params)
F = np.zeros((num_params, num_params))
sign_ei = [1, 1, -1, -1]
sign_ej = [1, -1, 1, -1]
for i in range(num_params):
for j in range(num_params):
ei = np.zeros((len(params)))
ei[i] += 1
ej = np.zeros((len(params)))
ej[j] += 1
temp = []
for k in range(4):
qnode(params)
psi_original = dev.state
qnode(params + (sign_ei[k]*ei + sign_ej[k]*ej)*np.pi/2)
psi_shifted = dev.state
temp += [np.absolute(np.dot(np.conj(psi_original), psi_shifted))**2]
F[i,j] = (-1*temp[0] + temp[1] + temp[2] - temp[3])/8
F_inv = np.linalg.inv(F)
grad_function = qml.grad(qnode)
params_grad = grad_function(params)[0]
natural_grad = np.dot(F_inv, params_grad)
# QHACK #
return natural_grad
def non_parametrized_layer():
"""A layer of fixed quantum gates.
# DO NOT MODIFY anything in this function! It is used to judge your solution.
"""
qml.RX(a, wires=0)
qml.RX(b, wires=1)
qml.RX(a, wires=1)
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.RZ(a, wires=0)
qml.Hadamard(wires=1)
qml.CNOT(wires=[0, 1])
qml.RZ(b, wires=1)
qml.Hadamard(wires=0)
def variational_circuit(params):
"""A layered variational circuit composed of two parametrized layers of single qubit rotations
interleaved with non-parameterized layers of fixed quantum gates specified by
``non_parametrized_layer``.
The first parametrized layer uses the first three parameters of ``params``, while the second
layer uses the final three parameters.
# DO NOT MODIFY anything in this function! It is used to judge your solution.
"""
non_parametrized_layer()
qml.RX(params[0], wires=0)
qml.RY(params[1], wires=1)
qml.RZ(params[2], wires=2)
non_parametrized_layer()
qml.RX(params[3], wires=0)
qml.RY(params[4], wires=1)
qml.RZ(params[5], wires=2)
@qml.qnode(dev)
def qnode(params):
"""A PennyLane QNode that pairs the variational_circuit with an expectation value
measurement.
# DO NOT MODIFY anything in this function! It is used to judge your solution.
"""
variational_circuit(params)
return qml.expval(qml.PauliX(1))
if __name__ == "__main__":
# DO NOT MODIFY anything in this code block
# Load and process inputs
params = sys.stdin.read()
params = params.split(",")
params = np.array(params, float)
updated_params = natural_gradient(params)
print(*updated_params, sep=",")
|
StarcoderdataPython
|
3488204
|
<reponame>ea42gh/holoviews
from __future__ import absolute_import, division, unicode_literals
import param
from .chart import ScatterPlot
from ...element import Tiles
class LabelPlot(ScatterPlot):
xoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
    yoffset = param.Number(default=None, doc="""
        Amount of offset to apply to labels along y-axis.""")
style_opts = ['visible', 'color', 'family', 'size']
_nonvectorized_styles = []
_style_key = 'textfont'
@classmethod
def trace_kwargs(cls, is_geo=False, **kwargs):
if is_geo:
return {'type': 'scattermapbox', 'mode': 'text'}
else:
return {'type': 'scatter', 'mode': 'text'}
def get_data(self, element, ranges, style, is_geo=False, **kwargs):
text_dim = element.vdims[0]
xs = element.dimension_values(0)
if self.xoffset:
xs = xs + self.xoffset
ys = element.dimension_values(1)
if self.yoffset:
ys = ys + self.yoffset
text = [text_dim.pprint_value(v) for v in element.dimension_values(2)]
if is_geo:
lon, lat = Tiles.easting_northing_to_lon_lat(xs, ys)
return [{"lon": lon, "lat": lat, 'text': text}]
else:
x, y = ('y', 'x') if self.invert_axes else ('x', 'y')
return [{x: xs, y: ys, 'text': text}]
|
StarcoderdataPython
|
6608127
|
from sitemap import Siteindex, Sitemap
def test_siteindex():
siteindex = Siteindex()
sitemap = Sitemap('https://www.example.com/sitemap.xml')
siteindex.add_sitemap(sitemap)
expected = '''<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<siteindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/siteindex.xsd"><sitemap><loc>https://www.example.com/sitemap.xml</loc></sitemap></siteindex>'''
assert siteindex.to_string() == expected
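# A hedged follow-on sketch (an addition, not part of the original test): the same calls
# exercised above can register several sitemaps before serialising.
def build_index(urls):
    index = Siteindex()
    for url in urls:
        index.add_sitemap(Sitemap(url))
    return index.to_string()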
|
StarcoderdataPython
|
6462238
|
<filename>components/stdproc/orbit/mocompbaseline/Mocompbaseline.py
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2010 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import print_function
from isceobj import Constants as CN
from iscesys.Component.Component import Component, Port
from iscesys.Compatibility import Compatibility
from stdproc.orbit import mocompbaseline
DIM = 3
ELLIPSOID_ECCENTRICITY_SQUARED = Component.Parameter(
'ellipsoidEccentricitySquared',
public_name='ELLIPSOID_ECCENTRICITY_SQUARED',
default=CN.EarthEccentricitySquared,
type=float,
mandatory=False,
intent='input',
doc=''
)
ELLIPSOID_MAJOR_SEMIAXIS = Component.Parameter(
'ellipsoidMajorSemiAxis',
public_name='ELLIPSOID_MAJOR_SEMIAXIS',
default=CN.EarthMajorSemiAxis,
type=float,
mandatory=False,
intent='input',
doc=''
)
HEIGHT = Component.Parameter(
'height',
public_name='HEIGHT',
default=None,
type=float,
mandatory=True,
intent='input',
doc=''
)
POSITION1 = Component.Parameter(
'position1',
public_name='POSITION1',
default=[],
container=list,
type=float,
mandatory=True,
intent='input',
doc=''
)
POSITION2 = Component.Parameter(
'position2',
public_name='POSITION2',
default=[],
container=list,
type=float,
mandatory=True,
intent='input',
doc=''
)
MOCOMP_POSITION1 = Component.Parameter(
'mocompPosition1',
public_name='MOCOMP_POSITION1',
default=[],
container=list,
type=float,
mandatory=True,
intent='input',
doc=''
)
MOCOMP_POSITION2 = Component.Parameter(
'mocompPosition2',
public_name='MOCOMP_POSITION2',
default=[],
container=list,
type=float,
mandatory=True,
intent='input',
doc=''
)
MOCOMP_POSITION_INDEX1 = Component.Parameter(
'mocompPositionIndex1',
public_name='MOCOMP_POSITION_INDEX1',
default=[],
container=list,
type=int,
mandatory=True,
intent='input',
doc=''
)
MOCOMP_POSITION_INDEX2 = Component.Parameter(
'mocompPositionIndex2',
public_name='MOCOMP_POSITION_INDEX2',
default=[],
container=list,
type=int,
mandatory=True,
intent='input',
doc=''
)
PEG_HEADING = Component.Parameter(
'pegHeading',
public_name='PEG_HEADING',
default=None,
type=float,
mandatory=True,
intent='input',
doc=''
)
PEG_LATITUDE = Component.Parameter(
'pegLatitude',
public_name='PEG_LATITUDE',
default=None,
type=float,
mandatory=True,
intent='input',
doc=''
)
PEG_LONGITUDE = Component.Parameter(
'pegLongitude',
public_name='PEG_LONGITUDE',
default=None,
type=float,
mandatory=True,
intent='input',
doc=''
)
PLANET_LOCAL_RADIUS = Component.Parameter(
'planetLocalRadius',
public_name='PLANET_LOCAL_RADIUS',
default=None,
type=float,
mandatory=True,
intent='input',
doc=''
)
BASE1 = Component.Parameter(
'base1',
public_name='BASE1',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
BASE2 = Component.Parameter(
'base2',
public_name='BASE2',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
MIDPOINT = Component.Parameter(
'midpoint',
public_name='MIDPOINT',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
MIDPOINT1 = Component.Parameter(
'midpoint1',
public_name='MIDPOINT1',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
MIDPOINT2 = Component.Parameter(
'midpoint2',
public_name='MIDPOINT2',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
MOCOMP_BASELINE = Component.Parameter(
'baselineArray',
public_name='MOCOMP_BASELINE',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
SC = Component.Parameter(
'sc',
public_name='SC',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
SCH = Component.Parameter(
'sch',
public_name='SCH',
default=[],
container=list,
type=float,
mandatory=False,
intent='output',
doc=''
)
class Mocompbaseline(Component):
parameter_list = (
HEIGHT,
ELLIPSOID_ECCENTRICITY_SQUARED,
PEG_LATITUDE,
PEG_LONGITUDE,
PLANET_LOCAL_RADIUS,
MOCOMP_POSITION_INDEX1,
ELLIPSOID_MAJOR_SEMIAXIS,
MOCOMP_POSITION_INDEX2,
POSITION1,
POSITION2,
MOCOMP_POSITION1,
MOCOMP_POSITION2,
PEG_HEADING,
SCH,
SC,
BASE2,
MIDPOINT1,
MIDPOINT2,
MIDPOINT,
BASE1,
MOCOMP_BASELINE
)
logging_name = 'isce.stdproc.orbit.mocompbaseline'
family = 'mocompbaseline'
def __init__(self,family='',name=''):
super(Mocompbaseline, self).__init__(family if family else self.__class__.family, name=name)
self.dim1_midpoint = None
self.dim2_midpoint = None
self.dim1_midpoint1 = None
self.dim2_midpoint1 = None
self.dim1_midpoint2 = None
self.dim2_midpoint2 = None
self.dim1_base1 = None
self.dim2_base1 = None
self.dim1_base2 = None
self.dim2_base2 = None
self.dim1_sch = None
self.dim2_sch = None
self.dim1_sc = None
self.dim2_sc = None
# Planet information
# Peg information
# Orbit2SCH information
self.dim1_position1 = None
self.dim2_position1 = None
self.dim1_position2 = None
self.dim2_position2 = None
# FormSLC information
self.dim1_mocompPosition1 = None
self.dim1_mocompPositionIndex1 = None
self.dim1_mocompPosition2 = None
self.dim1_mocompPositionIndex2 = None
# Output
self.dim1_baselineArray = None
self.dim2_baselineArray = None
# self.createPorts()
self.initOptionalAndMandatoryLists()
return None
def createPorts(self):
masterOrbitPort = Port(name='masterOrbit', method=self.addMasterOrbit)
slaveOrbitPort = Port(name='slaveOrbit', method=self.addSlaveOrbit)
pegPort = Port(name='peg', method=self.addPeg)
ellipsoidPort = Port(name='ellipsoid', method=self.addEllipsoid)
self._inputPorts.add(masterOrbitPort)
self._inputPorts.add(slaveOrbitPort)
self._inputPorts.add(pegPort)
self._inputPorts.add(ellipsoidPort)
return None
def mocompbaseline(self):
for port in self.inputPorts:
port()
self.prepareArraySizes()
self.allocateArrays()
self.setState()
mocompbaseline.mocompbaseline_Py()
self.getState()
self.deallocateArrays()
def prepareArraySizes(self):
self.dim1_baselineArray = len(self.mocompPosition1)
self.dim2_baselineArray = DIM
self.dim1_base1 = len(self.mocompPosition1)
self.dim2_base1 = DIM
self.dim1_base2 = len(self.mocompPosition1)
self.dim2_base2 = DIM
self.dim1_sch = len(self.mocompPosition1)
self.dim2_sch = DIM
self.dim1_sc = len(self.mocompPosition1)
self.dim2_sc = DIM
self.dim1_midpoint = len(self.mocompPosition1)
self.dim2_midpoint = DIM
self.dim1_midpoint1 = len(self.mocompPosition1)
self.dim2_midpoint1 = DIM
self.dim1_midpoint2 = len(self.mocompPosition1)
self.dim2_midpoint2 = DIM
def setState(self):
mocompbaseline.setStdWriter_Py(int(self.stdWriter))
mocompbaseline.setSchPosition1_Py(self.position1,
self.dim1_position1,
self.dim2_position1)
mocompbaseline.setSchPosition2_Py(self.position2,
self.dim1_position2,
self.dim2_position2)
mocompbaseline.setMocompPosition1_Py(self.mocompPosition1,
self.dim1_mocompPosition1)
mocompbaseline.setMocompPositionIndex1_Py(
self.mocompPositionIndex1,
self.dim1_mocompPositionIndex1)
mocompbaseline.setMocompPosition2_Py(self.mocompPosition2,
self.dim1_mocompPosition2)
mocompbaseline.setMocompPositionIndex2_Py(
self.mocompPositionIndex2,
self.dim1_mocompPositionIndex2)
mocompbaseline.setEllipsoidMajorSemiAxis_Py(
float(self.ellipsoidMajorSemiAxis)
)
mocompbaseline.setEllipsoidEccentricitySquared_Py(
float(self.ellipsoidEccentricitySquared)
)
mocompbaseline.setPlanetLocalRadius_Py(float(self.planetLocalRadius))
mocompbaseline.setPegLatitude_Py(float(self.pegLatitude))
mocompbaseline.setPegLongitude_Py(float(self.pegLongitude))
mocompbaseline.setPegHeading_Py(float(self.pegHeading))
mocompbaseline.setHeight_Py(float(self.height))
def setSchPosition1(self, var):
self.position1 = var
def setSchPosition2(self, var):
self.position2 = var
def setHeight(self, var):
self.height = var
def setMocompPosition1(self, var):
self.mocompPosition1 = var
def setMocompPositionIndex1(self, var):
self.mocompPositionIndex1 = var
def setMocompPosition2(self, var):
self.mocompPosition2 = var
def setMocompPositionIndex2(self, var):
self.mocompPositionIndex2 = var
def setEllipsoidMajorSemiAxis(self, var):
self.ellipsoidMajorSemiAxis = float(var)
def setEllipsoidEccentricitySquared(self, var):
self.ellipsoidEccentricitySquared = float(var)
def setPegLatitude(self, var):
self.pegLatitude = float(var)
def setPegLongitude(self, var):
self.pegLongitude = float(var)
def setPegHeading(self, var):
self.pegHeading = float(var)
def getState(self):
dim1 = mocompbaseline.get_dim1_s1_Py()
if dim1 != self.dim1_baselineArray:
self.logger.info("dim1_baselineArray changed to %d" % (dim1))
self.dim1_baselineArray = dim1
self.dim1_midpoint = dim1
self.dim1_midpoint1 = dim1
self.dim1_midpoint2 = dim1
self.dim1_base1 = dim1
self.dim1_base2 = dim1
self.dim1_sch = dim1
self.dim1_sc = dim1
self.baselineArray = mocompbaseline.getBaseline_Py(
self.dim1_baselineArray, self.dim2_baselineArray
)
self.midpoint = mocompbaseline.getMidpoint_Py(self.dim1_midpoint,
self.dim2_midpoint)
self.midpoint1 = mocompbaseline.getMidpoint1_Py(self.dim1_midpoint1,
self.dim2_midpoint1)
self.midpoint2 = mocompbaseline.getMidpoint2_Py(self.dim1_midpoint2,
self.dim2_midpoint2)
self.base1 = mocompbaseline.getBaseline1_Py(self.dim1_base1,
self.dim2_base1)
self.base2 = mocompbaseline.getBaseline2_Py(self.dim1_base2,
self.dim2_base2)
self.sch = mocompbaseline.getSch_Py(self.dim1_sch, self.dim2_sch)
self.sc = mocompbaseline.getSc_Py(self.dim1_sc, self.dim2_sc)
def getBaseline(self):
return self.baselineArray
@property
def baseline(self):
return self.baselineArray
def getMidpoint(self):
return self.midpoint
def getMidpoint1(self):
return self.midpoint1
def getMidpoint2(self):
return self.midpoint2
def getBaseline1(self):
return self.base1
def getBaseline2(self):
return self.base2
def getSchs(self):
return self.position1, self.sch
def getSc(self):
return self.sc
def allocateArrays(self):
if self.dim1_position1 is None:
self.dim1_position1 = len(self.position1)
self.dim2_position1 = len(self.position1[0])
if (not self.dim1_position1) or (not self.dim2_position1):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_sch1_Py(self.dim1_position1,
self.dim2_position1)
if self.dim1_position2 is None:
self.dim1_position2 = len(self.position2)
self.dim2_position2 = len(self.position2[0])
if (not self.dim1_position2) or (not self.dim2_position2):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_sch2_Py(self.dim1_position2,
self.dim2_position2)
if self.dim1_mocompPosition1 is None:
self.dim1_mocompPosition1 = len(self.mocompPosition1)
if (not self.dim1_mocompPosition1):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_s1_Py(self.dim1_mocompPosition1)
if self.dim1_mocompPositionIndex1 is None:
self.dim1_mocompPositionIndex1 = len(self.mocompPositionIndex1)
if (not self.dim1_mocompPositionIndex1):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_is1_Py(self.dim1_mocompPositionIndex1)
if self.dim1_mocompPosition2 is None:
self.dim1_mocompPosition2 = len(self.mocompPosition2)
if not self.dim1_mocompPosition2:
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_s2_Py(self.dim1_mocompPosition2)
if self.dim1_mocompPositionIndex2 is None:
self.dim1_mocompPositionIndex2 = len(self.mocompPositionIndex2)
if not self.dim1_mocompPositionIndex2:
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_is2_Py(self.dim1_mocompPositionIndex2)
if self.dim1_baselineArray is None:
self.dim1_baselineArray = len(self.baselineArray)
self.dim2_baselineArray = len(self.baselineArray[0])
if (not self.dim1_baselineArray) or (not self.dim2_baselineArray):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_baselineArray_Py(self.dim1_baselineArray,
self.dim2_baselineArray)
if self.dim1_midpoint is None:
self.dim1_midpoint = len(self.midpoint)
self.dim2_midpoint = len(self.midpoint[0])
if (not self.dim1_midpoint) or (not self.dim2_midpoint):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_midPointArray_Py(self.dim1_midpoint,
self.dim2_midpoint)
if self.dim1_midpoint1 is None:
self.dim1_midpoint1 = len(self.midpoint1)
self.dim2_midpoint1 = len(self.midpoint1[0])
if (not self.dim1_midpoint1) or (not self.dim2_midpoint1):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_midPointArray1_Py(self.dim1_midpoint1,
self.dim2_midpoint1)
if self.dim1_midpoint2 is None:
self.dim1_midpoint2 = len(self.midpoint2)
self.dim2_midpoint2 = len(self.midpoint2[0])
if (not self.dim1_midpoint2) or (not self.dim2_midpoint2):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_midPointArray2_Py(self.dim1_midpoint2,
self.dim2_midpoint2)
if self.dim1_base1 is None:
self.dim1_base1 = len(self.base1)
self.dim2_base1 = len(self.base1[0])
if (not self.dim1_base1) or (not self.dim2_base1):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_baselineArray1_Py(self.dim1_base1,
self.dim2_base1)
if self.dim1_base2 is None:
self.dim1_base2 = len(self.base2)
self.dim2_base2 = len(self.base2[0])
if (not self.dim1_base2) or (not self.dim2_base2):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_baselineArray2_Py(self.dim1_base2,
self.dim2_base2)
if self.dim1_sch is None:
self.dim1_sch = len(self.sch)
self.dim2_sch = len(self.sch[0])
if (not self.dim1_sch) or (not self.dim2_sch):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_schArray_Py(self.dim1_sch,
self.dim2_sch)
if self.dim1_sc is None:
self.dim1_sc = len(self.sc)
self.dim2_sc = len(self.sc[0])
if (not self.dim1_sc) or (not self.dim2_sc):
print("Error. Trying to allocate zero size array")
raise Exception
mocompbaseline.allocate_scArray_Py(self.dim1_sc, self.dim2_sc)
def deallocateArrays(self):
mocompbaseline.deallocate_sch1_Py()
mocompbaseline.deallocate_sch2_Py()
mocompbaseline.deallocate_s1_Py()
mocompbaseline.deallocate_is1_Py()
mocompbaseline.deallocate_s2_Py()
mocompbaseline.deallocate_is2_Py()
mocompbaseline.deallocate_baselineArray_Py()
mocompbaseline.deallocate_midPointArray_Py()
mocompbaseline.deallocate_midPointArray1_Py()
mocompbaseline.deallocate_midPointArray2_Py()
mocompbaseline.deallocate_baselineArray1_Py()
mocompbaseline.deallocate_baselineArray2_Py()
mocompbaseline.deallocate_schArray_Py()
mocompbaseline.deallocate_scArray_Py()
def addPeg(self):
import math
peg = self._inputPorts.getPort(name='peg').getObject()
if peg:
try:
self.planetLocalRadius = peg.getRadiusOfCurvature()
self.pegLatitude = math.radians(peg.getLatitude())
self.pegLongitude = math.radians(peg.getLongitude())
self.pegHeading = math.radians(peg.getHeading())
except AttributeError:
self.logger.error("Object %s requires getLatitude(), getLongitude() and getHeading() methods" % (peg.__class__))
def addEllipsoid(self):
ellipsoid = self._inputPorts.getPort(name='ellipsoid').getObject()
if(ellipsoid):
try:
self.ellipsoidEccentricitySquared = ellipsoid.get_e2()
self.ellipsoidMajorSemiAxis = ellipsoid.get_a()
except AttributeError:
self.logger.error("Object %s requires get_e2() and get_a() methods" % (ellipsoid.__class__))
def addMasterOrbit(self):
orbit = self._inputPorts.getPort(name='masterOrbit').getObject()
if (orbit):
try:
(time,position,velocity,offset) = orbit._unpackOrbit()
self.time = time
self.position1 = position
except AttributeError:
self.logger.error("Object %s requires an _unpackOrbit() method" % (orbit.__class__))
raise AttributeError
def addSlaveOrbit(self):
orbit = self._inputPorts.getPort(name='slaveOrbit').getObject()
if (orbit):
try:
(time,position,velocity,offset) = orbit._unpackOrbit()
self.time = time
self.position2 = position
except AttributeError:
self.logger.error("Object %s requires an _unpackOrbit() method" % (orbit.__class__))
raise AttributeError
pass
|
StarcoderdataPython
|
6421546
|
"""
--- Day 20: Particle Swarm ---
Suddenly, the GPU contacts you, asking for help. Someone has asked it to simulate too many particles, and it won't be
able to finish them all in time to render the next frame at this rate.
It transmits to you a buffer (your puzzle input) listing each particle in order (starting with particle 0, then particle
1, particle 2, and so on). For each particle, it provides the X, Y, and Z coordinates for the particle's position (p),
velocity (v), and acceleration (a), each in the format <X,Y,Z>.
Each tick, all particles are updated simultaneously. A particle's properties are updated in the following order:
Increase the X velocity by the X acceleration.
Increase the Y velocity by the Y acceleration.
Increase the Z velocity by the Z acceleration.
Increase the X position by the X velocity.
Increase the Y position by the Y velocity.
Increase the Z position by the Z velocity.
Because of seemingly tenuous rationale involving z-buffering, the GPU would like to know which particle will stay
closest to position <0,0,0> in the long term. Measure this using the Manhattan distance, which in this situation is
simply the sum of the absolute values of a particle's X, Y, and Z position.
For example, suppose you are only given two particles, both of which stay entirely on the X-axis (for simplicity).
Drawing the current states of particles 0 and 1 (in that order) with an adjacent a number line and diagram of current X
positions (marked in parentheses), the following would take place:
p=< 3,0,0>, v=< 2,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 4,0,0>, v=< 0,0,0>, a=<-2,0,0> (0)(1)
p=< 4,0,0>, v=< 1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=< 2,0,0>, v=<-2,0,0>, a=<-2,0,0> (1) (0)
p=< 4,0,0>, v=< 0,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-2,0,0>, v=<-4,0,0>, a=<-2,0,0> (1) (0)
p=< 3,0,0>, v=<-1,0,0>, a=<-1,0,0> -4 -3 -2 -1 0 1 2 3 4
p=<-8,0,0>, v=<-6,0,0>, a=<-2,0,0> (0)
At this point, particle 1 will never be closer to <0,0,0> than particle 0, and so, in the long run, particle 0 will stay
closest.
Which particle will stay closest to position <0,0,0> in the long term?
Your puzzle answer was 170.
--- Part Two ---
To simplify the problem further, the GPU would like to remove any particles that collide. Particles collide if their
positions ever exactly match. Because particles are updated simultaneously, more than two particles can collide at the
same time and place. Once particles collide, they are removed and cannot collide with anything else after that tick.
For example:
p=<-6,0,0>, v=< 3,0,0>, a=< 0,0,0>
p=<-4,0,0>, v=< 2,0,0>, a=< 0,0,0> -6 -5 -4 -3 -2 -1 0 1 2 3
p=<-2,0,0>, v=< 1,0,0>, a=< 0,0,0> (0) (1) (2) (3)
p=< 3,0,0>, v=<-1,0,0>, a=< 0,0,0>
p=<-3,0,0>, v=< 3,0,0>, a=< 0,0,0>
p=<-2,0,0>, v=< 2,0,0>, a=< 0,0,0> -6 -5 -4 -3 -2 -1 0 1 2 3
p=<-1,0,0>, v=< 1,0,0>, a=< 0,0,0> (0)(1)(2) (3)
p=< 2,0,0>, v=<-1,0,0>, a=< 0,0,0>
p=< 0,0,0>, v=< 3,0,0>, a=< 0,0,0>
p=< 0,0,0>, v=< 2,0,0>, a=< 0,0,0> -6 -5 -4 -3 -2 -1 0 1 2 3
p=< 0,0,0>, v=< 1,0,0>, a=< 0,0,0> X (3)
p=< 1,0,0>, v=<-1,0,0>, a=< 0,0,0>
------destroyed by collision------
------destroyed by collision------ -6 -5 -4 -3 -2 -1 0 1 2 3
------destroyed by collision------ (3)
p=< 0,0,0>, v=<-1,0,0>, a=< 0,0,0>
In this example, particles 0, 1, and 2 are simultaneously destroyed at the time and place marked X. On the next tick,
particle 3 passes through unharmed.
How many particles are left after all collisions are resolved?
Your puzzle answer was 571.
Both parts of this puzzle are complete! They provide two gold stars: **
"""
import re
from collections import namedtuple, defaultdict
Particle = namedtuple("Particle", "index, position, velocity, acceleration")
def find_closest_to_zero(particles, iterations=1_000):
for i in range(iterations):
particles = tick(particles)
return min(
_find_closest_to_zero(particles).items(),
key=lambda item: item[1])[0].index
def left_after_collisions(particles, iterations=1_000):
for i in range(iterations):
particles = tick_with_collisions(particles)
return len(particles)
def _find_closest_to_zero(particles):
    return {particle: manhattan_distance(particle) for particle in particles}
def manhattan_distance(particle):
return sum(map(abs, particle.position))
def tick_with_collisions(particles):
particles = tick(particles)
positions = defaultdict(list)
for particle in particles:
positions[particle.position].append(particle)
return [
particles[0]
for position, particles
in positions.items()
if len(particles) == 1
]
def tick(particles):
return list(map(_tick_single, particles))
def _tick_single(particle):
vx, vy, vz = particle.velocity
ax, ay, az = particle.acceleration
x, y, z = particle.position
vx, vy, vz = vx + ax, vy + ay, vz + az
return Particle(
particle.index,
(x + vx, y + vy, z + vz),
(vx, vy, vz),
particle.acceleration)
def parser(particles):
coordinates_pattern = re.compile(r"\w=<\s*(-?\d+),(-?\d+),(-?\d+)>")
def parse_coordinates(coordinates):
x, y, z = coordinates_pattern.match(coordinates).groups()
return tuple(map(int, [x, y, z]))
result = []
for particle_idx in range(len(particles)):
particle = particles[particle_idx]
position, velocity, acceleration = particle.split(", ")
result.append(Particle(
particle_idx,
parse_coordinates(position),
parse_coordinates(velocity),
parse_coordinates(acceleration)))
return result
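# A hedged shortcut for part 1 (an addition, not part of the original solution): in the
# long run the particle with the smallest acceleration magnitude stays closest to the
# origin; velocity and position magnitudes are used here only as rough tie-breakers.
# The fixed-iteration simulation above is the more general (if slower) approach.
def closest_in_long_run(particles):
    return min(
        particles,
        key=lambda p: (sum(map(abs, p.acceleration)),
                       sum(map(abs, p.velocity)),
                       sum(map(abs, p.position)))).index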
if __name__ == "__main__":
with open("20_particle_swarm.txt") as file:
puzzle = [line.strip() for line in file.readlines()]
particles = parser(puzzle)
print(f"part 1: {find_closest_to_zero(particles)}")
print(f"part 2: {left_after_collisions(particles)}")
|
StarcoderdataPython
|
1722439
|
<filename>Interview-Preparation/Facebook/ArraysStrings-valid-palindrome.py
import string
class Solution:
def isPalindrome(self, s: str) -> bool:
s = s.lower()
s = s.replace(' ','')
for p in string.punctuation:
s = s.replace(p, '')
i, j = 0, len(s)-1
while i < j:
if s[i] != s[j]:
return False
i, j = i+1, j-1
return True
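# A hedged alternative sketch (an addition, not part of the original snippet): filtering
# with str.isalnum() keeps letters and digits and drops everything else in one pass,
# which matches the usual "valid palindrome" definition without enumerating punctuation.
class SolutionAlt:
    def isPalindrome(self, s: str) -> bool:
        cleaned = [ch.lower() for ch in s if ch.isalnum()]
        return cleaned == cleaned[::-1]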
|
StarcoderdataPython
|
1623739
|
<filename>Algorithms/A Number After a Double Reversal/solution.py
class Solution:
def isSameAfterReversals(self, num: int) -> bool:
        # Reversing twice only changes a number that has trailing zeros (the first
        # reversal drops them), so check for no trailing zeros, or num == 0.
        return str(num) == str(num).rstrip("0") or num == 0
|
StarcoderdataPython
|
1889844
|
import sys
from cs50 import get_string
def main():
if len(sys.argv) != 2:
print("Usage: ./caesar k")
sys.exit(1)
k = int(sys.argv[1])
plaintext = get_string("plaintext: ")
print("ciphertext: ", end="")
for ch in plaintext:
if not ch.isalpha():
print(ch, end="") # printing non alpha characters as it is
continue
        ascii_offset = 65 if ch.isupper() else 97  # ASCII offset: 'A' for upper case, 'a' for lower case
        pi = ord(ch) - ascii_offset  # map the letter to a value between 0 and 25
        ci = (pi + k) % 26  # shift by k and wrap around the alphabet
        print(chr(ci + ascii_offset), end="")  # print the encrypted character
print()
return 0
if __name__ == "__main__":
main()
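# A hedged companion sketch (an addition, not part of the original program): decryption
# is the same rotation with the key applied in the opposite direction. The helper below
# avoids the cs50 prompts so it can be reused directly.
def decrypt(ciphertext, k):
    result = []
    for ch in ciphertext:
        if not ch.isalpha():
            result.append(ch)
            continue
        ascii_offset = 65 if ch.isupper() else 97
        result.append(chr((ord(ch) - ascii_offset - k) % 26 + ascii_offset))
    return "".join(result)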
|
StarcoderdataPython
|
179989
|
<filename>1-computer-vision/1-cv-preprocess-augmentation.py
# Understand how ImageDataGenerator labels images based on the directory structure
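# A minimal, hedged sketch of the idea described above (an addition to this stub):
# flow_from_directory assigns one label per sub-directory, in alphabetical order of the
# directory names. The 'data/train' path below is a placeholder.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255)  # augmentation arguments also go here
train_generator = train_datagen.flow_from_directory(
    'data/train',            # hypothetical directory with one sub-folder per class
    target_size=(150, 150),  # every image is resized to this shape
    batch_size=32,
    class_mode='binary')     # use 'categorical' for more than two classes
# The mapping from sub-directory name to integer label can be inspected with:
# print(train_generator.class_indices)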
|
StarcoderdataPython
|
9617753
|
<reponame>Hyeji-Kim/ENC<gh_stars>10-100
import numpy as np
import copy
import json
import os
import os.path as osp
import sys
import time
import itertools
import google.protobuf as pb
import random
from argparse import ArgumentParser
from pprint import pprint
import subprocess
from scipy import interpolate
from scipy.interpolate import interp1d
from itertools import groupby
sys.path.insert(0, osp.join('../../utils'))
from basic import *
from enc_map import *
from enc_model import *
from accuracy_metric import *
def Cost_Opt(Ctarget):
# -------------------------------
# -------------------------------
# Initial setting
# -------------------------------
# -------------------------------
# Parameters --------------------------------------------------------------
Sel_space = 0 # space margin {0 ~ 1.0} {0 : ENC-Map}
Sel_conv1_comp = 1 # compressed layer type {0 : comp w/ conv1} {1 : comp w/o conv1}
Sel_conv1 = 2 # initial rank of conv1 {0 : conv1 = from ENC-Map} {1 : conv1 = half} {2 : conv1 = max}
Sel_comp = 0 # baseline complexity {0 : only conv} {1 : only conv (all cost)} {2 : all layers}
Sel_norm = 0 # type of layer-wise accuracy metric {0 : PCA energy} {1 : validation accuracy}
# -------------------------------------------------------------------------
W_orig, C_orig, FC_cost, Net, W_norm, Wmax, C_norm, Cmax, R_norm, L, A, R, W, C = Net_spec(Sel_comp)
g = Net[:,3]
a = Net[:,7]/Net[:,1]
w = Net[:,6]/Net[:,1]
r10 = np.around(Net[:,1]/10)
r1_orig = [max(np.around(Net[i,1]/100),1) for i in range(len(Net))]
# complexity setting
c = a
# Load the eigenvalue cumsum
eigen_cumsum, eigenvalue = Eigen_cumsum_load(net_type, g)
# -----------------------------------------------
# ENC-Map : Single-shot Determination
# -----------------------------------------------
print 'Sel_space : ',Sel_space
print '=========== ENC-Map ==========='
# --------------------------------------------------
if Sel_norm == 0: ## PCA-energy
Map_out = ENC_Map(Ctarget[0],eigen_cumsum, Sel_comp, Sel_space, Sel_conv1, 0, Sel_norm)
Rmax_ = np.asarray(Map_out[2][:])
elif Sel_norm == 1: ## Measurement-based
command = 'addpath({}); enc_map({}, {}, {}, {}, {}); exit;'.format("'../../utils'",Ctarget[0], Sel_space, Sel_comp, 1, Sel_conv1)
subprocess.call(['matlab','-nodisplay','-nodesktop', '-nosplash', '-r', command])
matlab_tmp = np.loadtxt('tmp_/MATLAB_result.txt')
Rmax_ = matlab_tmp[:L]
Rmax_ = np.array([min(Rmax_[i],Net[i,1]) for i in range(L)])
R_Amax = np.array([max(Rmax_[i],r10[i]) for i in range(L)])
print 'Rmax : ',R_Amax.tolist()
    # Complexity of final rank configuration
if Sel_comp == 1:
C_max = (R_Amax.dot(a) + FC_cost)/C_orig
W_max = (R_Amax.dot(w) + FC_cost)/W_orig
else:
C_max = (R_Amax.dot(a))/C_orig
W_max = (R_Amax.dot(w))/W_orig
print('\nFINAL Rank Configuration : ')
for i in range(len(R_Amax)):
print int(R_Amax[i]),
print('\niter = {}, Cost = {}({}), Weight = {}({})'.format(0, C_max, round(C_max*C_orig/1000000), W_max, round(W_max*W_orig/1000000)))
print('\nEND ------- ')
return R_Amax, C_max, W_max
def main(args):
tmp = Net_spec(1)
L = np.shape(tmp[8])[1]
k = 0
global net_type
global gpu_num
global gpu_idx
# ----------------------------------------
# Parameters
# ----------------------------------------
    gpu_idx = map(int, list(args.gpu.split(',')))  # available GPU indices
#c_list = [0.5, 0.6, 0.7] # target complexity
c_list = [float(args.tar_comp)] # target complexity
# ----------------------------------------
net_type = args.type
gpu_num = len(gpu_idx)
R_list = np.zeros([len(c_list),L])
C_list = np.zeros(len(c_list))
W_list = np.zeros(len(c_list))
    # Network Compression -----------
for i in range(len(c_list)) :
tic()
Ctarget = [c_list[i], c_list[i]]
R_list[k,:], C_list[k], W_list[k] = Cost_Opt(Ctarget)
        print('[Done] : Network Compression for complexity {} - {}'.format(c_list[i], i))
toc()
k += 1
R_list = R_list[:k]
C_list = C_list[:k]
W_list = W_list[:k]
# Accuracy check ---------------
len_R = min(gpu_num, k)
gpu_idx = gpu_idx[:len_R]
gpu_num = len(gpu_idx)
A_val = np.zeros([k, 2])
A_test = np.zeros([k, 2])
tmp = Check_Acc_Train(R_list, k, gpu_idx, gpu_num, net_type, 1)
A_val[:,0] = tmp[0]
A_val[:,1] = tmp[1]
A_test[:,0], A_test[:,1] = Check_Acc_Test(R_list, gpu_idx, gpu_num, net_type)
# File write (final result) --------
filename = 'final_result_summary.txt'
f = open(filename, 'a+')
for i in range(len(R_list)):
for j in range(L):
f.write('{} '.format(R_list[i,j]))
f.write('{} '.format(C_list[i]))
f.write('{} '.format(W_list[i]))
f.write('{} '.format(A_val[i][0]))
f.write('{} '.format(A_val[i][1]))
f.write('{} '.format(A_test[i][0]))
f.write('{}\n'.format(A_test[i][1]))
if __name__ == '__main__':
parser = ArgumentParser(description="Network Compression")
parser.add_argument('--type')
parser.add_argument('--tar_comp', help="compression rate, range: (0.0 ~ 1.0)")
    parser.add_argument('--gpu', help="available GPU indices, e.g. 0,2,3")
args = parser.parse_args()
main(args)
|
StarcoderdataPython
|
1871593
|
<reponame>hussam-almarzoq/django-versatileimagefield
# -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
setup(
name='django-versatileimagefield',
packages=find_packages(),
version='1.11',
author=u'<NAME>',
author_email='<EMAIL>',
url='http://github.com/respondcreate/django-versatileimagefield/',
license='MIT License, see LICENSE',
description="A drop-in replacement for django's ImageField that provides "
"a flexible, intuitive and easily-extensible interface for "
"creating new images from the one assigned to the field.",
long_description=open('README.rst').read(),
zip_safe=False,
install_requires=['Pillow>=2.4.0', 'python-magic>=0.4.15,<1.0.0'],
include_package_data=True,
keywords=[
'django',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: Presentation',
]
)
|
StarcoderdataPython
|
1785933
|
# @property
# def urls():
# import django
# if django.VERSION < (1, 9):
# from .mfa_urls import url_patterns
# return url_patterns, 'mfa', ''
# else:
# from .mfa_urls import url_patterns
# return url_patterns,'mfa'
#
|
StarcoderdataPython
|
11339307
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PacbioDamasker(MakefilePackage):
"""Damasker: The Dazzler Repeat Masking Suite. This is a special fork
required for some pacbio utilities."""
homepage = "https://github.com/PacificBiosciences/DAMASKER"
url = "https://github.com/PacificBiosciences/DAMASKER"
version('2017-02-11',
git='https://github.com/PacificBiosciences/DAMASKER.git',
commit='<PASSWORD>4<PASSWORD>d<PASSWORD>85<PASSWORD>f3f0c<PASSWORD>4')
depends_on('gmake', type='build')
def edit(self, spec, prefix):
mkdirp(prefix.bin)
makefile = FileFilter('Makefile')
        makefile.filter(r'DEST_DIR\s*=\s*~/bin', 'DEST_DIR = ' + prefix.bin)
gmf = FileFilter('GNUmakefile')
        gmf.filter(r'rsync\s*-av\s*\$\{ALL\}\s*\$\{PREFIX\}/bin',
                   'cp ${ALL} ' + prefix.bin)
|
StarcoderdataPython
|
1775209
|
<gh_stars>0
import json
from django import template
register = template.Library()
@register.filter
def here(page, request):
return request.path.startswith(page.get_absolute_url())
@register.simple_tag
def node_module(path):
return '/node_modules/{}'.format(path)
@register.assignment_tag(takes_context=True)
def navigation_json(context, pages, section=None):
"""
Renders a navigation list for the given pages.
The pages should all be a subclass of PageBase, and possess a get_absolute_url() method.
You can also specify an alias for the navigation, at which point it will be set in the
context rather than rendered.
"""
request = context["request"]
# Compile the entries.
def page_entry(page):
# Do nothing if the page is to be hidden from not logged in users
if page.hide_from_anonymous and not request.user.is_authenticated():
return
# Do nothing if the page is set to offline
if not page.is_online:
return
url = page.get_absolute_url()
return {
"url": url,
"title": str(page),
"here": request.path.startswith(url),
"children": [page_entry(x) for x in page.navigation if
page is not request.pages.homepage]
}
# All the applicable nav items
    entries = [entry for entry in (page_entry(x) for x in pages) if entry is not None]
# Add the section.
if section:
section_entry = page_entry(section)
entries = [section_entry] + list(entries)
return json.dumps(entries)
|
StarcoderdataPython
|
195846
|
import sys
import os
import torch
import pandas as pd
import datetime
from argparse import ArgumentParser
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
from network.ecgresnet_auxout import ECGResNet_AuxOut
from utils.helpers import create_results_directory, create_weights_directory
from utils.focalloss_weights import FocalLoss
class ECGResNetSnapshotEnsemble_AuxOutSystem(pl.LightningModule):
"""
    This class implements a snapshot ensemble of ECGResNets with auxiliary output in PyTorch Lightning.
It can estimate the epistemic and aleatoric uncertainty of its predictions.
"""
def __init__(self, in_channels, n_grps, N,
num_classes, dropout, first_width, stride,
dilation, learning_rate, ensemble_size, max_epochs, initial_lr, cyclical_learning_rate_type, n_logit_samples, loss_weights=None,
**kwargs):
"""
Initializes the ECGResNetSnapshotEnsemble_AuxOutSystem
Args:
in_channels: number of channels of input
n_grps: number of ResNet groups
N: number of blocks per groups
num_classes: number of classes of the classification problem
dropout: probability of an argument to get zeroed in the dropout layer
first_width: width of the first input
stride: tuple with stride value per block per group
dilation: spacing between the kernel points of the convolutional layers
learning_rate: the learning rate of the model
ensemble_size: the number of models that make up the ensemble
max_epochs: total number of epochs to train for
initial_lr: the initial learning rate at the start of a learning cycle
cyclical_learning_rate_type: the type of learning rate cycling to apply
n_logit_samples: number of logit samples of the auxiliary output
loss_weights: array of weights for the loss term
"""
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.num_classes = num_classes
self.ensemble_size = ensemble_size
self.max_epochs = max_epochs
self.initial_lr = initial_lr
self.cyclical_learning_rate_type = cyclical_learning_rate_type
self.n_logit_samples = n_logit_samples
self.IDs = torch.empty(0).type(torch.LongTensor)
self.predicted_labels = torch.empty(0).type(torch.LongTensor)
self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.models = []
self.optimizers = []
self.models.append(ECGResNet_AuxOut(in_channels,
n_grps, N, num_classes,
dropout, first_width,
stride, dilation)
)
if loss_weights is not None:
weights = torch.tensor(loss_weights, dtype = torch.float)
else:
weights = loss_weights
self.loss = FocalLoss(gamma=1, weights = weights)
create_weights_directory()
def forward(self, x, model_idx):
"""Performs a forward through a single ensemble member.
Args:
x (tensor): Input data.
model_idx (int): Index of the ensemble member.
Returns:
output1: Output at the auxiliary point of the ensemble member
output2: Output at the end of the ensemble member
output2_log_var: The log variance of the ensemble_member
"""
output1, output2_mean, output2_log_var = self.models[model_idx](x)
return output1, output2_mean, output2_log_var
def on_train_epoch_start(self):
"""
Set the cyclical learning rate for the current epoch
"""
learning_rate = self.get_learning_rate(self.current_epoch, self.ensemble_size, self.max_epochs, self.initial_lr, self.cyclical_learning_rate_type)
self.set_learning_rate(self.optimizers[0], learning_rate)
self.log('Learning rate', learning_rate)
print('Epoch: {} learning rate: {}'.format(self.current_epoch, learning_rate))
def training_step(self, batch, batch_idx):
"""Performs a training step for all ensemble members.
Args:
batch (dict): Output of the dataloader.
batch_idx (int): Index no. of this batch.
Returns:
tensor: Total loss for this step.
"""
data, target = batch['waveform'], batch['label']
model_idx = 0
# Make prediction
output1, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
train_loss1 = self.loss(output1, target)
train_loss2 = self.loss(x_i, target)
total_train_loss = (0.3 * train_loss1) + train_loss2
# Update weights for single model using individual optimizer
self.manual_backward(total_train_loss, self.optimizers[model_idx])
self.optimizers[model_idx].step()
self.optimizers[model_idx].zero_grad()
        self.log('train_loss', total_train_loss)
return {'loss': total_train_loss}
def on_train_epoch_end(self, outputs):
"""
Save the model after each learning-rate cycle
"""
if self.cyclical_learning_rate_type == 'cosine-annealing':
epochs_per_cycle = self.max_epochs/self.ensemble_size
# Check if we are at the end of a learning-rate cycle
if (self.current_epoch +1) % epochs_per_cycle == 0:
model_idx = int((self.current_epoch+1 )/ epochs_per_cycle)
# Save current model
print('\nSaving model: {}/{}'.format(model_idx, self.ensemble_size))
torch.save({
'epoch': self.current_epoch,
'model_state_dict': self.models[0].state_dict(),
'optimizer_state_dict': self.optimizers[0].state_dict(),
}, "weights/ssensemble_auxout_model{}.pt".format(model_idx))
# self.trainer.save_checkpoint("weights/ssensemble_model{}.ckpt".format(model_idx))
def validation_step(self, batch, batch_idx):
data, target = batch['waveform'], batch['label']
model_idx = 0
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
# Apply softmax to obtain probability vector p_i
p_i = F.softmax(x_i, dim=1)
val_loss = self.loss(p_i, target)
acc = FM.accuracy(p_i, target)
# Log metrics
metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
self.log('val_acc', acc.item())
self.log('val_loss', val_loss.item())
return metrics
def on_test_epoch_start(self):
"""
Initialize ensemble members from saved checkpoints
"""
print('\nInitializing ensemble members from checkpoints')
# Remove first model from self.models
self.models.clear()
for model_idx in range(self.ensemble_size):
# Initialize ensemble members from different epochs in the training stage of the original model
self.models.append(ECGResNet_AuxOut(self.hparams.in_channels,
self.hparams.n_grps, self.hparams.N, self.hparams.num_classes,
self.hparams.dropout, self.hparams.first_width,
self.hparams.stride, self.hparams.dilation, self.hparams.n_logit_samples)
)
model_path = 'weights/ssensemble_auxout_model{}.pt'.format(model_idx+1)
checkpoint = torch.load(model_path)
self.models[model_idx].load_state_dict(checkpoint['model_state_dict'])
self.models[model_idx].eval()
print('Model {}/{} initialized\n'.format(model_idx+1, self.ensemble_size))
def test_step(self, batch, batch_idx, save_to_csv=False):
prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
data, target = batch['waveform'], batch['label']
# Predict for each model
for model_idx, model in enumerate(self.models):
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
prediction_individual[:, model_idx] = x_i.data
# Take exponent to get the variance
output2_var = output2_log_var.exp()
aleatoric_var[:, model_idx] = output2_var.data
# Calculate mean and variance over predictions from individual ensemble members
prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
prediction_ensemble_var = torch.var(prediction_individual, dim=1)
# Get the average aleatoric uncertainty for each prediction
prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)
# Select the predicted labels
predicted_labels = prediction_ensemble_mean.argmax(dim=1)
test_loss = self.loss(prediction_ensemble_mean, target)
acc = FM.accuracy(prediction_ensemble_mean, target)
# Get the epistemic variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
# Get the aleatoric variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
total_var = predicted_labels_var + predicted_labels_aleatoric_var
# Log and save metrics
self.log('test_acc', acc.item())
self.log('test_loss', test_loss.item())
self.IDs = torch.cat((self.IDs, batch['id']), 0)
self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)
self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)
self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)
        return {'test_loss': test_loss.item(), 'test_acc': acc.item()}
# Initialize an optimizer for each model in the ensemble
def configure_optimizers(self):
"""
Initialize the optimizer, during training only a single model is used
"""
model_idx = 0
self.optimizers.append(optim.SGD(self.models[model_idx].parameters(), lr=self.initial_lr))
return self.optimizers
def get_learning_rate(self, epoch_idx, n_models, total_epochs, initial_lr, cyclical_learning_rate_type):
"""
Returns the learning rate for the current epoch.
Args:
epoch_idx: index of the current epoch
n_models: total number of ensemble members
total_epochs: total number of epochs to train for
initial_lr: the initial learning rate at the start of a learning cycle
cyclical_learning_rate_type: the type of learning rate cycling to apply
"""
if cyclical_learning_rate_type == 'cosine-annealing':
"""
Apply a cosine-annealing cyclical learning rate as proposed by
Loshchilov et al. in: "SGDR: Stochastic Gradient Descent with Warm Restarts"
"""
epochs_per_cycle = total_epochs/n_models
learning_rate = initial_lr * (np.cos(np.pi * (epoch_idx % epochs_per_cycle) / epochs_per_cycle) + 1) / 2
return learning_rate
        else:
            # No cyclical schedule configured: keep the learning rate constant at its initial value.
            return initial_lr
def set_learning_rate(self, optimizer, learning_rate):
"""
Sets the learning rate for an optimizer
Args:
optimizer: optimizer to apply learning rate to
learning_rate: learning rate to set
"""
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
    @staticmethod
    def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--model_name', type=str, default='ssensemble_none')
parser.add_argument('--ensemble_size', type=int, default=2)
parser.add_argument('--ensembling_method', type=bool, default=True)
parser.add_argument('--initial_lr', type=float, default=0.1)
parser.add_argument('--cyclical_learning_rate_type', type=str, default='cosine-annealing', choices=['cosine-annealing', 'none'])
parser.add_argument('--n_logit_samples', type=int, default=100)
return parser
# Combine results into single dataframe and save to disk
def save_results(self):
"""
Combine results into single dataframe and save to disk as .csv file
"""
results = pd.concat([
pd.DataFrame(self.IDs.numpy(), columns= ['ID']),
pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),
pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),
pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']),
pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']),
pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']),
], axis=1)
create_results_directory()
results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
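# A small, hedged illustration (an addition, not part of the module above): the values
# produced by the cosine-annealing cycle in get_learning_rate can be previewed without a
# model, which makes it easy to sanity-check ensemble_size / max_epochs combinations.
# It reuses the numpy import from the top of this file.
if __name__ == '__main__':
    total_epochs, n_models, initial_lr = 30, 3, 0.1
    epochs_per_cycle = total_epochs / n_models
    schedule = [
        initial_lr * (np.cos(np.pi * (e % epochs_per_cycle) / epochs_per_cycle) + 1) / 2
        for e in range(total_epochs)
    ]
    # Each cycle starts at initial_lr and decays towards 0; a snapshot is saved at each restart.
    print(schedule)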
|
StarcoderdataPython
|
1935169
|
<gh_stars>0
import math
num=int(input("Want to find out if a number is prime? Enter a number here: "))
nroot=int(math.sqrt(num))
start=2
stop=nroot+1
if num == 2:
    print("Your number is prime and special. 2 is the only even prime number.")
elif num > 1:
    for i in range(start, stop):
        if (num % i) == 0:
            print(num, "isn't Prime, since", i, "times", num // i, "equals", num, ".")
            break
    else:
        # The for-loop's else clause runs only when no divisor was found,
        # i.e. when the loop finished without hitting break.
        print(num, "is Prime. Lucky You.")
else:
    print("Honey, keep it out of the negatives, and nobody wants to divide by zero.")
# Notes on the original version: range(2, nroot) stopped one short of the square root
# (the intended stop is nroot + 1, which is exactly what `stop` above holds), and
# printing "is Prime" with a break inside the loop declared a number prime as soon as
# the first candidate divisor failed to divide it. Moving that print into the loop's
# else clause fixes both of the symptoms described in the troubleshooting notes.
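# A compact, hedged helper (an addition, not part of the original exercise) that wraps
# the same trial-division idea in a reusable function; it relies on the math import at
# the top of this file.
def is_prime(n):
    if n < 2:
        return False
    return all(n % i != 0 for i in range(2, int(math.sqrt(n)) + 1))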
|
StarcoderdataPython
|
3390226
|
""" Plot Dwell Time
This script will be useful for plotting existing dwell time files.
The calculation of dwell times, similar to diffusion coefficients, can often
take 10-30 min, depending on how many trajectories are analyzed. If a user
would rather configure plots on a pre-existing CSV files, they can do so here.
Inputs:
---
dwell_file_in : str
CSV file containing the data
Outputs:
----
hist_file_out : str
PNG file to export. Default name is dwell_file_in + '_hist.png'
"""
import csv
import matplotlib.pyplot as plt
import sys
def main():
""" Main function for plotting data
"""
# set-up
dwell_file_in = 'sample_outputs/sample_traj_dwell_times.csv'
hist_file_out = dwell_file_in[:-4] + '_hist.png'
# read the file in
try:
file = open(dwell_file_in, 'r') # open file with traj info
except FileNotFoundError:
print("Could not find file " + dwell_file_in)
sys.exit(1)
header = None
dwell_time_plot = []
for line in file:
if header is None:
header = line
continue
curr_line = line.rstrip().split(',')
# only store data that is not empty
if curr_line[1] != '':
dwell_time_plot.append(float(curr_line[1]))
file.close()
# begin plotting
nbins = 100 # number of bins for hist
width = 3
height = 3
fig = plt.figure(figsize=(width, height), dpi=300)
ax = fig.add_subplot(1, 1, 1)
ax.hist(dwell_time_plot, nbins)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Dwell Time (s)')
ax.set_ylabel('Events')
plt.savefig(hist_file_out, bbox_inches='tight')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1953768
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from .vq_ema import VectorQuantizerEMA
from .ulosd_layers_modified import FeatureMapsToKeyPoints, KeyPointsToFeatureMaps
class Residual(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
super(Residual, self).__init__()
self._block = nn.Sequential(
nn.ReLU(True),
nn.Conv2d(in_channels=in_channels,
out_channels=num_residual_hiddens,
kernel_size=3, stride=1, padding=1, bias=False),
nn.ReLU(True),
nn.Conv2d(in_channels=num_residual_hiddens,
out_channels=num_hiddens,
kernel_size=1, stride=1, bias=False)
)
def forward(self, x):
return x + self._block(x)
class ResidualStack(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
super(ResidualStack, self).__init__()
self._num_residual_layers = num_residual_layers
self._layers = nn.ModuleList([Residual(in_channels, num_hiddens, num_residual_hiddens)
for _ in range(self._num_residual_layers)])
def forward(self, x):
for i in range(self._num_residual_layers):
x = self._layers[i](x)
return F.relu(x)
class Encoder(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
super(Encoder, self).__init__()
self._conv_1 = nn.Conv2d(in_channels=in_channels,
out_channels=num_hiddens // 2,
kernel_size=4,
stride=2, padding=1)
self._conv_2 = nn.Conv2d(in_channels=num_hiddens // 2,
out_channels=num_hiddens,
kernel_size=4,
stride=2, padding=1)
self._conv_3 = nn.Conv2d(in_channels=num_hiddens,
out_channels=num_hiddens,
kernel_size=3,
stride=1, padding=1)
self._residual_stack = ResidualStack(in_channels=num_hiddens,
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens)
def forward(self, inputs):
x = self._conv_1(inputs)
x = F.relu(x)
x = self._conv_2(x)
x = F.relu(x)
x = self._conv_3(x)
return self._residual_stack(x)
class Decoder(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
super(Decoder, self).__init__()
self._conv_1 = nn.Conv2d(in_channels=in_channels,
out_channels=num_hiddens,
kernel_size=3,
stride=1, padding=1)
self._residual_stack = ResidualStack(in_channels=num_hiddens,
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens)
self._conv_trans_1 = nn.ConvTranspose2d(in_channels=num_hiddens,
out_channels=num_hiddens // 2,
kernel_size=4,
stride=2, padding=1)
self._conv_trans_2 = nn.ConvTranspose2d(in_channels=num_hiddens // 2,
out_channels=3,
kernel_size=4,
stride=2, padding=1)
def forward(self, inputs):
x = self._conv_1(inputs)
x = self._residual_stack(x)
x = self._conv_trans_1(x)
x = F.relu(x)
return self._conv_trans_2(x)
class KeyPointsToGaussianMaps(nn.Module):
def __init__(self,
batch_size: int = 1,
time_steps: int = 1,
n_kpts: int = 1,
heatmap_width: int = 32,
device: str = 'cpu'):
super(KeyPointsToGaussianMaps, self).__init__()
self.device = device
self.heatmap_width = heatmap_width
x_range = torch.linspace(0, heatmap_width, heatmap_width)
y_range = torch.linspace(0, heatmap_width, heatmap_width)
self.x_range, self.y_range = torch.meshgrid(x_range, y_range)
self.x_range = self.x_range.view((1, 1, heatmap_width, heatmap_width))
self.x_range = self.x_range.expand(batch_size * time_steps, n_kpts, -1, -1)
self.y_range = self.y_range.view((1, 1, heatmap_width, heatmap_width))
self.y_range = self.y_range.expand(batch_size * time_steps, n_kpts, -1, -1)
self.x_range = self.x_range.to(device)
self.y_range = self.y_range.to(device)
self.blank_map = torch.zeros((batch_size * time_steps, n_kpts, heatmap_width, heatmap_width)).to(device)
def get_grid(self):
return self.x_range, self.y_range
def gaussian_2d_pdf(self,
x: torch.Tensor, y: torch.Tensor,
mean_x: torch.Tensor, mean_y: torch.Tensor,
sd_x: torch.Tensor, sd_y: torch.Tensor):
# Expand mean to (N*T, K, H', W')
mean_x = mean_x.view(*mean_x.shape, 1, 1)
mean_x = mean_x.expand(-1, -1, self.heatmap_width, self.heatmap_width)
mean_y = mean_y.view(*mean_y.shape, 1, 1)
mean_y = mean_y.expand(-1, -1, self.heatmap_width, self.heatmap_width)
# Expand sd to (N*T, K, H', W')
sd_x_exp = (2 * torch.pow(sd_x, 2)).view(*x.shape[:2], 1, 1)
sd_x_exp = sd_x_exp.expand(-1, -1, self.heatmap_width, self.heatmap_width)
        sd_y_exp = (2 * torch.pow(sd_y, 2)).view(*y.shape[:2], 1, 1)  # was torch.pow(sd_x, 2); the y spread belongs here
sd_y_exp = sd_y_exp.expand(-1, -1, self.heatmap_width, self.heatmap_width)
denominator = 1 / (2 * math.pi * sd_x * sd_y)
denominator_exp = denominator.view(*denominator.shape, 1, 1)
denominator_exp = denominator_exp.expand(-1, -1, self.heatmap_width, self.heatmap_width)
x_diff = torch.pow((x - mean_x), 2)
y_diff = torch.pow((y - mean_y), 2)
numerator = torch.exp(-(x_diff / sd_x_exp +
y_diff / sd_y_exp))
return denominator_exp * numerator
    def forward(self, kpts: torch.Tensor) -> torch.Tensor:
        """ Converts a (N*T, K, 3) tensor of key-point coordinates
        into a (N*T, K, H', W') tensor of gaussian feature maps
        with the key-point positions as means.
        """
        feature_map = self.blank_map + self.gaussian_2d_pdf(
            x=self.x_range, y=self.y_range,
            mean_x=kpts[..., 0], mean_y=kpts[..., 1],
            # the third channel is used as the (shared) standard deviation for both axes
            sd_x=kpts[..., 2], sd_y=kpts[..., 2])
        return feature_map
class VQ_VAE(nn.Module):
def __init__(self):
super(VQ_VAE, self).__init__()
self._encoder = Encoder(in_channels=3,
num_hiddens=128,
num_residual_layers=2,
num_residual_hiddens=32)
self._pre_vq_conv = nn.Conv2d(in_channels=128,
out_channels=64,
kernel_size=1,
stride=1)
self._vq_vae = VectorQuantizerEMA(num_embeddings=512,
embedding_dim=64,
commitment_cost=0.25,
decay=0.99)
self._decoder = Decoder(in_channels=64,
num_hiddens=128,
num_residual_layers=2,
num_residual_hiddens=32)
def forward(self, x):
z = self._encoder(x)
z = self._pre_vq_conv(z)
loss, quantized, perplexity, _ = self._vq_vae(z)
rec = self._decoder(quantized)
return loss, rec, perplexity
class VQ_VAE_KPT(nn.Module):
def __init__(self,
batch_size: int = 1,
time_steps: int = 1,
num_embeddings: int = 1,
heatmap_width: int = 32,
encoder_in_channels: int = 3,
num_hiddens: int = 128,
embedding_dim: int = 5,
num_residual_layers: int = 2,
num_residual_hiddens: int = 32,
device: str = 'cpu'
):
super(VQ_VAE_KPT, self).__init__()
# Shape specifications
self.N, self.T = batch_size, time_steps
self.C, self.H, self.W, self.Cp, self.Hp, self.Wp, self.K, self.D = \
None, None, None, None, None, None, None, None
self._encoder = Encoder(in_channels=encoder_in_channels,
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens).to(device)
self._appearance_encoder = Encoder(in_channels=encoder_in_channels,
num_hiddens=embedding_dim,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens).to(device)
self._pre_vq_conv = nn.Conv2d(in_channels=num_hiddens,
out_channels=embedding_dim,
kernel_size=1,
stride=1).to(device)
self._vq_vae = VectorQuantizerEMA(num_embeddings=num_embeddings, # num_embeddings
embedding_dim=embedding_dim,
commitment_cost=0.25,
decay=0.99).to(device)
self._fmap2kpt = FeatureMapsToKeyPoints(device=device)
self._kpt2gmap = KeyPointsToFeatureMaps(heatmap_width=heatmap_width, device=device)
self._decoder = Decoder(in_channels=embedding_dim * 3,
# in_channels=n_kpts, # embedding dim
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens).to(device)
self._gmap_decoder = Decoder(in_channels=embedding_dim,
# in_channels=n_kpts, # embedding dim
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens).to(device)
self.heatmap_width = heatmap_width
def transport(self,
source_gaussian_maps: torch.Tensor,
target_gaussian_maps: torch.Tensor,
source_feature_maps: torch.Tensor,
target_feature_maps: torch.Tensor):
""" Transports features by suppressing features from the source image
and adding features from the target image around its key-points.
:param source_gaussian_maps:
:param target_gaussian_maps:
:param source_feature_maps:
:param target_feature_maps:
:return:
"""
_out = source_feature_maps
for s, t in zip(torch.unbind(source_gaussian_maps, 1), torch.unbind(target_gaussian_maps, 1)):
_out = (1 - s.unsqueeze(1)) * (1 - t.unsqueeze(1)) * _out + t.unsqueeze(1) * target_feature_maps
return _out
def encode(self, image_sequence: torch.Tensor, verbose: bool = False) \
-> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
self.N, self.T, self.C, self.H, self.W = image_sequence.shape
if verbose:
print('Input: ', image_sequence.shape)
fmaps = self._encoder(image_sequence.view((self.N * self.T, self.C, self.H, self.W)))
if verbose:
print('Encoded: ', fmaps.shape)
fmaps = self._pre_vq_conv(fmaps)
_, self.Cp, self.Hp, self.Wp = fmaps.shape
if verbose:
print('Pre VQ: ', fmaps.shape)
vq_loss, quantized, perplexity, _ = self._vq_vae(fmaps)
if verbose:
print('Quantized: ', quantized.shape)
# kpts = self._fmap2kpt(torch.flatten(z, start_dim=2))
sp = F.softplus(quantized)
kpts = self._fmap2kpt(sp)
_, self.K, self.D = kpts.shape
# Unstack time-steps into separate dimension
kpts = kpts.view((self.N, self.T, self.K, self.D))
fmaps = fmaps.view((self.N, self.T, self.Cp, self.Hp, self.Wp))
if verbose:
print('KeyPoints: ', kpts.shape)
return quantized, fmaps, kpts, vq_loss, perplexity
def decode_quantized_stream(self, quantized_maps_series: torch.Tensor, verbose: bool = False) -> torch.Tensor:
        rec = self._decoder(quantized_maps_series)
# Unstack time-steps into separate dimension
rec = rec.view((self.N, self.T, self.C, self.H, self.W))
if verbose:
print('Reconstruction: ', rec.shape)
return rec
def decode_kpt_stream(self, key_point_series: torch.Tensor, verbose: bool = False) \
-> (torch.Tensor, torch.Tensor):
gmaps = self._kpt2gmap(key_point_series.view((self.N * self.T, self.K, self.D)))
if verbose:
print('Gaussian maps: ', gmaps.shape)
gmap_rec = self._gmap_decoder(gmaps)
# Unstack time-steps into separate dimension
gmaps = gmaps.view((self.N, self.T, self.Cp, self.Hp, self.Wp))
gmap_rec = gmap_rec.view((self.N, self.T, self.C, self.H, self.W))
if verbose:
print('Gaussian map reconstruction: ', gmap_rec.shape)
return gmap_rec, gmaps
def forward(self, image_sequence: torch.Tensor, verbose: bool = False) -> torch.Tensor:
# Encode image series
        quantized_series, feature_map_series, key_point_series, _, _ = self.encode(image_sequence, verbose)
        # Decode quantized encodings
        reconstructed_images = self.decode_quantized_stream(quantized_series, verbose)
return reconstructed_images
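# Minimal usage sketch (not part of the original module). It assumes the imports at the
# top of this file (torch, nn, F, math) and the ResidualStack / VectorQuantizerEMA helpers
# defined earlier are available, and that the encoder's first convolution (above this
# excerpt) downsamples by 2, so 128x128 inputs become 32x32 feature maps.
if __name__ == '__main__':
    model = VQ_VAE()
    images = torch.randn(8, 3, 128, 128)  # (N, C, H, W); any size divisible by 4 should work
    vq_loss, reconstruction, perplexity = model(images)
    print(vq_loss, reconstruction.shape, perplexity)  # reconstruction expected to match the input shape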
|
StarcoderdataPython
|
11317293
|
""" Entry point to the sdem cli. """
import typer
from . import state # global settings
from . import template
from .cli import run, dvc, clean, vis, sync, setup, rollback, install, info
import warnings
from time import sleep
commands_no_start_up_check = ["setup", "install"]
app = typer.Typer()
# Construct cli from subfiles
app.command()(run.run)
app.command()(clean.clean)
app.command()(sync.sync)
info_app = typer.Typer()
info_app.add_typer(info.model_app, name='models')
info_app.add_typer(info.results_app, name='results')
app.add_typer(info_app, name='info')
#app.command()(setup.setup)
#app.command()(rollback.rollback)
#app.command()(install.install)
dvc_app = typer.Typer()
app.add_typer(dvc.app, name="dvc")
vis_app = typer.Typer()
app.add_typer(vis.app, name="vis")
@app.callback()
def global_state(ctx: typer.Context, verbose: bool = False, dry: bool = False):
"""
This function will be run before every cli function
It sets up the current state and sets global settings.
"""
config = state.get_state(verbose, dry)
    if verbose:
        config.console.print('Running in verbose mode')
    if dry:
        config.console.print('Running in dry mode')
config.load_experiment_config()
# Ensure that sdem is running in the correct folder etc
# This is not required if setup is being called and so we simply check that the command is not setup
if not (ctx.invoked_subcommand in commands_no_start_up_check):
pass_flag = config.check()
if not pass_flag:
exit()
# store the config in the typer/click context that will be passed to all commands
ctx.obj = config
def main():
app()
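# Entry-point guard (an assumption, not part of the original module): lets the CLI also be
# launched with `python -m` in addition to the installed console-script entry point.
if __name__ == "__main__":
    main()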
|
StarcoderdataPython
|
3364136
|
def get_max(lst):
max_v = lst[0]
for item in lst:
if item > max_v:
max_v = item
return max_v
def get_min(lst):
min_v = lst[0]
for item in lst:
if item < min_v:
min_v = item
return min_v
def cal_max_diff(lst):
    """Return the difference between the largest and smallest values in lst."""
    max_v = get_max(lst)
    min_v = get_min(lst)
    max_diff = max_v - min_v
    return max_diff
arr1 = [13, 15, 12, 19, 5, 7, 10]
print(cal_max_diff(arr1))
|
StarcoderdataPython
|
8098585
|
"""Test the basic DAP functions."""
import numpy as np
from six import MAXSIZE
from pydap.model import (DatasetType, BaseType,
StructureType)
from pydap.exceptions import ConstraintExpressionError
from pydap.lib import (quote, encode, fix_slice, combine_slices, hyperslab,
walk, fix_shorthand, get_var)
import unittest
class TestQuote(unittest.TestCase):
"""Test quoting.
According to the DAP 2 specification a variable name MUST contain only
upper or lower case letters, numbers, or characters from the set
_ ! ~ * ' - "
All other characters must be escaped. This includes the period, which is
normally not quoted by ``urllib.quote``.
"""
def test_quoting(self):
"""Test a simple quoting."""
self.assertEqual(quote("White space"), "White%20space")
def test_quoting_period(self):
"""Test if period is also quoted."""
self.assertEqual(quote("Period."), "Period%2E")
class TestEncode(unittest.TestCase):
"""Test encoding.
According to the DAP 2 specification, numbers must be encoded using the C
notation "%.6g". Other objects are encoded as escaped strings.
"""
def test_integer(self):
"""Test integer encoding."""
self.assertEqual(encode(1), "1")
def test_float(self):
"""Test floating encoding."""
self.assertEqual(encode(np.pi), "3.14159")
def test_string(self):
"""Test string encoding."""
self.assertEqual(encode("test"), '"test"')
def test_string_with_quotation(self):
"""Test encoding a string with a quotation mark."""
        self.assertEqual(encode('this is a "test"'), '"this is a \\"test\\""')
def test_unicode(self):
"""Unicode objects are encoded just like strings."""
self.assertEqual(encode(u"test"), '"test"')
def test_obj(self):
"""Other objects are encoded according to their ``repr``."""
self.assertEqual(encode({}), '"{}"')
def test_numpy_string(self):
self.assertEqual(encode(np.array('1', dtype='<U1')), '"1"')
class TestFixSlice(unittest.TestCase):
"""Test the ``fix_slice`` function."""
def test_not_tuple(self):
"""Non tuples should be converted and handled correctly."""
x = np.arange(10)
slice1 = 0
slice2 = fix_slice(slice1, x.shape)
# ``fix_slice`` will convert to a tuple
self.assertEqual(slice2, (0,))
# assert that the slice is equivalent to the original
np.testing.assert_array_equal(x[slice1], x[slice2])
def test_ellipsis(self):
"""Expand Ellipsis to occupy the missing dimensions."""
x = np.arange(6).reshape(2, 3, 1)
slice1 = Ellipsis, 0
slice2 = fix_slice(slice1, x.shape)
# an Ellipsis is expanded to slice(None)
self.assertEqual(
slice2,
((slice(0, 2, 1), slice(0, 3, 1), 0)))
np.testing.assert_array_equal(x[slice1], x[slice2])
def test_negative_int(self):
"""Negative values are converted to positive."""
x = np.arange(10)
slice1 = -5
slice2 = fix_slice(slice1, x.shape)
self.assertEqual(slice2, (5,))
np.testing.assert_array_equal(x[slice1], x[slice2])
def test_negative_start(self):
"""Test for slices with a negative start."""
x = np.arange(10)
slice1 = slice(-8, 8)
slice2 = fix_slice(slice1, x.shape)
self.assertEqual(slice2, (slice(2, 8, 1),))
np.testing.assert_array_equal(x[slice1], x[slice2])
def test_negative_stop(self):
"""Test for slices with a negative stop."""
x = np.arange(10)
slice1 = slice(2, -2)
slice2 = fix_slice(slice1, x.shape)
self.assertEqual(slice2, (slice(2, 8, 1),))
np.testing.assert_array_equal(x[slice1], x[slice2])
class TestCombineSlices(unittest.TestCase):
"""Test the ``combine_slices`` function."""
def test_not_tuple(self):
"""The function fails when one of the slices is not a tuple."""
slice1 = 0
slice2 = (0,)
with self.assertRaises(TypeError):
combine_slices(slice1, slice2)
with self.assertRaises(TypeError):
combine_slices(slice2, slice1)
def test_integer(self):
"""Test slices that are just integers."""
slice1 = (0,)
slice2 = (1,)
combined = combine_slices(slice1, slice2)
self.assertEqual(combined, (slice(1, 1, 1),))
def test_stops_none(self):
"""Test when both of the slices have ``None`` for stop."""
x = np.arange(10)
slice1 = (slice(0, None),)
slice2 = (slice(5, None),)
combined = combine_slices(slice1, slice2)
self.assertEqual(combined, (slice(5, None, 1),))
np.testing.assert_array_equal(x[combined], x[slice1][slice2])
def test_first_stop_none(self):
"""Test when the first slice has ``None`` for stop."""
x = np.arange(10)
slice1 = (slice(5, None),)
slice2 = (slice(0, 8),)
combined = combine_slices(slice1, slice2)
self.assertEqual(combined, (slice(5, 13, 1),))
np.testing.assert_array_equal(x[combined], x[slice1][slice2])
def test_second_stop_none(self):
"""Test when the second slice has ``None`` for stop."""
x = np.arange(10)
slice1 = (slice(0, 8),)
slice2 = (slice(5, None),)
combined = combine_slices(slice1, slice2)
self.assertEqual(combined, (slice(5, 8, 1),))
np.testing.assert_array_equal(x[combined], x[slice1][slice2])
def test_all_values(self):
"""Test when start and stop are all integers."""
x = np.arange(20)
slice1 = (slice(0, 8),)
slice2 = (slice(5, 6),)
combined = combine_slices(slice1, slice2)
self.assertEqual(combined, (slice(5, 6, 1),))
np.testing.assert_array_equal(x[combined], x[slice1][slice2])
class TestHyperslab(unittest.TestCase):
"""Test hyperslab generation from Python slices."""
def test_no_tuple(self):
"""Test that slices that are not tuples work."""
slice_ = slice(0)
self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE-1))
def test_remove(self):
"""Test that excess slices are removed."""
slice_ = (slice(0), slice(None))
self.assertEqual(hyperslab(slice_), "[0:1:%d]" % (MAXSIZE-1))
def test_ndimensional(self):
"""Test n-dimensions slices."""
slice_ = (slice(1, 10, 1), slice(2, 10, 2))
self.assertEqual(hyperslab(slice_), "[1:1:9][2:2:9]")
class TestWalk(unittest.TestCase):
"""Test the ``walk`` function to iterate over a dataset."""
def setUp(self):
"""Create a basic dataset."""
self.dataset = DatasetType("a")
self.dataset["b"] = BaseType("b")
self.dataset["c"] = StructureType("c")
def test_walk(self):
"""Test that all variables are yielded."""
self.assertEqual(
list(walk(self.dataset)),
[self.dataset, self.dataset.b, self.dataset.c])
def test_walk_type(self):
"""Test the filtering of variables yielded."""
self.assertEqual(list(walk(self.dataset, BaseType)), [self.dataset.b])
class TestFixShorthand(unittest.TestCase):
"""Test the ``fix_shorthand`` function."""
def test_fix_projection(self):
"""Test a dataset that can use the shorthand notation."""
dataset = DatasetType("a")
dataset["b"] = StructureType("b")
dataset["b"]["c"] = BaseType("c")
projection = [[("c", ())]]
self.assertEqual(
fix_shorthand(projection, dataset),
[[('b', ()), ('c', ())]])
def test_conflict(self):
"""Test a dataset with conflicting short names."""
dataset = DatasetType("a")
dataset["b"] = StructureType("b")
dataset["b"]["c"] = BaseType("c")
dataset["d"] = StructureType("d")
dataset["d"]["c"] = BaseType("c")
projection = [[("c", ())]]
with self.assertRaises(ConstraintExpressionError):
fix_shorthand(projection, dataset)
class TestGetVar(unittest.TestCase):
"""Test the ``get_var`` function."""
def test_get_var(self):
"""Test that the id is returned properly."""
dataset = DatasetType("a")
dataset["b"] = StructureType("b")
dataset["b"]["c"] = BaseType("c")
self.assertEqual(get_var(dataset, 'b.c'), dataset['b']['c'])
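# Entry-point guard (an addition, assuming the suite may also be run directly as a
# script rather than only through a test runner):
if __name__ == '__main__':
    unittest.main()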
|
StarcoderdataPython
|
1988884
|
<reponame>ahammadshawki8/Proggraming-Terms
# WET = Write Everything Twice
# DRY = Don't Repeat Yourself.
# DRY is a principle of software development aimed at reducing repetition of information of all kinds.
# WET is the opposite of DRY; we should always try to keep our code DRY.
def homePage():
print("<div class='header'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
print("<p> Welcome to our Home Page </p>")
print("<div class='footer'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
def aboutPage():
print("<div class='header'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
print("<p> Welcome to our About Page </p>")
print("<div class='footer'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
def contactPage():
print("<div class='header'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
print("<p> Welcome to our Contact Page </p>")
print("<div class='footer'>")
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
print("</div>")
# Here the header and footer markup is repeated on every page, so the code is not DRY
# and the website is difficult to maintain.
# If we want to add a new link to the header, we have to do it manually on every page,
# when it would be a lot nicer to do it in one place.
# How can we make the code DRY?
# We can write header and footer functions instead of repeating the same markup again and again.
def nav_menu():
print("<a href='#'>Home</a>")
print("<a href='#'>About</a>")
print("<a href='#'>Contact</a>")
def header():
print("<div class='header'>")
nav_menu()
print("</div>")
def footer():
print("<div class='footer'>")
nav_menu()
print("</div>")
def HomePage():
header()
print("<p> Welcome to our Home Page </p>")
footer()
def AboutPage():
header()
print("<p> Welcome to our About Page </p>")
footer()
def ContactPage():
header()
print("<p> Welcome to our Contact Page </p>")
footer()
HomePage()
# Now we can maintain and update the pages easily, and the code is easier to read.
# Any change only has to be made in one place.
# We can use the same concept in unit tests too.
import helpers_calc as calc
import unittest
class CalcTestCase(unittest.TestCase):
"""Test helpers_calc.py"""
def setUp(self):
self.x=10
self.y=5
def tearDown(self):
pass
def test_add(self):
        # Previously we had to enter the x and y values in every test, but we can put them in the setUp method to keep the code DRY.
        # Now we don't have to write those values again and again, although we do have to access them through self.
        self.assertEqual(calc.add(self.x, self.y), self.x + self.y)
    def test_sub(self):
        self.assertEqual(calc.sub(self.x, self.y), self.x - self.y)
    def test_multiply(self):
        self.assertEqual(calc.multiply(self.x, self.y), self.x * self.y)
    def test_devide(self):
        self.assertEqual(calc.devide(self.x, self.y), self.x / self.y)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3206978
|
<reponame>RaimundoLima/Zivot
from .base import Base
from sqlalchemy import DateTime,Time,ForeignKey,Column, Integer, Numeric, Binary, String,VARCHAR,Float
from sqlalchemy.orm import relationship
class Notificacoes(Base):
titulo = Column(VARCHAR(50), nullable=False)
descricao= Column(VARCHAR(200),nullable=False)
#usuario_notificacoes=relationship('usuario_notificacoes',backref='notificacao',lazy=True)
#medico_notificacoes=relationship('medico_notificacoes',backref='notificacao',lazy=True)
|
StarcoderdataPython
|
3582750
|
from typing import Optional
from pydantic import BaseModel, Field
__all__ = ["SetTrackingId"]
class SetTrackingId(BaseModel):
tracking_id: str = Field(None, title="New Tracking ID")
comment: Optional[str] = Field(None, title="Optional comment")
class Config:
schema_extra = {
"example": {
"tracking_id": "newtrackingid",
"comment": "Optional comment explaining or documenting the change",
}
}
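# Minimal usage sketch (an illustration, not part of the original module; values are placeholders):
if __name__ == "__main__":
    req = SetTrackingId(tracking_id="newtrackingid", comment="why the id changed")
    print(req.dict())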
|
StarcoderdataPython
|
51957
|
from floodsystem.station import MonitoringStation
from floodsystem.geo import rivers_by_station_number
def test_rivers_by_station_number():
"""Test for Task1E functions"""
#create 4 test stations
station_id = "Test station_id"
measure_id = "Test measure_id"
label = "Test station"
coord = (0.0, 0.0)
typical_range = (0.0, 1.0)
town = "Test Town"
station1 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River A", town)
station2 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River A", town)
station3 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River B", town)
station4 = MonitoringStation(station_id, measure_id, label, coord, typical_range, "River C", town)
x = rivers_by_station_number([station1, station2, station3, station4], 1)
y = rivers_by_station_number([station1, station2, station3, station4], 2)
#test for N = 1, return most number of stations: A with 2
assert x == [("River A", 2)]
    #test for N = 2: returns River A plus Rivers B and C, since B and C tie with one station each
assert y == [("River A", 2), ("River B", 1), ("River C", 1)]
|
StarcoderdataPython
|
3465224
|
from flask import jsonify, make_response, request
from flask_restful import Resource
from app.api.v2.request import Request
from app.api.v2.models.user import User
from werkzeug.security import check_password_hash
from flask_jwt_extended import create_access_token, jwt_required
import datetime
class AuthController(Resource):
def post(self):
data = request.get_json()
request_schema = {'email': 'required|email',
'password': 'required|string|min:6|max:12'}
validator = Request(data, request_schema)
        errors = validator.validate()
        if errors is None:
email = data['email']
password = data['password']
''' verify password '''
if AuthController.__verify_password(email,password):
user=User.get_by_email(email)
exp = datetime.timedelta(minutes=45)
token=AuthController.__generate_token(user['id'],exp)
user.pop("password")
return make_response(jsonify({"message": "Login successful",
"access_token": token,
"exp":str(exp),
"user":user}), 200)
else:
return make_response(jsonify({"message": "Invalid credentials"}), 401)
else:
            return make_response(jsonify(errors), 422)
@staticmethod
def __generate_token(user_id,exp):
        return create_access_token(identity=user_id, expires_delta=exp)
@staticmethod
def __verify_password(email,password):
if User.exists({'email':email}):
user=User.get_by_email(email)
return check_password_hash(user['password'], password)
return False
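# Illustrative example (not part of the original module) of a JSON body that satisfies
# the request_schema above; the values are placeholders:
#   {"email": "user@example.com", "password": "secret123"}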
|
StarcoderdataPython
|
1868105
|
# Generated by Django 3.2.10 on 2022-01-25 05:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapi', '0044_auto_20220118_1205'),
]
operations = [
migrations.AlterModelOptions(
name='regdevdata',
options={'verbose_name': 'Устройства', 'verbose_name_plural': 'Устройства'},
),
migrations.AlterModelOptions(
name='regdevdatatest',
options={'verbose_name': 'Устройства (test)', 'verbose_name_plural': 'Устройства (test)'},
),
migrations.AlterModelOptions(
name='regtsedata',
options={'verbose_name': 'Мерчанты', 'verbose_name_plural': 'Мерчанты'},
),
migrations.AlterModelOptions(
name='regtsedatatest',
options={'verbose_name': 'Мерчанты (test)', 'verbose_name_plural': 'Мерчанты (test)'},
),
migrations.AlterModelOptions(
name='requestdata',
options={'verbose_name': 'Запросы', 'verbose_name_plural': 'Запросы'},
),
migrations.AlterModelOptions(
name='requestdatatest',
options={'verbose_name': 'Запросы (test)', 'verbose_name_plural': 'Запросы (test)'},
),
migrations.AlterField(
model_name='regdevdata',
name='dt',
field=models.DateTimeField(default=datetime.datetime(2022, 1, 25, 10, 17, 47, 685523), verbose_name='dt'),
),
migrations.AlterField(
model_name='regdevdatatest',
name='dt',
field=models.DateTimeField(default=datetime.datetime(2022, 1, 25, 10, 17, 47, 687385), verbose_name='dt'),
),
migrations.AlterField(
model_name='regtsedata',
name='dt',
field=models.DateTimeField(default=datetime.datetime(2022, 1, 25, 10, 17, 47, 684873), verbose_name='dt'),
),
migrations.AlterField(
model_name='regtsedatatest',
name='dt',
field=models.DateTimeField(default=datetime.datetime(2022, 1, 25, 10, 17, 47, 686767), verbose_name='dt'),
),
]
|
StarcoderdataPython
|
5146031
|
from random import randint
num1 = randint(0, 10)
num2 = randint(0, 10)
num3 = randint(0, 10)
num4 = randint(0, 10)
num5 = randint(0, 10)
lista = (num1, num2, num3, num4, num5)
print(f'Os valores sorteados foram: {lista}')
print(f'O menor valor sorteado foi {sorted(lista)[0]}')
print(f'O maior valor sorteado foi {sorted(lista)[-1]}')
|
StarcoderdataPython
|
6415697
|
import connexion
import six
from mcenter_server_api.models.pipeline_pattern import PipelinePattern # noqa: E501
from mcenter_server_api import util
def onboarding_pipeline_patterns_get(): # noqa: E501
"""Get list of all pipeline patterns
# noqa: E501
:rtype: List[PipelinePattern]
"""
return 'do some magic!'
def onboarding_pipeline_patterns_pipeline_pattern_id_delete(pipeline_pattern_id): # noqa: E501
"""Delete an existing pipeline pattern
# noqa: E501
:param pipeline_pattern_id: Pipeline pattern identifier
:type pipeline_pattern_id: str
:rtype: None
"""
return 'do some magic!'
def onboarding_pipeline_patterns_pipeline_pattern_id_get(pipeline_pattern_id): # noqa: E501
"""Get specific pipeline pattern
# noqa: E501
:param pipeline_pattern_id: Pipeline pattern identifier
:type pipeline_pattern_id: str
:rtype: PipelinePattern
"""
return 'do some magic!'
def onboarding_pipeline_patterns_pipeline_pattern_id_put(pipeline_pattern_id, pipeline_pattern): # noqa: E501
"""Update an existing pipeline pattern
# noqa: E501
:param pipeline_pattern_id: Pipeline pattern identifier
:type pipeline_pattern_id: str
:param pipeline_pattern: Pipeline pattern detail configuration
:type pipeline_pattern: dict | bytes
:rtype: PipelinePattern
"""
if connexion.request.is_json:
pipeline_pattern = PipelinePattern.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def onboarding_pipeline_patterns_post(pipeline_pattern): # noqa: E501
"""Create a new pipeline pattern
# noqa: E501
:param pipeline_pattern: Pipeline detail description
:type pipeline_pattern: dict | bytes
:rtype: PipelinePattern
"""
if connexion.request.is_json:
pipeline_pattern = PipelinePattern.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
StarcoderdataPython
|
1897768
|
from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Consumer key and secret
consumer_key="YOUR_CONSUMER_KEY"
consumer_secret="YOUR_CONSUMER_SECRET"
# Access token
access_token="YOUR_ACCESS_TOKEN"
access_token_secret="ACCESS_TOKEN_SECRET"
f = open('tweets.json', 'w')
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream. """
def on_data(self, data):
print(data)
f.write(data + '\n')
return True
def on_error(self, status):
print(status)
f.close()
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['elections', 'presidentielles', 'france'])
|
StarcoderdataPython
|
6568165
|
from ...models import DOCUMENT_CLASSIFICATION, SEQ2SEQ, SEQUENCE_LABELING
from . import catalog, data, dataset, label
def get_data_class(project_type: str):
text_projects = [DOCUMENT_CLASSIFICATION, SEQUENCE_LABELING, SEQ2SEQ]
if project_type in text_projects:
return data.TextData
else:
return data.FileData
def get_dataset_class(format: str):
mapping = {
catalog.TextFile.name: dataset.TextFileDataset,
catalog.TextLine.name: dataset.TextLineDataset,
catalog.CSV.name: dataset.CsvDataset,
catalog.JSONL.name: dataset.JSONLDataset,
catalog.JSON.name: dataset.JSONDataset,
catalog.FastText.name: dataset.FastTextDataset,
catalog.Excel.name: dataset.ExcelDataset,
catalog.CoNLL.name: dataset.CoNLLDataset
}
if format not in mapping:
        raise ValueError(f'Invalid format: {format}')
return mapping[format]
def get_label_class(project_type: str):
mapping = {
DOCUMENT_CLASSIFICATION: label.CategoryLabel,
SEQUENCE_LABELING: label.OffsetLabel,
SEQ2SEQ: label.TextLabel
}
if project_type not in mapping:
        raise ValueError(f'Invalid project type: {project_type}')
return mapping[project_type]
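# Illustrative usage sketch (shown as comments because this module uses relative imports
# and is meant to be imported from its package rather than run directly):
#   get_data_class(DOCUMENT_CLASSIFICATION)   # -> data.TextData
#   get_dataset_class(catalog.CSV.name)       # -> dataset.CsvDataset
#   get_label_class(SEQUENCE_LABELING)        # -> label.OffsetLabel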
|
StarcoderdataPython
|
1903181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2020
Day 25, Part 1
"""
def main():
with open('in.txt') as f:
card_key, door_key = map(int, f.readlines())
subject_number = 7
value = 1
card_loop = 0
while True:
card_loop += 1
value *= subject_number
value %= 20201227
if value == card_key:
print(card_loop)
break
value = 1
door_loop = 0
while True:
door_loop += 1
value *= subject_number
value %= 20201227
if value == door_key:
print(door_loop)
break
encryption_key = 1
subject_number = card_key
for _ in range(door_loop):
encryption_key *= subject_number
encryption_key %= 20201227
print(encryption_key)
if __name__ == '__main__':
main()
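# Illustrative note: the two while-loops above brute-force the discrete logarithm of each
# public key for subject number 7 modulo 20201227, and the final loop is plain modular
# exponentiation, which could equivalently be written with Python's three-argument pow:
#   encryption_key = pow(card_key, door_loop, 20201227)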
|
StarcoderdataPython
|
6509847
|
<filename>pantra/components/loader.py
from __future__ import annotations
import os
import re
import typing
import traceback
import cssutils
import sass
from antlr4 import FileStream, CommonTokenStream, IllegalStateException
from antlr4.error.ErrorListener import ErrorListener
from pantra.common import UniNode, ADict
from cssutils.css import CSSMediaRule, CSSStyleRule
from .grammar.PMLLexer import PMLLexer
from .grammar.PMLParser import PMLParser
from .grammar.PMLParserVisitor import PMLParserVisitor
from pantra.defaults import CSS_PATH, COMPONENTS_PATH
if typing.TYPE_CHECKING:
from pantra.session import Session
from typing import *
from types import CodeType
__all__ = ['HTMLTemplate', 'collect_styles', 'collect_template']
VOID_ELEMENTS = 'area|base|br|col|embed|hr|img|input|link|meta|param|source|track|wbr'.split('|')
SPECIAL_ELEMENTS = 'slot|event|scope|react|component'.split('|')
templates: typing.Dict[str, HTMLTemplate] = {}
class HTMLTemplate(UniNode):
code_base: Dict[str, CodeType] = {}
__slots__ = ('tag_name', 'attributes', 'text', 'macro', 'name', 'filename', 'code')
def __init__(self, tag_name: str, parent: Optional['HTMLTemplate'] = None, attributes: Optional[List[Union[Dict, ADict]]] = None, text: str = None):
super().__init__(parent)
self.tag_name: str = tag_name
self.attributes: Dict[str, Union[str, CodeType]] = attributes and ADict(attributes) or ADict()
self.text: str = text
self.macro: CodeType = None
self.name: Optional[str] = None
self.filename: Optional[str] = None
self.code: Optional[Union[CodeType, str]] = None
def __str__(self):
return self.tag_name
class MyVisitor(PMLParserVisitor):
def __init__(self, filename: str):
name = os.path.splitext(os.path.basename(filename))[0]
root = HTMLTemplate(f'${name}')
root.filename = filename
self.root: typing.Optional[HTMLTemplate] = root
self.current: typing.Optional[HTMLTemplate] = root
self.cur_attr: typing.Optional[str] = None
def visitText(self, ctx: PMLParser.TextContext):
text = ctx.getText().strip().strip('\uFEFF')
if text and self.current != self.root:
HTMLTemplate('@text', parent=self.current, text=text)
def visitRawText(self, ctx: PMLParser.RawTextContext):
text = ctx.getText()
if text.strip().strip('\uFEFF'):
tag_name = self.current.tag_name
if tag_name == '@python':
line_no = ctx.start.line
text = '#\n' * (line_no - 1) + text
self.current.text = text
def visitRawTag(self, ctx: PMLParser.RawTagContext):
tag_name = '@' + ctx.getText().strip()[1:]
self.current = HTMLTemplate(tag_name, parent=self.current)
self.current.filename = self.root.filename
# raw nodes goes first
self.current.parent.children.insert(0, self.current.parent.children.pop())
def visitRawCloseTag(self, ctx: PMLParser.RawCloseTagContext):
self.current = self.current.parent
def visitTagBegin(self, ctx: PMLParser.TagBeginContext):
tag_name = ctx.children[1].getText()
if tag_name in SPECIAL_ELEMENTS:
tag_name = '@' + tag_name
self.current = HTMLTemplate(tag_name=tag_name, parent=self.current)
# if not self.root: self.root = self.current
self.visitChildren(ctx)
if ctx.children[-1].symbol.type == PMLLexer.SLASH_CLOSE or self.current.tag_name.lower() in VOID_ELEMENTS:
self.current = self.current.parent
def visitAttrName(self, ctx: PMLParser.AttrNameContext):
self.cur_attr = ctx.getText()
if self.cur_attr != 'class':
self.current.attributes[self.cur_attr] = None
def visitAttrValue(self, ctx: PMLParser.AttrValueContext):
text = ctx.getText().strip('"\'')
if '{' in text:
if text.startswith('{'):
if not self.cur_attr.startswith('set:'):
value = compile(text.strip('{}'), f'<attribute:{self.cur_attr}>', 'eval')
else:
text = text.strip('{}')
text = f"({text} or '')"
value = compile(text, f'<attribute:{self.cur_attr}>', 'eval')
else:
value = compile(f'f"{text}"', f'<attribute:{self.cur_attr}>', 'eval')
else:
value = text
self.current.attributes[self.cur_attr] = value
def visitRawName(self, ctx:PMLParser.RawNameContext):
self.cur_attr = ctx.getText()
self.current.attributes[self.cur_attr] = None
def visitRawValue(self, ctx:PMLParser.RawValueContext):
self.current.attributes[self.cur_attr] = ctx.getText()
def visitTagEnd(self, ctx: PMLParser.TagEndContext):
tag_name = ctx.children[1].getText()
if tag_name in SPECIAL_ELEMENTS:
tag_name = '@' + tag_name
match = False
while self.current:
if tag_name == self.current.tag_name:
match = True
break
self.current = self.current.parent
if not match:
raise IllegalStateException(f"close tag don't match {tag_name}")
self.current = self.current.parent
def visitMacroCommand(self, ctx: PMLParser.MacroCommandContext):
command = ctx.getText()
macro_chunks = re.search(r"^(\w+)\s+(.*)$", command)
if not macro_chunks:
tag_name = command.strip()
macro = ''
else:
tag_name = macro_chunks.group(1).strip()
macro = macro_chunks.group(2).strip()
# gen 'if' subtree
if tag_name == 'if':
parent = HTMLTemplate('#if', self.current)
self.current = HTMLTemplate('#choice', parent=parent)
self.current.macro = macro or "True"
elif tag_name == 'for':
parent = HTMLTemplate('#for', self.current)
parent.macro = macro
self.current = HTMLTemplate('#loop', parent=parent)
elif tag_name == 'elif':
self.current = HTMLTemplate('#choice', parent=self.current.parent)
self.current.macro = macro or "True"
elif tag_name == 'else':
self.current = HTMLTemplate('#else', parent=self.current.parent)
elif tag_name == 'set':
self.current = HTMLTemplate('#set', parent=self.current)
self.current.macro = macro
def visitMacroEnd(self, ctx: PMLParser.MacroEndContext):
macro_tag = '#'+ctx.children[1].getText().strip()
match = False
while self.current:
if macro_tag == self.current.tag_name:
match = True
break
self.current = self.current.parent
if not match:
raise IllegalStateException(f"macro close tag don't match {macro_tag}")
self.current = self.current.parent
def visitInlineMacro(self, ctx: PMLParser.InlineMacroContext):
macro = HTMLTemplate('@macro', parent=self.current)
text = ctx.children[1].getText()
code = compile(text, '<macro>', 'eval')
macro.macro = code
def visitErrorNode(self, node):
raise IllegalStateException(f'wrong node {node.getText()}')
class ErrorVisitor(ErrorListener):
def __init__(self, filename, error_callback):
super().__init__()
self.filename = filename
self.error_callback = error_callback
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
self.error_callback(f"{self.filename}: line {line}:{column} {msg}")
def load(filename: str, error_callback: typing.Callable[[str], None]) -> typing.Optional[HTMLTemplate]:
in_stream = FileStream(filename, encoding='utf-8')
lexer = PMLLexer(in_stream)
stream = CommonTokenStream(lexer)
parser = PMLParser(stream)
parser.removeErrorListeners()
parser.addErrorListener(ErrorVisitor(filename, error_callback))
tree = parser.process()
visitor = MyVisitor(filename)
try:
visitor.visit(tree)
except IllegalStateException as e:
error_callback(f'{filename}> {e}')
return None
except SyntaxError as e:
error_callback(f'{filename}> {e}')
return None
return visitor.root
def _search_component(path, name):
for root, dirs, files in os.walk(path):
for file in files: # type: str
if file.endswith('html'):
if os.path.basename(file) == f'{name}.html':
return os.path.join(root, file)
return None
def collect_template(session: Session, name) -> typing.Optional[HTMLTemplate]:
global templates
key = '/'.join([session.app_path, name])
if key in templates:
return templates[key]
path = _search_component(session.app_path, name)
if not path:
path = _search_component(COMPONENTS_PATH, name)
if not path:
# session.error(f'component {name} not found')
return None
template = load(path, session.error)
if template:
template.name = name
templates[key] = template
return template
class StyleVisitor(PMLParserVisitor):
parser = cssutils.CSSParser(validate=False, raiseExceptions=True)
def __init__(self, class_name: str):
self.class_name = class_name
self.styles: typing.List[str] = []
self.in_style = False
self.global_mode = False
def visitRawText(self, ctx: PMLParser.RawTextContext):
if not self.in_style:
return
text = ctx.getText()
text = '\n' * (ctx.start.line-1) + text
text = sass.compile(string=text, output_style='compact', include_paths=[CSS_PATH])
if self.global_mode:
self.styles.append(text)
else:
# color any selector and sub-selector with component-based class
base_class = f'.{self.class_name}'
# collect and cut all global marks to make css parser happy
global_marks = []
glo = ':global('
def parse_globals(text: str):
res = []
for line, s in enumerate(text.splitlines()):
while glo in s:
pos = s.index(glo)
left = pos + len(glo)
right = left
cnt = 1
while cnt > 0 and right < len(s):
if s[right] == ')':
cnt -= 1
elif s[right] == '(':
cnt += 1
elif s[right] == '{':
break
right += 1
if cnt > 0:
raise ValueError(':global pseudo-class should be closed')
global_marks.append((line + 1, left + 1))
s = s[:pos] + ' ' * len(glo) + s[left:right - 1] + ' ' + s[right:]
res.append(s)
return '\n'.join(res)
text = parse_globals(text)
# walk parse tree and inject base class
def go(l):
for node in l:
if type(node) == CSSMediaRule:
go(node.cssRules)
elif type(node) == CSSStyleRule:
for sel in node.selectorList:
lst = sel.seq
marked = False
i = 0
while i < len(lst):
token = lst[i]
def mark(shift: int, after: int):
nonlocal i, marked
if (token.line, token.col - shift) not in global_marks:
lst.insert(i + after, base_class, 'class')
i += 1
marked = True
if not marked:
if token.type in ('type-selector', 'universal'): # a, *
mark(0, 1)
elif token.type in ('class', 'id', 'pseudo-class'): # .Table, #id, :not
mark(1, 0)
elif token.type == 'pseudo-element': # ::selection
mark(2, 0)
elif token.type == 'attribute-start': # [...]
mark(0, 0)
elif token.type in ('descendant', 'child', 'adjacent-sibling', 'following-sibling'): # ' ', >, +, ~
marked = False
i += 1
sheet = self.parser.parseString(text)
go(sheet)
# first naive attempt, saved for history
# chunks = re.split(r'(?<=})', text)
# res = '\n'.join(f'.{self.class_name} {chunk.strip()}' for chunk in chunks if chunk.strip())
# recover css text with injections
self.styles.append(str(sheet.cssText, 'utf8'))
def visitRawName(self, ctx: PMLParser.RawNameContext):
name = ctx.getText()
if name == 'global':
self.global_mode = True
def visitRawTag(self, ctx: PMLParser.RawTagContext):
if ctx.getText().strip()[1:] == 'style':
self.in_style = True
self.global_mode = False
def visitRawCloseTag(self, ctx: PMLParser.RawCloseTagContext):
self.in_style = False
def visitErrorNode(self, node):
raise IllegalStateException(f'wrong node {node.getText()}')
def load_styles(name: str, filename: str):
in_stream = FileStream(filename, encoding='utf-8')
lexer = PMLLexer(in_stream)
stream = CommonTokenStream(lexer)
parser = PMLParser(stream)
tree = parser.process()
visitor = StyleVisitor(name)
visitor.visit(tree)
return '\n'.join(visitor.styles)
def collect_styles(app_path, error_callback: typing.Callable[[str], None]) -> str:
styles = []
for root, dirs, files in os.walk(app_path):
for file in files: # type: str
if file.endswith('html'):
name, ext = os.path.splitext(file)
path = os.path.join(root, file)
try:
res = load_styles(name, path)
except Exception as e:
error_callback(f'{path}> Style collector> {e}')
else:
if res:
styles.append(res)
return '\n'.join(styles)
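# Illustrative usage sketch (the application path and the error handler are placeholders,
# not part of this module; collect_template additionally requires a pantra Session):
#   import sys
#   css = collect_styles('/path/to/app', lambda msg: print(msg, file=sys.stderr))
#   template = collect_template(session, 'MyComponent')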
|
StarcoderdataPython
|
4894712
|
#
# MIT License
#
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import json
import fixtures
import kubernetes.client
import mock
import requests
import responses
import testtools
from keycloak_setup import keycloak_localize
class TestKeycloakLocalize(testtools.TestCase):
def test_run(self):
cf_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_configure_ldap_user_federation')).mock
clu_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_local_users')).mock
clg_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_local_groups')).mock
cas_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_assignments')).mock
fetch_users_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_users')).mock
fetch_groups_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_groups')).mock
kl = keycloak_localize.KeycloakLocalize()
kl.run()
cf_mock.assert_called_once_with()
clu_mock.assert_called_once_with()
clg_mock.assert_called_once_with()
cas_mock.assert_called_once_with()
fetch_users_mock.assert_called_once_with()
fetch_groups_mock.assert_called_once_with()
def test_s3_client_property(self):
bc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.boto3, 'client')).mock
kl = keycloak_localize.KeycloakLocalize(
user_export_storage_url=mock.sentinel.url,
user_export_storage_access_key=mock.sentinel.ak,
user_export_storage_secret_key=mock.sentinel.sk,
)
s3_client = kl._s3_client
self.assertIs(s3_client, kl._s3_client)
bc_mock.assert_called_once_with(
's3',
endpoint_url=mock.sentinel.url,
aws_access_key_id=mock.sentinel.ak,
aws_secret_access_key=mock.sentinel.sk,
)
def test_core_v1_property(self):
kc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.kubernetes.client, 'CoreV1Api')).mock
kl = keycloak_localize.KeycloakLocalize()
core_v1 = kl._core_v1
self.assertIs(core_v1, kl._core_v1)
kc_mock.assert_called_once_with()
def test_create_assignments_no_assignments(self):
kl = keycloak_localize.KeycloakLocalize(local_role_assignments={})
kl._create_assignments()
def test_create_assignments_assignments(self):
# _create_assignments() calls _create_assignment for each assignment.
ca_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_assignment')).mock
assignments = {mock.sentinel.a1, mock.sentinel.a2}
kl = keycloak_localize.KeycloakLocalize(
local_role_assignments=assignments)
kl._create_assignments()
ca_mock.assert_any_call(mock.sentinel.a1)
ca_mock.assert_any_call(mock.sentinel.a2)
self.assertEqual(2, ca_mock.call_count)
def test_configure_ldap_user_federation_no_ldap(self):
# When LDAP isn't configured (no connection URL), _configure_ldap_ does
# nothing
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._configure_ldap_user_federation()
fc_mock.assert_not_called()
def test_configure_ldap_user_federation_already_exists(self):
# When the query indicates the federation was already created then
# nothing to do.
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
fc_mock.return_value = {'id': 'something'}
kl = keycloak_localize.KeycloakLocalize(
ldap_connection_url=str(mock.sentinel.ldap_url)
)
kl._configure_ldap_user_federation()
fc_mock.assert_called_once_with(kl.ldap_federation_name)
def test_configure_ldap_user_federation_needs_creating(self):
# When the query indicates the federation doesn't exist then
# the federation is created.
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
fc_mock.return_value = None
cl_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_user_federation')).mock
ruam_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_remove_ldap_user_attribute_mappers')).mock
cuam_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_user_attribute_mappers')).mock
clgm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_group_mapper')).mock
clrm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_role_mapper')).mock
sync_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_trigger_full_user_sync')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_connection_url=str(mock.sentinel.ldap_url)
)
kl._configure_ldap_user_federation()
fc_mock.assert_called_once_with(kl.ldap_federation_name)
cl_mock.assert_called_once_with()
ruam_mock.assert_called_once_with()
cuam_mock.assert_called_once_with()
clgm_mock.assert_called_once_with()
clrm_mock.assert_called_once_with()
sync_mock.assert_called_once_with()
def test_configure_ldap_user_federation_cleanup_on_error(self):
# When configuration hits a problem and an exception is raised there's
# an attempt to clean up and the original Exception is re-raised.
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
fc_mock.return_value = None
cl_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_user_federation')).mock
ruam_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_remove_ldap_user_attribute_mappers')).mock
ruam_mock.side_effect = Exception()
dl_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_delete_ldap_user_federation')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_connection_url=str(mock.sentinel.ldap_url)
)
self.assertRaises(Exception, kl._configure_ldap_user_federation)
cl_mock.assert_called_once_with()
dl_mock.assert_called_once_with()
def test_create_local_users(self):
cu_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_local_user')).mock
local_users = [mock.sentinel.user1, mock.sentinel.user2]
kl = keycloak_localize.KeycloakLocalize(local_users=local_users)
kl._create_local_users()
cu_mock.assert_any_call(mock.sentinel.user1)
cu_mock.assert_any_call(mock.sentinel.user2)
self.assertEqual(2, cu_mock.call_count)
@responses.activate
def test_create_local_user_success(self):
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/users'
responses.add(responses.POST, url, status=204, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
user = {
'name': str(mock.sentinel.name),
'firstName': str(mock.sentinel.first_name),
'password': str(mock.sentinel.password),
'loginShell': str(mock.sentinel.login_shell),
'homeDirectory': str(mock.sentinel.home_directory),
'uidNumber': str(mock.sentinel.uid_number),
'gidNumber': str(mock.sentinel.gid_number),
}
kl._create_local_user(user)
exp_req_body = {
'username': str(mock.sentinel.name),
'enabled': True,
'firstName': str(mock.sentinel.first_name),
'credentials': [
{'type': 'password', 'value': str(mock.sentinel.password), },
],
'attributes': {
'loginShell': [str(mock.sentinel.login_shell), ],
'homeDirectory': [str(mock.sentinel.home_directory), ],
'uidNumber': [str(mock.sentinel.uid_number), ],
'gidNumber': [str(mock.sentinel.gid_number), ],
},
}
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
@responses.activate
def test_create_local_user_already_exists(self):
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/users'
responses.add(responses.POST, url, status=409, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
user = {
'name': str(mock.sentinel.name),
'firstName': str(mock.sentinel.first_name),
            'password': str(mock.sentinel.password),
'loginShell': str(mock.sentinel.login_shell),
'homeDirectory': str(mock.sentinel.home_directory),
'uidNumber': str(mock.sentinel.uid_number),
'gidNumber': str(mock.sentinel.gid_number),
}
kl._create_local_user(user)
# No exception is raised since 409 indicates already exists.
@responses.activate
def test_create_local_user_error(self):
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/users'
responses.add(responses.POST, url, status=500, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
user = {
'name': str(mock.sentinel.name),
'firstName': str(mock.sentinel.first_name),
'password': str(mock.sentinel.password),
'loginShell': str(mock.sentinel.login_shell),
'homeDirectory': str(mock.sentinel.home_directory),
'uidNumber': str(mock.sentinel.uid_number),
'gidNumber': str(mock.sentinel.gid_number),
}
self.assertRaises(requests.exceptions.HTTPError, kl._create_local_user, user)
def test_create_local_groups(self):
cg_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_local_group')).mock
local_groups = [mock.sentinel.group1, mock.sentinel.group2]
kl = keycloak_localize.KeycloakLocalize(local_groups=local_groups)
kl._create_local_groups()
cg_mock.assert_any_call(mock.sentinel.group1)
cg_mock.assert_any_call(mock.sentinel.group2)
self.assertEqual(2, cg_mock.call_count)
@responses.activate
def test_create_local_group_success(self):
am_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_add_member')).mock
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/groups'
sample_id = '16178977-7389-49b2-b4d8-c42fe0b4bf8f'
sample_location_url = (
'https://keycloak.services:8080/keycloak/admin/realms/shasta/'
'groups/{}'.format(sample_id))
responses.add(
responses.POST, url, status=204, json={},
headers={'location': sample_location_url})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
group = {
'name': str(mock.sentinel.name),
'gid': str(mock.sentinel.gid),
'members': [str(mock.sentinel.user1), str(mock.sentinel.user2), ],
}
kl._create_local_group(group)
exp_req_body = {
'name': str(mock.sentinel.name),
'attributes': {
'cn': [str(mock.sentinel.name), ],
'gidNumber': [str(mock.sentinel.gid), ],
'memberUid': [str(mock.sentinel.user1), str(mock.sentinel.user2), ],
}
}
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
am_mock.assert_any_call(sample_id, str(mock.sentinel.user1))
am_mock.assert_any_call(sample_id, str(mock.sentinel.user2))
self.assertEqual(2, am_mock.call_count)
@responses.activate
def test_create_local_group_already_exists(self):
am_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_add_member')).mock
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/groups'
# Keycloak responds with 409 Conflict when a group with the name already exists.
responses.add(responses.POST, url, status=409, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
group = {
'name': str(mock.sentinel.name),
'gid': str(mock.sentinel.gid),
'members': [str(mock.sentinel.user1), str(mock.sentinel.user2), ],
}
kl._create_local_group(group)
self.assertEqual(0, am_mock.call_count)
@responses.activate
def test_create_local_group_error(self):
am_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_add_member')).mock
url = 'http://keycloak.services:8080/keycloak/admin/realms/shasta/groups'
responses.add(responses.POST, url, status=500, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
group = {
'name': str(mock.sentinel.name),
'gid': str(mock.sentinel.gid),
'members': [str(mock.sentinel.user1), str(mock.sentinel.user2), ],
}
self.assertRaises(
requests.exceptions.HTTPError, kl._create_local_group, group)
self.assertEqual(0, am_mock.call_count)
@responses.activate
def test_add_member_user_exists(self):
fun_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_user_by_name')).mock
fun_mock.return_value = {'id': str(mock.sentinel.user_id), }
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/users/'
'{}/groups/{}'.format(mock.sentinel.user_id, mock.sentinel.group_id))
responses.add(responses.PUT, url)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
kl._add_member(
str(mock.sentinel.group_id), str(mock.sentinel.member_name))
self.assertEqual(1, len(responses.calls))
def test_add_member_no_user(self):
# When the user isn't found, _add_member raises UnrecoverableError.
fun_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_user_by_name')).mock
fun_mock.side_effect = keycloak_localize.NotFound
kl = keycloak_localize.KeycloakLocalize()
self.assertRaises(
keycloak_localize.UnrecoverableError, kl._add_member,
str(mock.sentinel.group_id), str(mock.sentinel.member_name))
def test_create_assignment_group(self):
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_client_by_client_id')).mock
fr_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_client_role')).mock
cga_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_group_assignment')).mock
kl = keycloak_localize.KeycloakLocalize()
grp_assignment = {
'group': str(mock.sentinel.group),
'client': str(mock.sentinel.client),
'role': str(mock.sentinel.role),
}
kl._create_assignment(grp_assignment)
fc_mock.assert_called_once_with(str(mock.sentinel.client))
fr_mock.assert_called_once_with(
fc_mock.return_value, str(mock.sentinel.role))
cga_mock.assert_called_once_with(
str(mock.sentinel.group), fc_mock.return_value,
fr_mock.return_value)
def test_create_assignment_user(self):
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_client_by_client_id')).mock
fr_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_client_role')).mock
cua_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_user_assignment')).mock
kl = keycloak_localize.KeycloakLocalize()
user_assignment = {
'user': str(mock.sentinel.user),
'client': str(mock.sentinel.client),
'role': str(mock.sentinel.role),
}
kl._create_assignment(user_assignment)
fc_mock.assert_called_once_with(str(mock.sentinel.client))
fr_mock.assert_called_once_with(
fc_mock.return_value, str(mock.sentinel.role))
cua_mock.assert_called_once_with(
str(mock.sentinel.user), fc_mock.return_value,
fr_mock.return_value)
@responses.activate
def test_create_group_assignment_group_exists(self):
group_name = str(mock.sentinel.group_name)
group_id = str(mock.sentinel.group_id)
client_id = str(mock.sentinel.client_id)
fg_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_group')).mock
fg_mock.return_value = {
'name': group_name,
'id': group_id,
}
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/groups/'
'{}/role-mappings/clients/{}'.format(group_id, client_id))
responses.add(responses.POST, url, status=204, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
client = {
'id': client_id,
'name': str(mock.sentinel.client_name),
}
client_role = {
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
}
kl._create_group_assignment(group_name, client, client_role)
fg_mock.assert_called_once_with(group_name)
exp_req_body = [
{
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
'composite': False,
'clientRole': True,
'containerId': client_id,
},
]
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
def test_create_group_assignment_group_not_found(self):
# When the group isn't found, an UnrecoverableError is raised.
group_name = str(mock.sentinel.group_name)
client_id = str(mock.sentinel.client_id)
fg_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_group')).mock
fg_mock.side_effect = keycloak_localize.NotFound()
kl = keycloak_localize.KeycloakLocalize()
client = {
'id': client_id,
'name': str(mock.sentinel.client_name),
}
client_role = {
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
}
self.assertRaises(
keycloak_localize.UnrecoverableError, kl._create_group_assignment,
group_name, client, client_role)
@responses.activate
def test_create_user_assignment_user_found(self):
user_name = str(mock.sentinel.user_name)
user_id = str(mock.sentinel.user_id)
client_id = str(mock.sentinel.client_id)
fu_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_user_by_name')).mock
fu_mock.return_value = {
'name': user_name,
'id': user_id,
}
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/users/'
'{}/role-mappings/clients/{}'.format(user_id, client_id))
responses.add(responses.POST, url, status=204, json={})
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
client = {
'id': client_id,
'name': str(mock.sentinel.client_name),
}
client_role = {
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
}
kl._create_user_assignment(user_name, client, client_role)
fu_mock.assert_called_once_with(user_name)
exp_req_body = [
{
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
'composite': False,
'clientRole': True,
'containerId': client_id,
},
]
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
def test_create_user_assignment_user_not_found(self):
        # When the user doesn't exist, an UnrecoverableError is raised.
user_name = str(mock.sentinel.user_name)
client_id = str(mock.sentinel.client_id)
fu_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_user_by_name')).mock
fu_mock.side_effect = keycloak_localize.NotFound()
kl = keycloak_localize.KeycloakLocalize()
client = {
'id': client_id,
'name': str(mock.sentinel.client_name),
}
client_role = {
'id': str(mock.sentinel.role_id),
'name': str(mock.sentinel.role_name),
}
self.assertRaises(
keycloak_localize.UnrecoverableError, kl._create_user_assignment,
user_name, client, client_role)
@responses.activate
def test_fetch_client_by_client_id(self):
client_id = str(mock.sentinel.client_id)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/clients'
'?clientId={}'.format(client_id))
exp_client = {
'id': str(mock.sentinel.id),
}
resp_data = [
exp_client,
]
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
client = kl._fetch_client_by_client_id(client_id)
self.assertEqual(exp_client, client)
@responses.activate
def test_fetch_client_role(self):
client_id = str(mock.sentinel.client_id)
role_name = str(mock.sentinel.role_name)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'clients/{}/roles/{}'.format(client_id, role_name))
resp_data = {
'id': str(mock.sentinel.role_id),
}
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
client = {
'clientId': client_id,
'id': str(mock.sentinel.client_id),
}
client_role = kl._fetch_client_role(client, role_name)
self.assertEqual(resp_data, client_role)
@responses.activate
def test_fetch_group_found(self):
group_name = str(mock.sentinel.group_name)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'groups?search={}'.format(group_name))
exp_group = {
'name': group_name,
'id': str(mock.sentinel.id),
}
resp_data = [
exp_group,
]
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
group = kl._fetch_group(group_name)
self.assertEqual(exp_group, group)
@responses.activate
def test_fetch_group_not_found(self):
        # When a group with the given name doesn't exist, NotFound is raised.
group_name = str(mock.sentinel.group_name)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'groups?search={}'.format(group_name))
resp_data = []
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
self.assertRaises(
keycloak_localize.NotFound, kl._fetch_group, group_name)
@responses.activate
def test_fetch_user_by_name_found(self):
user_name = str(mock.sentinel.user_name)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'users?username={}'.format(user_name))
resp_user = {
'id': str(mock.sentinel.id),
}
resp_data = [
resp_user,
]
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
user = kl._fetch_user_by_name(user_name)
self.assertEqual(resp_user, user)
@responses.activate
def test_fetch_user_by_name_not_found(self):
        # When there's no user with the given name, NotFound is raised.
user_name = str(mock.sentinel.user_name)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'users?username={}'.format(user_name))
resp_data = []
responses.add(responses.GET, url, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
self.assertRaises(
keycloak_localize.NotFound, kl._fetch_user_by_name, user_name)
def test_create_ldap_user_federation_no_bind_dn(self):
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_connection_url=str(mock.sentinel.ldap_url),
ldap_search_base=str(mock.sentinel.search_base),
)
kl._create_ldap_user_federation()
exp_config = {
'priority': ['1'],
'editMode': ['READ_ONLY'],
'syncRegistrations': ['false'],
'vendor': ['other'],
'usernameLDAPAttribute': ['uid'],
'rdnLDAPAttribute': ['uid'],
'uuidLDAPAttribute': ['uid'],
'userObjectClasses': ['posixAccount'],
'connectionUrl': [str(mock.sentinel.ldap_url), ],
'usersDn': [str(mock.sentinel.search_base), ],
'authType': ['none'],
'searchScope': ['2'],
'useTruststoreSpi': ['ldapsOnly'],
'connectionPooling': ['true'],
'pagination': ['true'],
'allowKerberosAuthentication': ['false'],
'batchSizeForSync': ['4000'],
'fullSyncPeriod': ['-1'],
'changedSyncPeriod': ['-1'],
'debug': ['true'],
}
cc_mock.assert_called_once_with(
name='shasta-user-federation-ldap',
provider_id='ldap',
provider_type='org.keycloak.storage.UserStorageProvider',
config=exp_config,
)
self.assertIs(kl._ldap_federation_object_id, cc_mock.return_value)
def test_create_ldap_user_federation_bind_dn(self):
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_connection_url=str(mock.sentinel.ldap_url),
ldap_search_base=str(mock.sentinel.search_base),
ldap_bind_dn=str(mock.sentinel.bind_dn),
ldap_bind_credentials=str(mock.sentinel.bind_pw),
)
kl._create_ldap_user_federation()
exp_config = {
'priority': ['1'],
'editMode': ['READ_ONLY'],
'syncRegistrations': ['false'],
'vendor': ['other'],
'usernameLDAPAttribute': ['uid'],
'rdnLDAPAttribute': ['uid'],
'uuidLDAPAttribute': ['uid'],
'userObjectClasses': ['posixAccount'],
'connectionUrl': [str(mock.sentinel.ldap_url), ],
'usersDn': [str(mock.sentinel.search_base), ],
'authType': ['none'],
'searchScope': ['2'],
'useTruststoreSpi': ['ldapsOnly'],
'connectionPooling': ['true'],
'pagination': ['true'],
'allowKerberosAuthentication': ['false'],
'batchSizeForSync': ['4000'],
'fullSyncPeriod': ['-1'],
'changedSyncPeriod': ['-1'],
'debug': ['true'],
'bindDn': [str(mock.sentinel.bind_dn), ],
'bindCredential': [str(mock.sentinel.bind_pw), ]
}
cc_mock.assert_called_once_with(
name='shasta-user-federation-ldap',
provider_id='ldap',
provider_type='org.keycloak.storage.UserStorageProvider',
config=exp_config,
)
self.assertIs(kl._ldap_federation_object_id, cc_mock.return_value)
def test_delete_ldap_user_federation(self):
dc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_delete_component')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._ldap_federation_object_id = mock.sentinel.lfoi
kl._delete_ldap_user_federation()
dc_mock.assert_called_once_with(mock.sentinel.lfoi)
def test_remove_ldap_user_attribute_mappers(self):
rm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_remove_ldap_user_attribute_mapper')).mock
user_attribute_mappers = [
str(mock.sentinel.uam1), str(mock.sentinel.uam2),
]
kl = keycloak_localize.KeycloakLocalize(
ldap_user_attribute_mappers_to_remove=user_attribute_mappers
)
kl._remove_ldap_user_attribute_mappers()
rm_mock.assert_any_call(str(mock.sentinel.uam1))
rm_mock.assert_any_call(str(mock.sentinel.uam2))
def test_remove_ldap_user_attribute_mapper_exists(self):
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
fc_mock.return_value = {
'id': str(mock.sentinel.m_id),
}
dc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_delete_component')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._ldap_federation_object_id = str(mock.sentinel.fid)
kl._remove_ldap_user_attribute_mapper(str(mock.sentinel.uam_name))
fc_mock.assert_called_once_with(
str(mock.sentinel.uam_name), parent_id=str(mock.sentinel.fid))
dc_mock.assert_called_once_with(str(mock.sentinel.m_id))
def test_remove_ldap_user_attribute_mapper_no_exist(self):
fc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_fetch_component_by_name')).mock
fc_mock.return_value = None
dc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_delete_component')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._remove_ldap_user_attribute_mapper(str(mock.sentinel.uam_name))
dc_mock.assert_not_called()
def test_create_ldap_user_attribute_mappers(self):
cla_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize,
'_create_ldap_user_attribute_mapper')).mock
user_attribute_mappers = [
str(mock.sentinel.uam1), str(mock.sentinel.uam2),
]
kl = keycloak_localize.KeycloakLocalize(
ldap_user_attribute_mappers=user_attribute_mappers
)
kl._create_ldap_user_attribute_mappers()
cla_mock.assert_any_call(str(mock.sentinel.uam1))
cla_mock.assert_any_call(str(mock.sentinel.uam2))
def test_create_ldap_user_attribute_mapper(self):
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._ldap_federation_object_id = str(mock.sentinel.lfoi)
kl._create_ldap_user_attribute_mapper(str(mock.sentinel.mapper_name))
cc_mock.assert_called_once_with(
name=str(mock.sentinel.mapper_name),
provider_id='user-attribute-ldap-mapper',
provider_type='org.keycloak.storage.ldap.mappers.LDAPStorageMapper',
parent_id=str(mock.sentinel.lfoi),
config={
'ldap.attribute': [
str(mock.sentinel.mapper_name),
],
'is.mandatory.in.ldap': [
'false',
],
'always.read.value.from.ldap': [
'false'
],
'read.only': [
'true'
],
'user.model.attribute': [
str(mock.sentinel.mapper_name),
],
},
)
@responses.activate
def test_create_component_no_parent_id(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components')
sample_component_id = '16178977-7389-49b2-b4d8-c42fe0b4bf8f'
sample_location_url = (
'https://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components/{}'.format(sample_component_id))
responses.add(
responses.POST, url, status=204, json={},
headers={'location': sample_location_url})
sample_config = {
str(mock.sentinel.attr1): str(mock.sentinel.val1),
}
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
component_id = kl._create_component(
name=str(mock.sentinel.name),
provider_id=str(mock.sentinel.provider_id),
provider_type=str(mock.sentinel.provider_type),
config=sample_config
)
self.assertEqual(sample_component_id, component_id)
exp_req_body = {
'providerId': str(mock.sentinel.provider_id),
'providerType': str(mock.sentinel.provider_type),
'name': str(mock.sentinel.name),
'config': sample_config,
}
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
@responses.activate
def test_create_component_parent_id(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components')
sample_component_id = '16178977-7389-49b2-b4d8-c42fe0b4bf8f'
sample_location_url = (
'https://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components/{}'.format(sample_component_id))
responses.add(
responses.POST, url, status=204, json={},
headers={'location': sample_location_url})
sample_config = {
str(mock.sentinel.attr1): str(mock.sentinel.val1),
}
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
component_id = kl._create_component(
name=str(mock.sentinel.name),
provider_id=str(mock.sentinel.provider_id),
provider_type=str(mock.sentinel.provider_type),
parent_id=str(mock.sentinel.parent_id),
config=sample_config
)
self.assertEqual(sample_component_id, component_id)
exp_req_body = {
'providerId': str(mock.sentinel.provider_id),
'providerType': str(mock.sentinel.provider_type),
'name': str(mock.sentinel.name),
'parentId': str(mock.sentinel.parent_id),
'config': sample_config,
}
self.assertEqual(
exp_req_body, json.loads(responses.calls[0].request.body))
@responses.activate
def test_fetch_component_by_name_found(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components?name={}'.format(str(mock.sentinel.c_name)))
sample_component = {
'name': str(mock.sentinel.c_name),
'id': str(mock.sentinel.c_id),
# All the other fields.
}
responses.add(responses.GET, url, json=[sample_component])
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
component = kl._fetch_component_by_name(str(mock.sentinel.c_name))
self.assertEqual(sample_component, component)
@responses.activate
def test_fetch_component_by_name_no_match(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components?name={}'.format(str(mock.sentinel.c_name)))
responses.add(responses.GET, url, json=[])
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
component = kl._fetch_component_by_name(str(mock.sentinel.c_name))
self.assertIsNone(component)
@responses.activate
def test_fetch_component_by_name_with_parent_id(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components?name={}&parent={}'.format(
str(mock.sentinel.c_name), str(mock.sentinel.c_id)))
responses.add(responses.GET, url, json=[])
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
# This raises if the parameter isn't on the URL
kl._fetch_component_by_name(
str(mock.sentinel.c_name), parent_id=str(mock.sentinel.c_id))
@responses.activate
def test_delete_component(self):
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'components/{}'.format(str(mock.sentinel.c_id)))
responses.add(responses.DELETE, url, status=204)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
kl._delete_component(str(mock.sentinel.c_id))
self.assertEqual(1, len(responses.calls))
def test_create_ldap_group_mapper(self):
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_search_base=str(mock.sentinel.search_base),
ldap_group_name_ldap_attr=str(mock.sentinel.group_name_ldap_attr),
ldap_group_object_class=str(mock.sentinel.group_object_class),
ldap_preserve_group_inheritance=str(mock.sentinel.preserve_group_inheritance),
ldap_group_membership_attribute=str(mock.sentinel.membership_ldap_attr),
ldap_group_membership_attr_type=str(mock.sentinel.membership_attr_type),
ldap_group_membership_ldap_attr=str(mock.sentinel.member_user_ldap_attr),
ldap_group_filter=str(mock.sentinel.groups_ldap_filter),
ldap_edit_mode=str(mock.sentinel.edit_mode),
ldap_user_roles_retrieve_strategy=str(mock.sentinel.user_roles_retrieve_strategy),
ldap_mapped_group_attrs=str(mock.sentinel.mapped_group_attributes),
ldap_groups_drop_during_sync=str(mock.sentinel.drop_groups_during_sync)
)
kl._ldap_federation_object_id = str(mock.sentinel.lfoi)
kl._create_ldap_group_mapper()
cc_mock.assert_called_once_with(
name='group-attribute-ldap-mapper',
provider_id='group-ldap-mapper',
provider_type='org.keycloak.storage.ldap.mappers.LDAPStorageMapper',
parent_id=str(mock.sentinel.lfoi),
config={
'groups.dn': [
str(mock.sentinel.search_base),
],
'group.name.ldap.attribute': [
str(mock.sentinel.group_name_ldap_attr),
],
'group.object.classes': [
str(mock.sentinel.group_object_class),
],
'preserve.group.inheritance': [
str(mock.sentinel.preserve_group_inheritance),
],
'membership.ldap.attribute': [
str(mock.sentinel.membership_ldap_attr),
],
'membership.attribute.type': [
str(mock.sentinel.membership_attr_type),
],
'membership.user.ldap.attribute': [
str(mock.sentinel.member_user_ldap_attr),
],
'groups.ldap.filter': [
str(mock.sentinel.groups_ldap_filter),
],
'mode': [
str(mock.sentinel.edit_mode),
],
'user.roles.retrieve.strategy': [
str(mock.sentinel.user_roles_retrieve_strategy),
],
'mapped.group.attributes': [
str(mock.sentinel.mapped_group_attributes),
],
'drop.non.existing.groups.during.sync': [
str(mock.sentinel.drop_groups_during_sync),
],
},
)
def test_create_ldap_role_mapper_no_dn(self):
        # When the role_mapper_dn isn't set, the role mapper isn't added.
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize(ldap_role_mapper_dn='')
kl._create_ldap_role_mapper()
cc_mock.assert_not_called()
def test_create_ldap_role_mapper_has_dn(self):
cc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_component')).mock
kl = keycloak_localize.KeycloakLocalize(
ldap_role_mapper_dn=str(mock.sentinel.dn),
ldap_role_mapper_name_ldap_attr=str(mock.sentinel.ldap_attr),
ldap_role_mapper_object_class=str(mock.sentinel.object_class),
ldap_role_mapper_membership_ldap_attr=str(mock.sentinel.membership_ldap_attr),
ldap_role_mapper_membership_attr_type=str(mock.sentinel.membership_attr_type),
ldap_role_mapper_membership_user_ldap_attr=str(mock.sentinel.membership_user_ldap_attr),
ldap_role_mapper_roles_ldap_filter=str(mock.sentinel.roles_ldap_filter),
ldap_role_mapper_mode=str(mock.sentinel.mode),
ldap_role_mapper_retrieve_strategy=str(mock.sentinel.retrieve_strategy),
ldap_role_mapper_memberof_attr=str(mock.sentinel.memberof_attr),
ldap_role_mapper_use_realm_roles_mapping=str(mock.sentinel.use_realm_roles_mapping),
ldap_role_mapper_client_id=str(mock.sentinel.client_id)
)
kl._ldap_federation_object_id = str(mock.sentinel.lfoi)
kl._create_ldap_role_mapper()
cc_mock.assert_called_once_with(
name='role-mapper-shasta',
provider_id='role-ldap-mapper',
provider_type='org.keycloak.storage.ldap.mappers.LDAPStorageMapper',
parent_id=str(mock.sentinel.lfoi),
config={
'roles.dn': [str(mock.sentinel.dn)],
'role.name.ldap.attribute': [str(mock.sentinel.ldap_attr)],
'role.object.classes': [str(mock.sentinel.object_class)],
'membership.ldap.attribute': [str(mock.sentinel.membership_ldap_attr)],
'membership.attribute.type': [str(mock.sentinel.membership_attr_type)],
'membership.user.ldap.attribute': [str(mock.sentinel.membership_user_ldap_attr)],
'roles.ldap.filter': [str(mock.sentinel.roles_ldap_filter)],
'mode': [str(mock.sentinel.mode)],
'user.roles.retrieve.strategy': [str(mock.sentinel.retrieve_strategy)],
'memberof.ldap.attribute': [str(mock.sentinel.memberof_attr)],
'use.realm.roles.mapping': [str(mock.sentinel.use_realm_roles_mapping)],
'client.id': [str(mock.sentinel.client_id)],
},
)
@responses.activate
def test_trigger_full_user_sync_enabled(self):
example_federation_id = str(mock.sentinel.federation_id)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'user-storage/{}/sync?action=triggerFullSync'.format(
example_federation_id))
responses.add(
responses.POST, url, status=200,
json=[{'added': 1465, 'status': '1465 updated users'}])
kl = keycloak_localize.KeycloakLocalize(ldap_do_full_sync=True)
kl._kc_master_admin_client_cache = requests.Session()
kl._ldap_federation_object_id = example_federation_id
kl._trigger_full_user_sync()
self.assertEqual(url, responses.calls[0].request.url)
@responses.activate
def test_trigger_full_user_sync_disabled(self):
kl = keycloak_localize.KeycloakLocalize(ldap_do_full_sync=False)
kl._kc_master_admin_client_cache = requests.Session()
kl._trigger_full_user_sync() # Any request will fail
@responses.activate
def test_trigger_full_user_sync_error(self):
        # When Keycloak returns a 500 error during the sync, an
        # UnrecoverableError is raised.
example_federation_id = str(mock.sentinel.federation_id)
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'user-storage/{}/sync?action=triggerFullSync'.format(
example_federation_id))
responses.add(
responses.POST, url, status=500,
json=[{'error': 'unknown_error'}]) # This is the response when the bindDN is incorrect...
kl = keycloak_localize.KeycloakLocalize(ldap_do_full_sync=True)
kl._kc_master_admin_client_cache = requests.Session()
kl._ldap_federation_object_id = example_federation_id
self.assertRaises(
keycloak_localize.UnrecoverableError, kl._trigger_full_user_sync)
def test_fetch_users_once(self):
fup_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_users_page')).mock
fup_mock.return_value = [mock.sentinel.user, ]
fmt_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_format_user_passwd_entry')).mock
fmt_mock.return_value = str(mock.sentinel.user_fmt)
s3c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_s3_client',
new_callable=mock.PropertyMock)).mock
cpc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_passwd_configmaps')).mock
kl = keycloak_localize.KeycloakLocalize(
user_export_storage_bucket=mock.sentinel.bucket,
user_export_storage_passwd_object=mock.sentinel.passwd,
)
kl._fetch_users()
fup_mock.assert_called_once_with(0)
fmt_mock.assert_called_once_with(mock.sentinel.user)
exp_result = '\n'.join([str(mock.sentinel.user_fmt), ])
s3c_mock.return_value.upload_fileobj.assert_called_once_with(
mock.ANY,
mock.sentinel.bucket,
mock.sentinel.passwd,
ExtraArgs={'ACL': 'public-read'}
)
user_data_sent = s3c_mock.return_value.upload_fileobj.call_args[0][0].read()
self.assertEqual(exp_result, user_data_sent.decode('utf-8'))
cpc_mock.assert_called_once_with(exp_result)
def test_fetch_users_multi_pages(self):
fup_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_users_page')).mock
page1 = [mock.sentinel.user1, mock.sentinel.user2, mock.sentinel.user3]
page2 = [mock.sentinel.user4]
fup_mock.side_effect = [page1, page2, Exception()]
fmt_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_format_user_passwd_entry')).mock
fmt_mock.side_effect = [
str(mock.sentinel.user1_fmt),
None, # Simulate a user that couldn't be formatted.
str(mock.sentinel.user3_fmt),
str(mock.sentinel.user4_fmt),
Exception()
]
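        # The trailing Exception() entries above make the test fail loudly if an
        # unexpected extra page fetch or format call happens.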
s3c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_s3_client',
new_callable=mock.PropertyMock)).mock
cpc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_passwd_configmaps')).mock
kl = keycloak_localize.KeycloakLocalize(
user_export_storage_bucket=mock.sentinel.bucket,
user_export_storage_passwd_object=mock.sentinel.passwd,
)
kl.fetch_users_page_size = len(page1)
kl._fetch_users()
fup_mock.assert_has_calls([mock.call(0), mock.call(3)])
fmt_calls = [
mock.call(mock.sentinel.user1), mock.call(mock.sentinel.user2),
mock.call(mock.sentinel.user3), mock.call(mock.sentinel.user4)]
fmt_mock.assert_has_calls(fmt_calls)
exp_result = '\n'.join([
str(mock.sentinel.user1_fmt), str(mock.sentinel.user3_fmt),
str(mock.sentinel.user4_fmt),
])
s3c_mock.return_value.upload_fileobj.assert_called_once_with(
mock.ANY,
mock.sentinel.bucket,
mock.sentinel.passwd,
ExtraArgs={'ACL': 'public-read'}
)
user_data_sent = s3c_mock.return_value.upload_fileobj.call_args[0][0].read()
self.assertEqual(exp_result, user_data_sent.decode('utf-8'))
cpc_mock.assert_called_once_with(exp_result)
@responses.activate
def test_fetch_users_page_some_users(self):
first = 0
max = 50
url = (
f'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
f'users?first={first}&max={max}')
sample_user_1 = {'username': 'user1'}
sample_user_2 = {'username': 'user2'}
resp_data = [sample_user_1, sample_user_2, ]
responses.add(responses.GET, url, status=200, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
res = kl._fetch_users_page(first)
self.assertEqual(res, resp_data)
@responses.activate
def test_fetch_users_page_no_users(self):
first = 0
max = 50
url = (
f'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
f'users?first={first}&max={max}')
resp_data = []
responses.add(responses.GET, url, status=200, json=resp_data)
kl = keycloak_localize.KeycloakLocalize()
kl._kc_master_admin_client_cache = requests.Session()
res = kl._fetch_users_page(first)
self.assertEqual(res, resp_data)
def test_format_user_passwd_entry_user_name_source_username(self):
sample_user = {
'id': '23dfbd85-22c3-4515-8a97-655dcd574d2d',
'createdTimestamp': 1585236398589,
'username': 'test_user_1',
'enabled': True,
'totp': False,
'emailVerified': False,
'firstName': '<NAME>',
'federationLink': 'b4bf6a68-cf16-4f7e-ba04-1b69c2b7eed1',
'attributes': {
'loginShell': [
'/bin/bash'
],
'homeDirectory': [
'/home/users/Test.User.1'
],
'LDAP_ENTRY_DN': [
'uid=test_user_1,ou=People,dc=datacenter,dc=cray,dc=com'
],
'uidNumber': [
'5534'
],
'gidNumber': [
'12790'
],
'modifyTimestamp': [
'20170607185637Z'
],
'createTimestamp': [
'20170515145508Z'
],
'LDAP_ID': [
'test_user_1'
],
},
'disableableCredentialTypes': [],
'requiredActions': [],
'notBefore': 0,
'access': {
'manageGroupMembership': True,
'view': True,
'mapRoles': True,
'impersonate': True,
'manage': True
},
}
kl = keycloak_localize.KeycloakLocalize()
res = kl._format_user_passwd_entry(sample_user)
exp_res = 'test_user_1::5534:12790:First Name:/home/users/Test.User.1:/bin/bash'
self.assertEqual(exp_res, res)
def test_format_user_passwd_entry_user_name_source_homeDirectory(self):
sample_user = {
'id': '23dfbd85-22c3-4515-8a97-655dcd574d2d',
'createdTimestamp': 1585236398589,
'username': 'test_user_1',
'enabled': True,
'totp': False,
'emailVerified': False,
'firstName': '<NAME>',
'federationLink': 'b4bf6a68-cf16-4f7e-ba04-1b69c2b7eed1',
'attributes': {
'loginShell': [
'/bin/bash'
],
'homeDirectory': [
'/home/users/Test.User.1'
],
'LDAP_ENTRY_DN': [
'uid=test_user_1,ou=People,dc=datacenter,dc=cray,dc=com'
],
'uidNumber': [
'5534'
],
'gidNumber': [
'12790'
],
'modifyTimestamp': [
'20170607185637Z'
],
'createTimestamp': [
'20170515145508Z'
],
'LDAP_ID': [
'test_user_1'
],
},
'disableableCredentialTypes': [],
'requiredActions': [],
'notBefore': 0,
'access': {
'manageGroupMembership': True,
'view': True,
'mapRoles': True,
'impersonate': True,
'manage': True
},
}
kl = keycloak_localize.KeycloakLocalize(user_export_name_source='homeDirectory')
res = kl._format_user_passwd_entry(sample_user)
exp_res = 'Test.User.1::5534:12790:First Name:/home/users/Test.User.1:/bin/bash'
self.assertEqual(exp_res, res)
def test_format_user_passwd_entry_no_attributes(self):
sample_user = {
'username': 'test_user_1',
}
kl = keycloak_localize.KeycloakLocalize()
self.assertIsNone(kl._format_user_passwd_entry(sample_user))
def test_format_user_passwd_entry_no_uidNumber(self):
sample_user = {
'username': 'test_user_1',
'attributes': {
'loginShell': ['/bin/bash', ],
'homeDirectory': ['/home/users/Test.User.1', ],
'gidNumber': ['12790', ],
},
}
kl = keycloak_localize.KeycloakLocalize()
self.assertIsNone(kl._format_user_passwd_entry(sample_user))
def test_format_user_passwd_entry_no_gidNumber(self):
sample_user = {
'username': 'test_user_1',
'attributes': {
'loginShell': ['/bin/bash', ],
'homeDirectory': ['/home/users/Test.User.1', ],
'uidNumber': ['12345', ],
},
}
kl = keycloak_localize.KeycloakLocalize()
self.assertIsNone(kl._format_user_passwd_entry(sample_user))
def test_format_user_passwd_entry_no_homeDirectory(self):
sample_user = {
'username': 'test_user_1',
'attributes': {
'loginShell': ['/bin/bash', ],
'uidNumber': ['12345', ],
'gidNumber': ['12790', ],
},
}
kl = keycloak_localize.KeycloakLocalize()
self.assertIsNone(kl._format_user_passwd_entry(sample_user))
def test_format_user_passwd_entry_no_loginShell(self):
sample_user = {
'username': 'test_user_1',
'attributes': {
'homeDirectory': ['/home/users/Test.User.1', ],
'uidNumber': ['12345', ],
'gidNumber': ['12790', ],
},
}
kl = keycloak_localize.KeycloakLocalize()
self.assertIsNone(kl._format_user_passwd_entry(sample_user))
def test_create_passwd_configmaps(self):
ac_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_apply_configmap')).mock
namespaces = [
str(mock.sentinel.namespace1),
str(mock.sentinel.namespace2),
]
kl = keycloak_localize.KeycloakLocalize(
user_export_namespaces=namespaces,
user_export_passwd_configmap_name=str(mock.sentinel.name),
)
kl._create_passwd_configmaps(str(mock.sentinel.passwd))
ac_mock.assert_any_call(
str(mock.sentinel.name), str(mock.sentinel.namespace1),
'keycloak-users', str(mock.sentinel.passwd))
ac_mock.assert_any_call(
str(mock.sentinel.name), str(mock.sentinel.namespace2),
'keycloak-users', str(mock.sentinel.passwd))
self.assertEqual(2, ac_mock.call_count)
def test_fetch_groups_disabled(self):
kl = keycloak_localize.KeycloakLocalize(
user_export_groups=False,
)
kl._kc_master_admin_client_cache = requests.Session()
        kl._fetch_groups()  # This would raise an exception if it tried to fetch.
@responses.activate
def test_fetch_groups_enabled(self):
s3c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_s3_client',
new_callable=mock.PropertyMock)).mock
cgc_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_groups_configmaps')).mock
url = (
'http://keycloak.services:8080/keycloak/admin/realms/shasta/'
'groups?briefRepresentation=false&max=-1')
sample_group_1 = {
'id': '19279262-0f24-416e-9189-3dfb7a952b2e',
'name': str(mock.sentinel.g1_name),
'path': '/{}'.format(str(mock.sentinel.g1_name)),
'attributes': {
'cn': [
str(mock.sentinel.g1_cn),
],
'gidNumber': [
str(mock.sentinel.g1_id),
],
'memberUid': [
str(mock.sentinel.g1_u1),
str(mock.sentinel.g1_u2),
]
},
'realmRoles': [],
'clientRoles': {},
'subGroups': [],
}
sample_group_2 = {
'id': '19279262-0f24-416e-9189-3dfb7a952b2f',
'name': str(mock.sentinel.g2_name),
'path': '/{}'.format(str(mock.sentinel.g2_name)),
'attributes': {
'cn': [
str(mock.sentinel.g2_cn),
],
'gidNumber': [
str(mock.sentinel.g2_id),
],
'memberUid': [
str(mock.sentinel.g2_u1),
str(mock.sentinel.g2_u2),
]
},
'realmRoles': [],
'clientRoles': {},
'subGroups': [],
}
sample_group_3 = {
'id': '19279262-0f24-416e-9189-3dfb7a952b2f',
'name': str(mock.sentinel.g2_name),
'path': '/{}'.format(str(mock.sentinel.g2_name)),
'attributes': {
'cn': [
str(mock.sentinel.g2_cn),
],
'gidNumber': [
str(mock.sentinel.g2_id),
],
# Some entries didn't have memberUid for some reason.
},
'realmRoles': [],
'clientRoles': {},
'subGroups': [],
}
responses.add(
responses.GET, url, status=200,
json=[sample_group_1, sample_group_2, sample_group_3, ])
kl = keycloak_localize.KeycloakLocalize(
user_export_groups=True,
user_export_storage_bucket=mock.sentinel.bucket,
user_export_storage_groups_object=mock.sentinel.passwd,
)
kl._kc_master_admin_client_cache = requests.Session()
kl._fetch_groups()
exp_data = '\n'.join([
'{}::{}:{}'.format(
mock.sentinel.g1_cn, mock.sentinel.g1_id,
','.join([str(mock.sentinel.g1_u1), str(mock.sentinel.g1_u2)])),
'{}::{}:{}'.format(
mock.sentinel.g2_cn, mock.sentinel.g2_id,
','.join([str(mock.sentinel.g2_u1), str(mock.sentinel.g2_u2)])),
'{}::{}:'.format(mock.sentinel.g2_cn, mock.sentinel.g2_id,),
])
s3c_mock.return_value.upload_fileobj.assert_called_once_with(
mock.ANY,
mock.sentinel.bucket,
mock.sentinel.passwd,
ExtraArgs={'ACL': 'public-read'}
)
data_sent = s3c_mock.return_value.upload_fileobj.call_args[0][0].read()
self.assertEqual(exp_data, data_sent.decode('utf-8'))
cgc_mock.assert_called_once_with(exp_data)
def test_create_groups_configmaps(self):
ac_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_apply_configmap')).mock
namespaces = [
str(mock.sentinel.namespace1),
str(mock.sentinel.namespace2),
]
kl = keycloak_localize.KeycloakLocalize(
user_export_namespaces=namespaces,
user_export_groups_configmap_name=str(mock.sentinel.name),
)
kl._create_groups_configmaps(str(mock.sentinel.groups))
ac_mock.assert_any_call(
str(mock.sentinel.name), str(mock.sentinel.namespace1),
'keycloak-groups', str(mock.sentinel.groups))
ac_mock.assert_any_call(
str(mock.sentinel.name), str(mock.sentinel.namespace2),
'keycloak-groups', str(mock.sentinel.groups))
self.assertEqual(2, ac_mock.call_count)
def test_apply_configmap_exists(self):
fcm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_configmap')).mock
scm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_sync_configmap')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._apply_configmap(
mock.sentinel.name, mock.sentinel.namespace, mock.sentinel.key_name,
mock.sentinel.data)
fcm_mock.assert_called_once_with(mock.sentinel.name, mock.sentinel.namespace)
scm_mock.assert_called_once_with(
mock.sentinel.name, mock.sentinel.namespace, mock.sentinel.key_name,
mock.sentinel.data, fcm_mock.return_value)
def test_apply_configmap_not_found(self):
fcm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_fetch_configmap', return_value=None)).mock
ccm_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_create_configmap')).mock
kl = keycloak_localize.KeycloakLocalize()
kl._apply_configmap(
mock.sentinel.name, mock.sentinel.namespace, mock.sentinel.key_name,
mock.sentinel.data)
fcm_mock.assert_called_once_with(mock.sentinel.name, mock.sentinel.namespace)
ccm_mock.assert_called_once_with(
mock.sentinel.name, mock.sentinel.namespace, mock.sentinel.key_name,
mock.sentinel.data)
def test_fetch_configmap_exists(self):
c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_core_v1',
new_callable=mock.PropertyMock)).mock
kl = keycloak_localize.KeycloakLocalize()
configmap = kl._fetch_configmap(
str(mock.sentinel.name), str(mock.sentinel.namespace))
c_mock.return_value.read_namespaced_config_map.assert_called_once_with(
str(mock.sentinel.name), str(mock.sentinel.namespace))
self.assertIs(
c_mock.return_value.read_namespaced_config_map.return_value,
configmap)
def test_fetch_configmap_not_found(self):
c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_core_v1',
new_callable=mock.PropertyMock)).mock
c_mock.return_value.read_namespaced_config_map.side_effect = (
kubernetes.client.rest.ApiException(404))
kl = keycloak_localize.KeycloakLocalize()
configmap = kl._fetch_configmap(
str(mock.sentinel.name), str(mock.sentinel.namespace))
self.assertIsNone(configmap)
def test_fetch_configmap_error(self):
c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_core_v1',
new_callable=mock.PropertyMock)).mock
c_mock.return_value.read_namespaced_config_map.side_effect = (
kubernetes.client.rest.ApiException(500))
kl = keycloak_localize.KeycloakLocalize()
self.assertRaises(
kubernetes.client.rest.ApiException,
kl._fetch_configmap, str(mock.sentinel.name), str(mock.sentinel.namespace))
def test_create_configmap(self):
c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_core_v1',
new_callable=mock.PropertyMock)).mock
kl = keycloak_localize.KeycloakLocalize()
kl._create_configmap(
str(mock.sentinel.name), str(mock.sentinel.namespace),
str(mock.sentinel.key_name), str(mock.sentinel.data))
c_mock.return_value.create_namespaced_config_map.assert_called_once_with(
str(mock.sentinel.namespace), mock.ANY
)
cm_param = c_mock.return_value.create_namespaced_config_map.call_args[0][1]
self.assertEqual(str(mock.sentinel.name), cm_param.metadata.name)
self.assertEqual(str(mock.sentinel.namespace), cm_param.metadata.namespace)
self.assertEqual(
str(mock.sentinel.data), cm_param.data[str(mock.sentinel.key_name)])
def test_sync_configmap_no_change(self):
current_configmap = mock.MagicMock()
current_configmap.data = {
str(mock.sentinel.key_name): str(mock.sentinel.data),
}
kl = keycloak_localize.KeycloakLocalize()
kl._sync_configmap(
mock.sentinel.name, mock.sentinel.namespace,
str(mock.sentinel.key_name), str(mock.sentinel.data),
current_configmap)
def test_sync_configmap_changed(self):
c_mock = self.useFixture(fixtures.MockPatchObject(
keycloak_localize.KeycloakLocalize, '_core_v1',
new_callable=mock.PropertyMock)).mock
current_configmap = mock.MagicMock()
current_configmap.data = {
str(mock.sentinel.key_name): str(mock.sentinel.data2),
}
kl = keycloak_localize.KeycloakLocalize()
kl._sync_configmap(
mock.sentinel.name, mock.sentinel.namespace,
str(mock.sentinel.key_name), str(mock.sentinel.data),
current_configmap)
c_mock.return_value.patch_namespaced_config_map.assert_called_once_with(
mock.sentinel.name, mock.sentinel.namespace, mock.ANY)
cm_param = c_mock.return_value.patch_namespaced_config_map.call_args[0][2]
self.assertEqual(
str(mock.sentinel.data), cm_param.data[str(mock.sentinel.key_name)])
|
StarcoderdataPython
|
355295
|
import toolbox_extended as te
import toolbox_02450 as tb
import numpy as np
import pandas as pd
from exam_toolbox import *
import re
import os
class exam:
# ----------------------------------------------- OPG 1-----------------------------------------------
def opg1():
return "E"
# ----------------------------------------------- OPG 2-----------------------------------------------
def opg2():
return "E"
# ----------------------------------------------- OPG 3-----------------------------------------------
def opg3():
return "E"
# ----------------------------------------------- OPG 4-----------------------------------------------
def opg4():
return "E"
# ----------------------------------------------- OPG 5-----------------------------------------------
def opg5():
return "E"
# ----------------------------------------------- OPG 6-----------------------------------------------
def opg6():
return "E"
# ----------------------------------------------- OPG 7-----------------------------------------------
def opg7():
return "E"
# ----------------------------------------------- OPG 8-----------------------------------------------
def opg8():
return "E"
# ----------------------------------------------- OPG 9-----------------------------------------------
def opg9():
return "E"
# ----------------------------------------------- OPG 10-----------------------------------------------
def opg10():
return "E"
# ----------------------------------------------- OPG 11-----------------------------------------------
def opg11():
return "E"
# ----------------------------------------------- OPG 12-----------------------------------------------
def opg12():
return "E"
# ----------------------------------------------- OPG 13-----------------------------------------------
def opg13():
return "E"
# ----------------------------------------------- OPG 14-----------------------------------------------
def opg14():
print()
return "E"
# ----------------------------------------------- OPG 15-----------------------------------------------
def opg15():
return "E"
# ----------------------------------------------- OPG 16-----------------------------------------------
def opg16():
return "E"
# ----------------------------------------------- OPG 17-----------------------------------------------
def opg17():
return "E"
# ----------------------------------------------- OPG 18-----------------------------------------------
def opg18():
return "E"
# ----------------------------------------------- OPG 19-----------------------------------------------
def opg19():
return "E"
# ----------------------------------------------- OPG 20-----------------------------------------------
def opg20():
return "E"
# ----------------------------------------------- OPG 21-----------------------------------------------
def opg21():
return "E"
# ----------------------------------------------- OPG 22-----------------------------------------------
def opg22():
return "E"
# ----------------------------------------------- OPG 23-----------------------------------------------
def opg23():
return "E"
# ----------------------------------------------- OPG 24-----------------------------------------------
def opg24():
return "E"
# ----------------------------------------------- OPG 25-----------------------------------------------
def opg25():
return "E"
# ----------------------------------------------- OPG 26-----------------------------------------------
def opg26():
return "E"
# ----------------------------------------------- OPG 27-----------------------------------------------
def opg27():
return "E"
# -------------------------------- answers dataframe -------------------------------------------------
def answers(show=True, csv=False, excel=True):
ans = pd.DataFrame(
columns=["Student number: s183920"]
) # columns = ["OPG", "svar"])
ans.loc[0] = ""
ans.loc[1] = "Q01: {}".format(exam.opg1())
ans.loc[2] = "Q02: {}".format(exam.opg2())
ans.loc[3] = "Q03: {}".format(exam.opg3())
ans.loc[4] = "Q04: {}".format(exam.opg4())
ans.loc[5] = "Q05: {}".format(exam.opg5())
ans.loc[6] = "Q06: {}".format(exam.opg6())
ans.loc[7] = "Q07: {}".format(exam.opg7())
ans.loc[8] = "Q08: {}".format(exam.opg8())
ans.loc[9] = "Q09: {}".format(exam.opg9())
ans.loc[10] = "Q10: {}".format(exam.opg10())
ans.loc[11] = ""
ans.loc[12] = "Q11: {}".format(exam.opg11())
ans.loc[13] = "Q12: {}".format(exam.opg12())
ans.loc[14] = "Q13: {}".format(exam.opg13())
ans.loc[15] = "Q14: {}".format(exam.opg14())
ans.loc[16] = "Q15: {}".format(exam.opg15())
ans.loc[17] = "Q16: {}".format(exam.opg16())
ans.loc[18] = "Q17: {}".format(exam.opg17())
ans.loc[19] = "Q18: {}".format(exam.opg18())
ans.loc[20] = "Q19: {}".format(exam.opg19())
ans.loc[21] = "Q20: {}".format(exam.opg20())
ans.loc[22] = ""
ans.loc[23] = "Q21: {}".format(exam.opg21())
ans.loc[24] = "Q22: {}".format(exam.opg22())
ans.loc[25] = "Q23: {}".format(exam.opg23())
ans.loc[26] = "Q24: {}".format(exam.opg24())
ans.loc[27] = "Q25: {}".format(exam.opg25())
ans.loc[28] = "Q26: {}".format(exam.opg26())
ans.loc[29] = "Q27: {}".format(exam.opg27())
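        # Note: DataFrame.to_excel needs an Excel writer engine installed
        # (openpyxl is the usual choice for .xlsx output).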
if excel:
            ans.to_excel(re.sub(r"\.py$", "_answers.xlsx", __file__), index=False)
if csv:
            ans.to_csv(re.sub(r"\.py$", "_answers.csv", __file__), index=False)
if show:
print(ans)
return ans
exam.answers()
|
StarcoderdataPython
|
6679866
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import tensorflow as tf
def tf_slicing(x, axis, slice_range, keep_dim=False):
target_dim = int(x.shape[axis])
ndim = len(x.shape)
begin = list(ndim * [0])
size = x.get_shape().as_list()
if isinstance(slice_range, (list, tuple)):
begin[axis] = slice_range[0]
size[axis] = slice_range[1] - slice_range[0]
elif isinstance(slice_range, numbers.Integral):
begin[axis] = slice_range
size[axis] = 1
else:
        raise ValueError("slice_range must be an int or a (start, stop) pair")
x_slice = tf.slice(x, begin, size)
if size[axis] == 1 and not keep_dim:
x_slice = tf.squeeze(x_slice, axis)
return x_slice
def tf_split(x, num_or_size_splits, axis=0, num=None, keep_dims=False):
x_list = tf.split(x, num_or_size_splits, axis, num)
if not keep_dims:
x_list2 = [tf.squeeze(x_, axis) for x_ in x_list]
return x_list2
return x_list
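if __name__ == "__main__":
    # Minimal usage sketch, assuming TF 2.x eager execution; the example tensor
    # and the expected shapes in the comments are illustrative only.
    x = tf.reshape(tf.range(24), [2, 3, 4])
    print(tf_slicing(x, axis=1, slice_range=(0, 2)).shape)  # (2, 2, 4)
    print(tf_slicing(x, axis=2, slice_range=1).shape)       # (2, 3); the sliced axis is squeezed
    print([t.shape for t in tf_split(x, 3, axis=1)])        # three tensors of shape (2, 4)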
|
StarcoderdataPython
|
292385
|
from .io import *
from .lever import *
from .address import *
|
StarcoderdataPython
|
120768
|
<filename>binocular/hrfc.py
from binocular.reservoir_hrfc import *
from binocular import pattern_functions
from matplotlib.pyplot import *
# from matplotlib2tikz import save as tikz_save
patterns = []
# for p in [53, 54, 10, 36]:
for p in [54, 36]:
patterns.append(pattern_functions.patterns[p])
reservoir = ReservoirHierarchicalRandomFeatureConceptor()
reservoir.run(patterns)
reservoir.recall()
reservoir.denoise()
### PLOTTING ###
allDriverPL, allRecallPL, NRMSE = utils.plot_interpolate_1d(patterns, reservoir.Y_recalls)
for i in range(len(patterns)):
subplot(len(patterns), 1, (i + 1))
# driver and recall
ylim([-1.1, 1.1])
text(0.4, -0.9, round(NRMSE[i], 4), bbox=dict(facecolor='white', alpha=1))
plot(allDriverPL[i, :], color='gray', linewidth=4.0)
plot(allRecallPL[i, :], color='black')
    if i == 0:  # only the first subplot gets the title
title('driver and recall')
print(np.shape(reservoir.Z))
figure()
subplot(4, 1, 1)
ylim([-0.1, 1.1])
plot(reservoir.all['hypo3'].T, label='hypo3')
legend()
subplot(4, 1, 2)
ylim([-0.1, 1.1])
plot(reservoir.all['hypo2'].T, label='hypo2')
legend()
subplot(4, 1, 3)
ylim([-0.1, 1.1])
plot(reservoir.all['hypo1'].T, label='hypo1')
legend()
subplot(4, 1, 4)
ylim([-0.1, 1.1])
plot(reservoir.all['trusts12'].T, 'b', label='trust12')
plot(reservoir.all['trusts23'].T, 'g', label='trust23')
legend()
title('hypotheses and trusts')
figure()
for i in range(len(patterns)):
subplot(len(patterns), 1, (i + 1))
l_idx = 4000 * (i + 1) - 40
r_idx = 4000 * (i + 1)
# original pattern
plot(reservoir.all['driver'][:, l_idx:r_idx].T, color='gray', linewidth=4.0, label='original pattern')
# recall
plot(reservoir.all['y3'][:, l_idx:r_idx].T, color='black', label='recall')
# pattern plus noise
plot(reservoir.all['driver'][:, l_idx:r_idx].T + reservoir.all['noise'][:, l_idx:r_idx].T, 'r', label='driver + noise')
title('denoising')
legend()
show()
|
StarcoderdataPython
|
1726353
|
<gh_stars>0
from setuptools import setup
setup(name='yeelight',
version='1.0',
description='Yeelight Smart Bult Python Package',
url='#',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['yeelight'],
zip_safe=False)
|
StarcoderdataPython
|
1833637
|
<gh_stars>0
import os, time, random
from behave import given, then, when
from lib.Config import Config
from support.Search_Page import Search
from selenium.webdriver.common.keys import Keys
cf = Config()
sp = Search()
baaqmd_qa_eng = cf.get_config('config/config.ini', 'search', 'baaqmd_qa_eng')
baaqmd_prod_main_eng = cf.get_config('config/config.ini', 'search', 'baaqmd_prod_main_eng')
@given(u"I navigate to baaqmd main page")
def step_impl(context):
#if (os.environ['location'] == "qa"):
# context.browser.get(baaqmd_qa_eng)
#elif (os.environ['location'] == "prod"):
context.browser.get(baaqmd_prod_main_eng)
@when(u'I perform search for "{query}" query')
def step_impl(context, query):
sp.search_field(context).clear()
sp.search_field(context).send_keys(query)
@then(u'I perform search for "{query2}" query2')
def step_impl(context, query2):
sp.search_field2(context).clear()
sp.search_field2(context).send_keys(query2)
@then(u'I click Login button')
def step_impl(context):
sp.login_button_click(context)
@then(u'I click Content Editor button from Launchpad')
def step_impl(context):
sp.content_editor_click(context)
@then(u'I perform search for "{id1}" id1')
def step_impl(context, id1):
sp.ce_search_field(context).clear()
sp.ce_search_field(context).send_keys(id1 + Keys.ENTER)
@then(u'I click Send SMS button in Air District Tools tab')
def step_impl(context):
sp.send_sms_click(context)
@then(u'I click Continue button in the SMS Message popup')
def step_impl(context):
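    # Note: switch_to_frame and find_element_by_id are legacy Selenium APIs;
    # Selenium 4 replaces them with switch_to.frame and find_element(By.ID, ...).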
jquery_frame = context.browser.find_element_by_id("jqueryModalDialogsFrame")
context.browser.switch_to_frame(jquery_frame)
sms_frame = sp.sms_frame(context)
context.browser.switch_to_frame(sms_frame)
sp.sms_continue_button_click(context)
|
StarcoderdataPython
|
4968892
|
# This file deletes a list of documents
# from the meta_container collection.
# ObjectId's to be deleted should be in ids.txt
# one id per line.
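#
# Example ids.txt contents (hypothetical ObjectId values, one per line):
#   5f1d7f9c2c8b4a0012345678
#   5f1d7f9c2c8b4a0012345679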
import pathmagic
from db_pool import *
from bson.objectid import ObjectId
def main():
collection = db[envget('db_metadata_collection')]
f = open("ids.txt", "r")
for idd in f.readlines():
clean_id = idd.replace('\n', '')
        print(str(clean_id))
collection.remove({"_id": ObjectId(clean_id)})
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11283931
|
"""
@author: MatteoRaso
"""
from math import pi, sqrt
from random import uniform
from statistics import mean
from typing import Callable
def pi_estimator(iterations: int):
"""
An implementation of the Monte Carlo method used to find pi.
1. Draw a 2x2 square centred at (0,0).
2. Inscribe a circle within the square.
3. For each iteration, place a dot anywhere in the square.
a. Record the number of dots within the circle.
4. After all the dots are placed, divide the dots in the circle by the total.
5. Multiply this value by 4 to get your estimate of pi.
    6. Print the estimated value of pi, the math module's value of pi, and the error
"""
# A local function to see if a dot lands in the circle.
def is_in_circle(x: float, y: float) -> bool:
distance_from_centre = sqrt((x ** 2) + (y ** 2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
proportion = mean(
int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
for _ in range(iterations)
)
# The ratio of the area for circle to square is pi/4.
pi_estimate = proportion * 4
print(f"The estimated value of pi is {pi_estimate}")
print(f"The numpy value of pi is {pi}")
print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
iterations: int,
function_to_integrate: Callable[[float], float],
min_value: float = 0.0,
max_value: float = 1.0,
) -> float:
"""
An implementation of the Monte Carlo method to find area under
a single variable non-negative real-valued continuous function,
say f(x), where x lies within a continuous bounded interval,
say [min_value, max_value], where min_value and max_value are
finite numbers
1. Let x be a uniformly distributed random variable between min_value to
max_value
2. Expected value of f(x) =
(integrate f(x) from min_value to max_value)/(max_value - min_value)
3. Finding expected value of f(x):
a. Repeatedly draw x from uniform distribution
b. Evaluate f(x) at each of the drawn x values
c. Expected value = average of the function evaluations
4. Estimated value of integral = Expected value * (max_value - min_value)
5. Returns estimated value
"""
return mean(
function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
) * (max_value - min_value)
def area_under_line_estimator_check(
iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
"""
Checks estimation error for area_under_curve_estimator function
for f(x) = x where x lies within min_value to max_value
1. Calls "area_under_curve_estimator" function
2. Compares with the expected value
3. Prints estimated, expected and error value
"""
def identity_function(x: float) -> float:
"""
Represents identity function
        >>> [identity_function(x) for x in [-2.0, -1.0, 0.0, 1.0, 2.0]]
[-2.0, -1.0, 0.0, 1.0, 2.0]
"""
return x
estimated_value = area_under_curve_estimator(
iterations, identity_function, min_value, max_value
)
expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************")
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {expected_value}")
print(f"Total error is {abs(estimated_value - expected_value)}")
print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
"""
Area under curve y = sqrt(4 - x^2) where x lies in 0 to 2 is equal to pi
"""
def function_to_integrate(x: float) -> float:
"""
Represents semi-circle with radius 2
>>> [function_to_integrate(x) for x in [-2.0, 0.0, 2.0]]
[0.0, 2.0, 0.0]
"""
return sqrt(4.0 - x**2)
estimated_value = area_under_curve_estimator(
iterations, function_to_integrate, 0.0, 2.0
)
print("******************")
print("Estimating pi using area_under_curve_estimator")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {pi}")
print(f"Total error is {abs(estimated_value - pi)}")
print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
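    # Illustrative runs (a sketch; the iteration counts are arbitrary):
    # pi_estimator(10_000)
    # area_under_line_estimator_check(10_000)
    # pi_estimator_using_area_under_curve(10_000)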
|
StarcoderdataPython
|
4943684
|
<gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDetectorRecipeResult',
'AwaitableGetDetectorRecipeResult',
'get_detector_recipe',
]
@pulumi.output_type
class GetDetectorRecipeResult:
"""
A collection of values returned by getDetectorRecipe.
"""
def __init__(__self__, compartment_id=None, defined_tags=None, description=None, detector=None, detector_recipe_id=None, detector_rules=None, display_name=None, effective_detector_rules=None, freeform_tags=None, id=None, owner=None, source_detector_recipe_id=None, state=None, system_tags=None, time_created=None, time_updated=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if detector and not isinstance(detector, str):
raise TypeError("Expected argument 'detector' to be a str")
pulumi.set(__self__, "detector", detector)
if detector_recipe_id and not isinstance(detector_recipe_id, str):
raise TypeError("Expected argument 'detector_recipe_id' to be a str")
pulumi.set(__self__, "detector_recipe_id", detector_recipe_id)
if detector_rules and not isinstance(detector_rules, list):
raise TypeError("Expected argument 'detector_rules' to be a list")
pulumi.set(__self__, "detector_rules", detector_rules)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if effective_detector_rules and not isinstance(effective_detector_rules, list):
raise TypeError("Expected argument 'effective_detector_rules' to be a list")
pulumi.set(__self__, "effective_detector_rules", effective_detector_rules)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if source_detector_recipe_id and not isinstance(source_detector_recipe_id, str):
raise TypeError("Expected argument 'source_detector_recipe_id' to be a str")
pulumi.set(__self__, "source_detector_recipe_id", source_detector_recipe_id)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if system_tags and not isinstance(system_tags, dict):
raise TypeError("Expected argument 'system_tags' to be a dict")
pulumi.set(__self__, "system_tags", system_tags)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
compartmentId of detector recipe
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
Description for DetectorRecipeDetectorRule
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def detector(self) -> str:
"""
detector for the rule
"""
return pulumi.get(self, "detector")
@property
@pulumi.getter(name="detectorRecipeId")
def detector_recipe_id(self) -> str:
return pulumi.get(self, "detector_recipe_id")
@property
@pulumi.getter(name="detectorRules")
def detector_rules(self) -> Sequence['outputs.GetDetectorRecipeDetectorRuleResult']:
"""
List of detector rules for the detector type for recipe - user input
"""
return pulumi.get(self, "detector_rules")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
displayName
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="effectiveDetectorRules")
def effective_detector_rules(self) -> Sequence['outputs.GetDetectorRecipeEffectiveDetectorRuleResult']:
"""
List of effective detector rules for the detector type for recipe after applying defaults
"""
return pulumi.get(self, "effective_detector_rules")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
Ocid for detector recipe
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def owner(self) -> str:
"""
Owner of detector recipe
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="sourceDetectorRecipeId")
def source_detector_recipe_id(self) -> str:
"""
Recipe Ocid of the Source Recipe to be cloned
"""
return pulumi.get(self, "source_detector_recipe_id")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the resource.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Mapping[str, Any]:
"""
System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). System tags can be viewed by users, but can only be created by the system. Example: `{"orcl-cloud.free-tier-retained": "true"}`
"""
return pulumi.get(self, "system_tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The date and time the detector recipe was created. Format defined by RFC3339.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The date and time the detector recipe was updated. Format defined by RFC3339.
"""
return pulumi.get(self, "time_updated")
class AwaitableGetDetectorRecipeResult(GetDetectorRecipeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDetectorRecipeResult(
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
description=self.description,
detector=self.detector,
detector_recipe_id=self.detector_recipe_id,
detector_rules=self.detector_rules,
display_name=self.display_name,
effective_detector_rules=self.effective_detector_rules,
freeform_tags=self.freeform_tags,
id=self.id,
owner=self.owner,
source_detector_recipe_id=self.source_detector_recipe_id,
state=self.state,
system_tags=self.system_tags,
time_created=self.time_created,
time_updated=self.time_updated)
def get_detector_recipe(detector_recipe_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDetectorRecipeResult:
"""
This data source provides details about a specific Detector Recipe resource in Oracle Cloud Infrastructure Cloud Guard service.
Returns a DetectorRecipe identified by detectorRecipeId
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_detector_recipe = oci.cloudguard.get_detector_recipe(detector_recipe_id=oci_cloud_guard_detector_recipe["test_detector_recipe"]["id"])
```
:param str detector_recipe_id: DetectorRecipe OCID
"""
__args__ = dict()
__args__['detectorRecipeId'] = detector_recipe_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:cloudguard/getDetectorRecipe:getDetectorRecipe', __args__, opts=opts, typ=GetDetectorRecipeResult).value
return AwaitableGetDetectorRecipeResult(
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
description=__ret__.description,
detector=__ret__.detector,
detector_recipe_id=__ret__.detector_recipe_id,
detector_rules=__ret__.detector_rules,
display_name=__ret__.display_name,
effective_detector_rules=__ret__.effective_detector_rules,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
owner=__ret__.owner,
source_detector_recipe_id=__ret__.source_detector_recipe_id,
state=__ret__.state,
system_tags=__ret__.system_tags,
time_created=__ret__.time_created,
time_updated=__ret__.time_updated)
|
StarcoderdataPython
|
4867713
|
#!/usr/bin/env python
import os
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from math import (
log,
exp
)
from matplotlib import pyplot as plt
import util
def assess_threshold_and_decide(
np_matrix_traj_by_time,
curve_owner,
state_no,
output_dir,
data_class,
):
fig = plt.figure(1)
ax = fig.add_subplot(111)
trial_amount = np_matrix_traj_by_time.shape[0]
from matplotlib.pyplot import cm
color=iter(cm.rainbow(np.linspace(0, 1, trial_amount)))
for row_no in range(np_matrix_traj_by_time.shape[0]):
c=next(color)
trial_name = curve_owner[row_no]
gradient = np_matrix_traj_by_time[row_no][0, 1:]-np_matrix_traj_by_time[row_no][0, :-1]
ax.plot(gradient.tolist()[0], color=c, label='trial \"%s\"'%curve_owner[row_no])
    title = 'trial class \"%s\": gradient of log-likelihood output by skill model %s'%(data_class, state_no, )
ax.set_title(title)
ax.set_xlabel('time step')
    ax.set_ylabel('gradient of log probability')
ax.legend(loc='best')
fig.tight_layout()
fig.savefig(os.path.join(output_dir, title+".eps"), format="eps")
fig.savefig(os.path.join(output_dir, title+".png"), format="png")
plt.close(1)
def run(model_save_path,
figure_save_path,
threshold_c_value,
trials_group_by_folder_name,
data_class,
):
output_dir = os.path.join(
figure_save_path,
"gradient_of_log_likelihood_plot",
data_class,
)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
trials_group_by_folder_name = util.make_trials_of_each_state_the_same_length(trials_group_by_folder_name)
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
threshold_constant = 10
threshold_offset = 10
model_group_by_state = {}
for state_no in range(1, state_amount+1):
try:
model_group_by_state[state_no] = joblib.load(model_save_path+"/model_s%s.pkl"%(state_no,))
except IOError:
print 'model of state %s not found'%(state_no,)
continue
for state_no in model_group_by_state:
all_log_curves_of_this_state = []
curve_owner = []
for trial_name in trials_group_by_folder_name:
curve_owner.append(trial_name)
one_log_curve_of_this_state = []
one_log_curve_of_this_state = util.fast_log_curve_calculation(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
all_log_curves_of_this_state.append(one_log_curve_of_this_state)
# use np matrix to facilitate the computation of mean curve and std
np_matrix_traj_by_time = np.matrix(all_log_curves_of_this_state)
assess_threshold_and_decide(
np_matrix_traj_by_time,
curve_owner,
state_no,
output_dir,
data_class,
)
|
StarcoderdataPython
|
1794203
|
<reponame>demasterr/GH_PersonalityInference
import mysql.connector
import pandas as pd
from sklearn import preprocessing
class ReadDB:
def __init__(self, *, config, db_config):
self.config = config
self.db_config = db_config
METHOD_COLUMNS = {
'pi': {
'pi_openness': 'PI_Openness',
'pi_conscientiousness': 'PI_Conscientiousness',
'pi_extraversion': 'PI_Extraversion',
'pi_agreeableness': 'PI_Agreeableness',
'pi_neuroticism': 'PI_Neuroticism'
},
'yarkoni': {
'liwc_openness_yarkoni': 'LIWC_Openness_Yarkoni',
'liwc_conscientiousness_yarkoni': 'LIWC_Conscientiousness_Yarkoni',
'liwc_extraversion_yarkoni': 'LIWC_Extraversion_Yarkoni',
'liwc_agreeableness_yarkoni': 'LIWC_Agreeableness_Yarkoni',
'liwc_neuroticism_yarkoni': 'LIWC_Neuroticism_Yarkoni'
},
'golbeck': {
'liwc_openness_golbeck': 'LIWC_Openness_Golbeck',
'liwc_conscientiousness_golbeck': 'LIWC_Conscientiousness_Golbeck',
'liwc_extraversion_golbeck': 'LIWC_Extraversion_Golbeck',
'liwc_agreeableness_golbeck': 'LIWC_Agreeableness_Golbeck',
'liwc_neuroticism_golbeck': 'LIWC_Neuroticism_Golbeck'
}
}
PI_COLUMNS = ['PI_Openness', 'PI_Conscientiousness', 'PI_Extraversion', 'PI_Agreeableness',
'PI_Neuroticism']
YARKONI_COLUMNS = ['LIWC_Openness_Yarkoni', 'LIWC_Conscientiousness_Yarkoni',
'LIWC_Extraversion_Yarkoni', 'LIWC_Agreeableness_Yarkoni',
'LIWC_Neuroticism_Yarkoni']
GOLBECK_COLUMNS = ['LIWC_Openness_Golbeck', 'LIWC_Conscientiousness_Golbeck',
'LIWC_Extraversion_Golbeck', 'LIWC_Agreeableness_Golbeck',
'LIWC_Neuroticism_Golbeck']
def _create_select(self, where, methods):
select = ''
for method in methods:
for nth in range(0, 5):
select += self._create_select_sub(method, nth) + ',\n'
return select[:-2] + '\nFROM {}\n{}'.format(self.config['table'], where)
def _create_select_sub(self, method, nth):
trait = list(self.METHOD_COLUMNS[method].items())[nth]
return '{}{{}} as {}'.format(*trait)
def read_db(self, where, methods=('pi', 'yarkoni', 'golbeck'), raw=True, decimals=1,
user_id=True, scaling=True):
suffix = '_raw' if raw else ''
where = 'WHERE ' + where if where else ''
select = 'SELECT id as user_id,\n' if user_id else 'SELECT \n'
query = select + self._create_select(where, methods).format(*(suffix,) * 15)
results = self.execute_query(query)
cols = [column for sublist in
[list(self.METHOD_COLUMNS[method].values()) for method in methods] for column in
sublist]
if user_id:
cols.insert(0, 'user_id')
dataframe = pd.DataFrame(results, columns=cols)
dataframe = dataframe.apply(pd.to_numeric)
if user_id:
cols.remove('user_id')
if scaling:
scaler = preprocessing.MinMaxScaler(feature_range=(0, 100))
dataframe[cols] = scaler.fit_transform(dataframe[cols])
return dataframe.round(decimals=decimals), cols
def execute_query(self, query):
cnx = mysql.connector.connect(**self.db_config)
cursor = cnx.cursor()
cursor.execute(query)
results = cursor.fetchall()
cnx.close()
return results
|
StarcoderdataPython
|
9688685
|
<reponame>aletuf93/logproj
# -*- coding: utf-8 -*-
#import datetime as date
#import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as date
import numpy as np
# import stat packages
from fbprophet import Prophet
#from fbprophet.diagnostics import cross_validation
from fbprophet.plot import plot_plotly
from sklearn.metrics import mean_squared_error
#import graph packages
#import matplotlib.pyplot as plt
import plotly.offline as py
#import industrial packages
import logproj.stat_time_series as ts
from logproj.utilities import creaCartella
# %% PREDICTION FBPROPHET
def predictWithFBPROPHET(D_series, timeVariable, seriesVariable, prediction_results, titolo, samplingInterval='week', predictionsLength=52):
    #D_series is a dataframe containing the time series and the values
#timeVariable is a string with the name of the column of the dataframe containing timestamps
#seriesVariable is a string with the name of the column of the dataframe containing values
#predictionsLength is an int with the number of periods to predict
#prediction_results is the path where to save the output
#samplingInterval if week it groups the series for week
# titolo is the title to save the output figure
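    # A hypothetical usage sketch (the dataframe and column names below are
    # assumptions, not part of logproj):
    #   m, fig_forecast, fig_components, mse = predictWithFBPROPHET(
    #       D_orders, 'TIMESTAMP_IN', 'QUANTITY', './prediction_results',
    #       titolo='orders', samplingInterval='week', predictionsLength=52)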
    # extract the time series
timeSeries=pd.DataFrame(D_series[[timeVariable,seriesVariable]])
timeSeries_analysis=timeSeries.set_index(timeVariable).resample('D').sum()
timeSeries_analysis[timeVariable]=timeSeries_analysis.index.values
if samplingInterval=='month':
timeSeries_analysis=ts.raggruppaPerMese(timeSeries_analysis,timeVariable,seriesVariable,'sum')
elif samplingInterval=='week':
timeSeries_analysis=ts.raggruppaPerSettimana(timeSeries_analysis,timeVariable,seriesVariable,'sum')
elif samplingInterval=='day':
timeSeries_analysis=timeSeries_analysis[seriesVariable]
#prepare input dataframe
timeSeries_analysis=pd.DataFrame([timeSeries_analysis.index.values, timeSeries_analysis]).transpose()
timeSeries_analysis.columns=['ds','y']
m = Prophet()
m.fit(timeSeries_analysis)
#make predictions
future = m.make_future_dataframe(periods=predictionsLength)
#future.tail()
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
#evaluate model goodness
MSE = mean_squared_error(timeSeries_analysis.y, forecast.yhat[0:len(timeSeries_analysis.y)])
# Output figure in matplotlib
forecast_fig = m.plot(forecast)
components_fig = m.plot_components(forecast)
#Output with plotly
#py.init_notebook_mode()
fig = plot_plotly(m, forecast) # This returns a plotly Figure
py.iplot(fig)
py.plot(fig, filename = f"{prediction_results}\\prophet_{titolo}.html", auto_open=False)
return m, forecast_fig, components_fig, MSE
# %% PREDICTIONS ARIMA
def predictWithARIMA(D_series, seriesVariable, samplingInterval='week', date_field='TIMESTAMP_IN',titolo='',signifAlpha=0.05,maxValuesSelected=2):
#this function applies predictions using ARIMA models
#D_series is the reference dataframe
#date_field is the string with the name of the column containing the datetime series
#seriesVariable is the string with the name of the column containing the series
#samplingInterval if week it groups the series for week
#signifAlpha is the significance level (0.1 , 0.05, 0.01) to accept or reject the null hypothesis of Dickey fuller
#maxValuesSelected int defining the number of significant lags to consider in ACF and PACF
#the function returns
# fig_CF with the PACF and ACF figure
#figure_forecast the forecast figure,
#figure_residuals the residual figure,
#resultModel the model resulting parameters
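    # A hypothetical usage sketch (the dataframe and column names below are
    # assumptions, not part of logproj):
    #   stationary_model, fig_CF, fig_forecast, fig_residuals, resultModel = predictWithARIMA(
    #       D_orders, seriesVariable='QUANTITY', samplingInterval='week',
    #       date_field='TIMESTAMP_IN', titolo='orders')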
    # extract the time series
timeSeries=pd.DataFrame(D_series[[date_field,seriesVariable]])
timeSeries_analysis=timeSeries.set_index(date_field).resample('D').sum()
timeSeries_analysis[date_field]=timeSeries_analysis.index.values
if samplingInterval=='month':
timeSeries_analysis=ts.raggruppaPerMese(timeSeries_analysis,date_field,seriesVariable,'sum')
elif samplingInterval=='week':
timeSeries_analysis=ts.raggruppaPerSettimana(timeSeries_analysis,date_field,seriesVariable,'sum')
elif samplingInterval=='day':
timeSeries_analysis=timeSeries_analysis[seriesVariable]
#transform series to stationarity
seriesVariable='count_TIMESTAMP_IN'
stationary_series, stationary_model = ts.transformSeriesToStationary(timeSeries_analysis,signifAlpha=signifAlpha)
    # TODO: add the stationary model output and the return value
    # if the series was successfully transformed to stationary, continue
if len(stationary_series)>1:
#detect ACF and PACF
fig_CF, D_acf_significant, D_pacf_significant = ts.ACF_PACF_plot(stationary_series)
params = ts.returnsignificantLags(D_pacf_significant, D_acf_significant, maxValuesSelected)
        # Run the ARIMA fit
figure_forecast, figure_residuals, resultModel = ts.SARIMAXfit(stationary_series, params)
return stationary_model, fig_CF, figure_forecast, figure_residuals, resultModel
else: #cannot make the series stationary, cannot use ARIMA
return [], [], [], [], []
# %%
def LOOP_PREDICT_SARIMA(D_time, date_field, qtyVariable, countVariable,prediction_results_path,filterVariable=[], samplingInterval='week'):
def nestedPredictionSARIMA(D_results, ss, nomecartella):
#create folder with results
_, current_dir_results = creaCartella(prediction_results_path,f"{str(nomecartella)}")
print(f"***********{ss}*************")
if len(ss)>0:
D_series=D_time[D_time[filterVariable]==ss]
else:
D_series=D_time
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "w")
file.write(f"{str(date.datetime.now())} STARTING PREDICTIONS \r")
file.close()
# set initial
tot_qty = np.nansum(D_series[qtyVariable])
tot_lns = np.nansum(D_series[countVariable])
QTY_STATIONARY_TRANSFORM=''
QTY_SARIMA_MODEL=''
QTY_SARIMA_ACCURACY = ''
LNS_STATIONARY_TRANSFORM=''
LNS_SARIMA_MODEL=''
LNS_SARIMA_ACCURACY = ''
if len(D_series)>=12: #I need at least 12 points (e.g. 1 year expressed in months)
# predict quantities
stationary_model, fig_CF, figure_forecast, figure_residuals, resultModel = predictWithARIMA(D_series,
seriesVariable=qtyVariable,
samplingInterval=samplingInterval,
date_field=date_field,
signifAlpha=0.05,
maxValuesSelected=2)
if len(stationary_model)>0:
#save figures
fig_CF.get_figure().savefig(f"{current_dir_results}\\{ss}_quantities_CF.png")
figure_forecast.savefig(f"{current_dir_results}\\{ss}_quantities_ARIMA_forecast.png")
figure_residuals.savefig(f"{current_dir_results}\\{ss}_quantities_ARIMA_residuals.png")
plt.close('all')
#save params
QTY_STATIONARY_TRANSFORM=stationary_model
QTY_SARIMA_MODEL=str({'p':resultModel['p'],'d':resultModel['d'],'q':resultModel['q']})
QTY_SARIMA_ACCURACY = resultModel['aic']
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} quantities: Predictions built\r")
file.close()
else:
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} quantities: no stationary series, no ARIMA Predictions built\r")
file.close()
#predict lines
stationary_model, fig_CF, figure_forecast, figure_residuals, resultModel = predictWithARIMA(D_series,
seriesVariable=countVariable,
samplingInterval=samplingInterval,
date_field=date_field,
signifAlpha=0.05,
maxValuesSelected=2)
            if len(stationary_model) >0: # if the predictions succeeded
#save figures
fig_CF.get_figure().savefig(f"{current_dir_results}\\{ss}_lines_CF.png")
figure_forecast.savefig(f"{current_dir_results}\\{ss}_lines_ARIMA_forecast.png")
figure_residuals.savefig(f"{current_dir_results}\\{ss}_lines_ARIMA_residuals.png")
plt.close('all')
LNS_STATIONARY_TRANSFORM=stationary_model
LNS_SARIMA_MODEL=str({'p':resultModel['p'],'d':resultModel['d'],'q':resultModel['q']})
LNS_SARIMA_ACCURACY = resultModel['aic']
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} lines: Predictions built\r")
file.close()
else:
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} lines: no stationary series, no ARIMA Predictions built\r")
file.close()
else:
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "w")
file.write(f"{str(date.datetime.now())} Not ehough input points to build a time series\r")
file.close()
# append results to dataframe
D_results=D_results.append(pd.DataFrame([[ss, tot_qty, tot_lns, QTY_STATIONARY_TRANSFORM, QTY_SARIMA_MODEL,
QTY_SARIMA_ACCURACY, LNS_STATIONARY_TRANSFORM, LNS_SARIMA_MODEL,
LNS_SARIMA_ACCURACY]], columns = D_results.columns))
return D_results
reportFilename='report_SARIMA'
resultscolumn=['SERVICETYPE','QUANTITIES','LINES','QTY_STATIONARY_TRANSFORM','QTY_SARIMA_MODEL','QTY_SARIMA_ACCURACY',
'LNS_STATIONARY_TRANSFORM','LNS_SARIMA_MODEL','LNS_SARIMA_ACCURACY']
D_results=pd.DataFrame(columns=resultscolumn)
print("CIAOOOO")
print(D_results)
    # generate the global trends
D_results = nestedPredictionSARIMA(D_results,[], nomecartella='globalResults')
if len(filterVariable)>0:
        # iterate over the product families
st = list(set(D_time[filterVariable]))
for ss in st:
#ss='zz'
try:
D_results = nestedPredictionSARIMA(D_results, ss, nomecartella=ss)
except Exception as e:
print(f"*=*=*=*=*=*=ERROR*=*=*= {e}")
#ss='zz'
# SAVE dataframe results
D_results.to_excel(f"{prediction_results_path}\\pred_results_SARIMA.xlsx")
return True
# %%
#PROD_PREDICT_SARIMA(D_time, qtyVariable, countVariable,prediction_results_path,filterVariable=[], samplingInterval='week'):
def LOOP_PREDICT_FBPROPHET(D_time, timeVariable, qtyVariable, countVariable, prediction_results_path, filterVariable=[], samplingInterval='week' ):
def nestedPredictionFBprophet(D_results, ss, nomecartella):
#create folder with results
_, current_dir_results = creaCartella(prediction_results_path,f"{str(nomecartella)}")
print(f"***********{ss}*************")
if len(ss)>0:
D_series=D_time[D_time[filterVariable]==ss]
else:
D_series=D_time
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} STARTING PREDICTIONS \r")
file.close()
# set initial
tot_qty = np.nansum(D_series[qtyVariable])
tot_lns = np.nansum(D_series[countVariable])
MSE_QTY = ''
MSE_LNS = ''
if len(D_series)>=12: #I need at least 12 points (e.g. 1 year expressed in months)
# predict quantities
m, forecast_fig, components_fig, MSE_result = predictWithFBPROPHET(D_series,
timeVariable,
qtyVariable,
current_dir_results,
samplingInterval=samplingInterval, predictionsLength=52, titolo='qty')
forecast_fig.savefig(f"{current_dir_results}\\{ss}_quantities_FBPROPHET_forecast.png")
components_fig.savefig(f"{current_dir_results}\\{ss}_quantities_FBPROPHET_comp.png")
plt.close('all')
#save params
MSE_QTY=MSE_result
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} quantities: Predictions built\r")
file.close()
            # predict lines
            m, forecast_fig, components_fig, MSE_result = predictWithFBPROPHET(D_series, timeVariable, countVariable, current_dir_results, samplingInterval=samplingInterval, predictionsLength=52, titolo='lines')
            forecast_fig.savefig(f"{current_dir_results}\\{ss}_lines_FBPROPHET_forecast.png")
            components_fig.savefig(f"{current_dir_results}\\{ss}_lines_FBPROPHET_comp.png")
plt.close('all')
#save params
MSE_LNS=MSE_result
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "a")
file.write(f"{str(date.datetime.now())} quantities: Predictions built\r")
file.close()
else:
#save report txt
file = open(f"{current_dir_results}\\{reportFilename}.txt", "w")
file.write(f"{str(date.datetime.now())} Not ehough input points to build a time series\r")
file.close()
# append results to dataframe
D_results=D_results.append(pd.DataFrame([[ss, tot_qty, tot_lns, MSE_QTY, MSE_LNS]], columns = D_results.columns))
return D_results
reportFilename='report_fbProphet'
resultscolumn=['SERVICETYPE','QUANTITIES','LINES','MSE_QTY','MSE_LNS']
D_results=pd.DataFrame(columns=resultscolumn)
    # generate the global trends
D_results = nestedPredictionFBprophet(D_results,[], nomecartella='globalResults')
if len(filterVariable)>0:
        # iterate over the product families
st = list(set(D_time[filterVariable]))
for ss in st:
#ss='zz'
try:
D_results = nestedPredictionFBprophet(D_results, ss, nomecartella=ss)
except Exception as e:
print(f"*=*=*=*=*=*=ERROR*=*=*= {e}")
# SAVE dataframe results
D_results.to_excel(f"{prediction_results_path}\\pred_results_FBPROPHET.xlsx")
return True
|
StarcoderdataPython
|
3429197
|
<gh_stars>1-10
from .tracker import GAStatistics
|
StarcoderdataPython
|
9624973
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
from bs4.element import Tag
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime
from dateutil import parser as dtparse
from enum import Enum
import re
import typing
from urllib import parse as urlparse
class SnowflakeError(Exception):
pass
# ignore comment for mypy: https://github.com/python/mypy/issues/5374
@dataclass # type: ignore[misc]
class Community(metaclass=ABCMeta):
netloc: str
@classmethod
def __all_subclasses__(cls) -> typing.List['Community']:
# TODO make this real
return [FTM(), MTF(), Genderqueer(), Transgender()]
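    # One possible way to "make this real" (a sketch, assuming every community
    # is a direct or indirect, instantiable subclass of Community):
    #   subs = []
    #   for sub in cls.__subclasses__():
    #       subs.append(sub())
    #       subs.extend(sub.__all_subclasses__())
    #   return subs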
@classmethod
def from_netloc(cls, netloc: str) -> 'Community':
communities: typing.List[Community] = cls.__all_subclasses__()
result: typing.List[Community] = [
c for c in communities if c.netloc == netloc
]
if not result:
raise SnowflakeError('Unknown community')
return result[0]
@abstractmethod
def comment_content(self, comment: Tag) -> str:
pass
@abstractmethod
def comment_published(self, comment: Tag) -> datetime:
pass
@abstractmethod
def entry_content(self, tree: BeautifulSoup) -> str:
pass
@abstractmethod
def entry_list_prev_link(self, tree: BeautifulSoup) -> typing.Optional[str]:
pass
@abstractmethod
def entry_published(self, tree: BeautifulSoup) -> datetime:
pass
@abstractmethod
def entry_username(self, tree: BeautifulSoup) -> str:
pass
@abstractmethod
def is_comment_deleted(self, comment: Tag) -> bool:
pass
@abstractmethod
def is_comment_zipped(self, comment: Tag) -> bool:
pass
def to_entry_url(self, id: str) -> str:
return f'https://{self.netloc}.livejournal.com/{id}.html'
@dataclass
class FTM(Community):
netloc: str = 'ftm'
def comment_content(self, comment: Tag) -> str:
return comment.select_one('.comment-text').text
def comment_published(self, comment: Tag) -> datetime:
return datetime.strptime(
comment.select_one('.comment-permalink').text,
'%Y-%m-%d %I:%M %p (UTC)')
def entry_content(self, tree: BeautifulSoup) -> str:
return tree.select_one('.entry-text .entry-content').text
def entry_list_prev_link(self, tree: BeautifulSoup) -> typing.Optional[str]:
ul = tree.find('ul', class_=re.compile('page-nav'))
if not ul:
return None
link = ul.find('a', text='Next 10')
return link['href'] if link else None
def entry_published(self, tree: BeautifulSoup) -> datetime:
return dtparse.parse(
tree.select_one('.entry-text .entry-date abbr')['title'])
def entry_username(self, tree) -> str:
return tree.select_one('.entry-text .username b').text
def is_comment_deleted(self, comment: Tag) -> bool:
return 'deleted' in comment['class']
def is_comment_zipped(self, comment: Tag) -> bool:
return not comment.select_one('.comment-permalink')
@dataclass
class MTF(Community):
netloc: str = 'mtf'
def comment_content(self, comment: Tag) -> str:
return comment.select('div')[1].text
def comment_published(self, comment: Tag) -> datetime:
return datetime.strptime(
comment.find(title=re.compile('journal')).text,
'%Y-%m-%d %I:%M %p (UTC)')
def entry_content(self, tree: BeautifulSoup) -> str:
st = tree.select('article.entry-content')
if st:
return st[0].text
raw = tree.select_one('table.s2-entrytext tr:nth-child(2)').text
return re.sub(rf'{self.netloc}\[\S+\]', '', raw, count=1)
def entry_list_prev_link(self, tree: BeautifulSoup) -> typing.Optional[str]:
el = tree.find("a", href=True, text="earlier")
return el['href'] if el else None
def entry_published(self, tree: BeautifulSoup) -> datetime:
st = tree.select('article time')
if st:
return dtparse.parse(st[0].text)
contents = tree.select_one('table.s2-entrytext td.index').contents
raw = "%s %s" % (contents[0], contents[1])
raw = re.sub(r'\<\/?b\>', '', raw)
return dtparse.parse(re.sub(r'\[|\|', '', raw))
def entry_username(self, tree: BeautifulSoup) -> str:
st = tree.select('article dl.author dt')
if st:
return st[0]['lj:user']
return tree.select('table.s2-entrytext td font')[1].text
def is_comment_deleted(self, comment: Tag) -> bool:
return ((not comment.has_attr('class') or
'ljcmt_full' not in comment['class']) and
comment.text == '(Deleted comment)') or bool(
comment.select('.b-leaf-deleted'))
def is_comment_zipped(self, comment: Tag) -> bool:
return (not comment.has_attr('class') or
'ljcmt_full' not in comment['class'])
@dataclass
class Genderqueer(MTF):
netloc: str = 'genderqueer'
def to_entry_url(self, id: str) -> str:
return f'https://{self.netloc}.livejournal.com/{id}.html?nojs=1'
def comment_content(self, comment: Tag) -> str:
return comment.select_one('.b-leaf-article').text
def comment_published(self, comment: Tag) -> datetime:
ts = comment.select_one('div.comment[data-updated-ts]')
if ts:
return datetime.fromtimestamp(int(ts['data-updated-ts']))
raise SnowflakeError(f'no timestamp: {comment}')
def is_comment_zipped(self, comment: Tag) -> bool:
return bool(comment.select('.b-leaf-collapsed'))
@dataclass
class Transgender(Genderqueer):
netloc: str = 'transgender'
def entry_list_prev_link(self, tree: BeautifulSoup) -> typing.Optional[str]:
el = tree.select_one('.j-page-nav-item-prev a[href]')
return el['href'] if el else None
def find_community(html: Tag) -> Community:
self_url = html.select_one('meta[property="og:url"]')['content']
parsed = urlparse.urlparse(self_url)
return Community.from_netloc(parsed.netloc.split(".")[0])
|
StarcoderdataPython
|
11322118
|
#Adding directory to the path where Python searches for modules
import os
import sys
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
#Importing common crypto module
import common
import block
'''
- Edit the ciphertext using key, nonce, counter, offset and newtext
- Now think of an attacker who has ciphertext, can choose offset and newtext but NOT the key, nonce or counter. This attacker must be able to recover plaintext
Questions:
- What does "but hold on to it" mean for the random key I am not supposed to know?
- Internally the program should use the key for decryption, but attacker doesn't have this
- Should I be using random nonces during encryption or does it not matter, since attacker won't have it anyway.
- Doesn't matter
- If nonces are random per block, isn't that the correct way to implement CTR? Why is this breakable?
- nonces are generated per-message, not per-block. if you generate them per block you have to transmit a list of nonces that's as long as your original message
'''
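# A minimal sketch (not part of the original exercise code) of why an edit API
# breaks CTR: editing offset i with a known byte re-encrypts it under the same
# keystream byte, so keystream[i] = edited_ct[i] XOR known and
# plaintext[i] = original_ct[i] XOR keystream[i]. The 'edit' argument below is
# the hypothetical oracle edit(ciphertext, offset, newtext) -> new ciphertext.
def recover_byte_with_edit_oracle(edit, ciphertext, offset, known='A'):
    # ask the oracle to overwrite the byte at 'offset' with a byte we know
    edited = edit(ciphertext, offset, known)
    # the oracle reused the keystream, so XOR recovers the keystream byte
    keystream_byte = ord(edited[offset]) ^ ord(known)
    # XOR the original ciphertext byte with the keystream byte -> plaintext byte
    return chr(ord(ciphertext[offset]) ^ keystream_byte)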
if __name__ == "__main__":
filename= '25.txt'
content= common.openfile(filename)
key= '71e6efcfb44e362b6e14f7abbecf5503'
nonce = '0'*8
enc_string= block.ctr_encrypt_string(''.join(content), key, nonce)
plaintext= ''
for offset in range(0, len(enc_string)):
for guess in range(0,127):
t1= block.decrypt_ctr_byte(enc_string[offset], offset, chr(guess))
if t1 is not None:
plaintext += chr(guess)
break
else:
continue
print plaintext
|
StarcoderdataPython
|
1817979
|
import dsz
MENU_TEXT = 'List all network interfaces'
def main():
dsz.ui.Echo('Running network interface commands...', dsz.GOOD)
dsz.control.echo.Off()
dsz.cmd.Run('background log devicequery -deviceclass net', dsz.RUN_FLAG_RECORD)
dsz.cmd.Run('background log performance -data NetworkInterface', dsz.RUN_FLAG_RECORD)
dsz.control.echo.On()
if (__name__ == '__main__'):
main()
|
StarcoderdataPython
|
1650079
|
# code generated so it can be copied into MATLAB
code='''
splX = spline(1:length(a),a,1:0.1:length(a));
splY = spline(1:length(b),b,1:0.1:length(b));
fill(splX, splY, [0.6824 0.8353 0.5059])
hold on
'''
# the file 'in' contains the points we extracted with GeoGebra
fichero = open('in')
lista=fichero.readlines() #'x\n'
pares = []
# process the file, separating the points into x and y
for i in range(0,len(lista),2):
x = list(map(float, lista[i].rstrip().split()))
y = list(map(float, lista[i+1].rstrip().split()))
print('a= [', end=' ')
for i in x:
print(i, end=' ')
print('];')
print('b=[', end=' ')
for i in y:
print(i, end=' ')
print('];')
print(code)
# the final result is the MATLAB code we can copy, which will display the figure we chose
fichero.close()
|
StarcoderdataPython
|
11275150
|
<filename>file_formats/gff_intersect.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/shenwei356/bio_scripts
# Author : <NAME>
# Contact : <EMAIL>
# LastUpdate : 2015-06-26
from __future__ import print_function, division
import argparse
import os
import shutil
import sys
import gzip
from collections import defaultdict, Counter
from bx.intervals.intersection import Intersecter, Interval
parser = argparse.ArgumentParser(description="gff intersect",
epilog="https://github.com/shenwei356/bio_scripts")
parser.add_argument('query', type=str, help='query gff file')
parser.add_argument('subject', type=str, help='subject gff file')
parser.add_argument('-e', '--embeded', action='store_true',
help='see what genes (query) contained in specific regions (subject)')
parser.add_argument('-c', '--cover', action='store_true',
help='see what genes (query) containing specific regions (subject)')
parser.add_argument('-s', '--split', action='store_true',
help='split results into multiple files')
parser.add_argument('-o', '--split-dir', type=str,
help='directory for split results')
parser.add_argument('-eu', '--extend-upstream', type=int, default=0,
help='extend N bases in the upstream [0]')
parser.add_argument('-ed', '--extend-downstream', type=int, default=0,
help='extend N bases in the downstream [0]')
args = parser.parse_args()
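# Hypothetical invocation (file names are assumptions, not part of the script):
#   python gff_intersect.py query.gff subject.gff.gz -e -eu 100 -ed 50 > embedded.out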
if args.extend_upstream and args.extend_upstream <= 0:
sys.stderr.write('value of option --extend-upstream should be greater than 0\n')
sys.exit(1)
if args.extend_downstream and args.extend_downstream <= 0:
sys.stderr.write('value of option --extend-downstream should be greater than 0\n')
sys.exit(1)
if args.cover and args.embeded:
sys.stderr.write('only one of option -e/--embeded and -c/--cover allowed\n')
sys.exit(1)
sys.stderr.write('building tree from {}\n'.format(args.subject))
trees = dict()
with gzip.open(args.subject) if args.subject.endswith('.gz') else open(args.subject) as fh:
genome = ''
for line in fh:
if line.isspace() or line[0] == '#':
continue
data = line.rstrip().split('\t')
if len(data) != 9:
sys.stderr.write('number of columns != 9: {}'.format(line))
g, start, end, strand = data[0], int(data[3]), int(data[4]), data[6]
if g != genome:
genome = g
trees[genome] = Intersecter()
if strand == '+':
start -= args.extend_upstream
end += args.extend_downstream
else:
start -= args.extend_downstream
end += args.extend_upstream
if not args.embeded and strand == '-': # complement strand
start, end = -end, -start
trees[genome].add_interval(Interval(start, end, value=data))
if args.split:
if args.split_dir is None:
outdir = '{}.intersect@{}'.format(os.path.normpath(os.path.basename(args.query)),
os.path.normpath(os.path.basename(args.subject)))
else:
outdir = args.split_dir
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
sys.stderr.write('querying\n')
with gzip.open(args.query) if args.query.endswith('.gz') else open(args.query) as fh:
for line in fh:
if line.isspace() or line[0] == '#':
continue
data = line.rstrip().split('\t')
if len(data) != 9:
sys.stderr.write('number of columns != 9: {}'.format(line))
genome, start, end, strand, product = data[0], int(data[3]), int(data[4]), data[6], data[8]
if genome not in trees:
continue
overlaps = trees[genome].find(start, end)
if len(overlaps) == 0:
continue
overlap_data, stats = list(), Counter()
for x in overlaps:
s, e = x.start, x.end
if args.embeded:
strand2 = '.'
elif s > 0:
strand2 = '+'
else: # complement strand
s, e = -x.end, -x.start
strand2 = '-'
overlap, t = 0, ''
if s <= start:
if e >= end:
# start ======== end
# s ------------- e
overlap = end - start + 1
t = 'embed'
if args.cover:
continue
else:
# start ======== end
# s ------ e
if args.embeded or args.cover:
continue
overlap = e - start + 1
t = 'overlap.downstream' if strand == '+' else 'overlap.upstream'
else:
if e >= end:
# start ======== end
# s ------ e
if args.embeded or args.cover:
continue
overlap = end - s + 1
t = 'overlap.upstream' if strand == '+' else 'overlap.downstream'
else:
# start ======== end
# s --- e
if args.embeded:
continue
overlap = e - s + 1
t = 'cover'
if args.embeded or args.cover:
frame = '.'
elif strand == '+':
frame = abs(s - start) % 3
else:
frame = abs(e - end) % 3
stats[t] += 1
if args.embeded or args.cover:
overlap_data.append(x.value)
else:
overlap_data.append([str(i) for i in
[data[0], s, e, strand2, overlap, round(100 * overlap / (end - start + 1), 1), t, frame,
x.value[-1]]])
if len(overlap_data) == 0:
continue
if args.split:
fh_out = open(os.path.join(outdir, '{}_{}..{}..{}_{}.gff'.format(genome,
start, end, strand, product.replace('/', '_').replace('"', ''))), 'wt')
fh_out.write('# {}'.format(line))
else:
fh_out = sys.stdout
fh_out.write('>{}'.format(line))
if args.embeded or args.cover:
sorted_overlap_data = sorted(overlap_data, key=lambda o: (o[0], o[1]))
else:
fh_out.write('# summary: {}\n'.format(stats))
fh_out.write(
'\t'.join(['chr', 'start', 'end', 'strand', 'overlap', 'overlap%', 'type', 'frame', 'attribute']) + '\n')
sorted_overlap_data = sorted(overlap_data, key=lambda o: (o[6], o[7], -float(o[5])))
for overlap in sorted_overlap_data:
fh_out.write('\t'.join(overlap) + '\n')
if args.split:
fh_out.close()
|
StarcoderdataPython
|
297767
|
# Importing non-modules that are not used explicitly
from .update import UpdateApplicationClass # noqa
|
StarcoderdataPython
|
1836169
|
<reponame>rootless4real/rpi_ai
import config
import os
import datetime
# Get Google Tasks
def getTasks(dayBool):
if not config.tasksLoaded:
loadTasks()
config.tasksLoaded = True
if dayBool==0:
myTasks = ""
numTasks = 0
with open("tasks_today.txt") as f:
for line in f:
a=line.split(".")
b=a[1].split(":")
c=b[0].strip()
if numTasks == 0:
myTasks = c
else:
myTasks = myTasks + ". Also, you have to " + c
numTasks = numTasks+1
if numTasks==0:
return "You do not have to do anything next today! Relax, man!"
else:
return "You have " + str(numTasks) + " things to do today. You have to " + str(myTasks)
if dayBool==1:
myTasks = ""
numTasks = 0
with open("tasks_nextweek.txt") as f:
for line in f:
a=line.split(".")
b=a[1].split(":")
c=b[0].strip()
if numTasks == 0:
myTasks = c
else:
myTasks = myTasks + ". Also, you have to " + c
numTasks = numTasks+1
if numTasks==0:
return "You do not have to do anything next week! Relax, man!"
else:
return "You have " + str(numTasks) + " things to do next week. You have to " + str(myTasks)
def loadTasks():
os.system("python myTasks2.py -l 'Zack-todo' > current_tasks.txt")
os.system("rm tasks_today.txt")
os.system("touch tasks_today.txt")
today = datetime.date.today()
todayStr = today.strftime("%Y-%m-%d")
os.system("cat current_tasks.txt | grep " + todayStr + " > tasks_today.txt")
os.system("rm tasks_nextweek.txt")
os.system("touch tasks_nextweek.txt")
for x in range(1,7):
today = datetime.date.today()+datetime.timedelta(days=x)
todayStr = today.strftime("%Y-%m-%d")
os.system("cat current_tasks.txt | grep " + todayStr + " >> tasks_nextweek.txt")
|
StarcoderdataPython
|
3297630
|
#!/usr/bin/env python3
import math
import pickle
import sys
word_uses = {}
for line in sys.stdin:
word, uses = line.split()
word_uses[word] = int(uses)
total_words = sum(word_uses.values())
word_freq_log = {}
for word, uses in word_uses.items():
word_freq_log[word] = math.log(float(uses) / total_words)
pickle.dump(word_freq_log, sys.stdout.buffer)
|
StarcoderdataPython
|
9699287
|
from Skin import Skin
from Node import Node
from Animation import Animation
from Scene import Scene
from Mesh import Mesh
from Material import Material
|
StarcoderdataPython
|
6666215
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import os
import random
import sys
import pandas as pd
import numpy as np
# test dictionary of dictionaries similar to json file
swe_dict = {"mem111":"Desi","mem112":"Laura"}
test_dict = dict({"SWE":swe_dict,"NDNYC":{"mem111":"Desi","mem113":"Megan"}})
test_dict.update({"Chambers":{"mem112":"Jacob","mem113":"Megan"}})
this_member = {"connections":{"mem111":"Desi","mem112":"Jacob","mem113":"Megan"}}
def add_attribute (category, attribute, name):
attribute_list = category.keys()
if attribute in attribute_list:
category[attribute].update(name)
else:
category.update({attribute:name})
def category_list (category):
my_dict={}
for key in category.keys():
count = len(category[key])
my_dict.update({key:count})
return my_dict
def get_group_info (category, attribute, member):
if len(member["connections"])<len(category[attribute]):
small=member["connections"]
large = category[attribute]
else:
small= category[attribute]
large= member["connections"]
in_group = []
for friend in small.keys():
if friend in large.keys():
in_group.append(large[friend])
return print("SubCategory: {} /n Total Members: {} /n Connections: {}".format(attribute, len(in_group), in_group))
""" need to figure out how to print the name of a dictionary instead of the
full text
"""
|
StarcoderdataPython
|