id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (string, 7 classes)
---|---|---|
3352908 | import csv
import time
import numpy as np
import torch
from torch import nn
from common.eval_test import evaluate
def arch_search_valid(model, train_data, test_data, corrupt_func, optimizer, lr_scheduler, clip_value=1., batchsize=16,
lam=2, valid_rate=0.5, gpu_id=0, period=None, out_model='out_model.model', log_file='log.csv'):
if period is None:
period = {'max_ite': 200000, 'save': 4000, 'verbose_ite': 100}
start = time.time()
if gpu_id >= 0:
model = model.cuda(gpu_id)
loss_func = nn.MSELoss()
# Data loader
valid_size = int(valid_rate * len(train_data))
print('#(train / valid / test) = (%d, %d, %d)' % (len(train_data) - valid_size, valid_size, len(test_data)))
inds = list(range(len(train_data)))
np.random.shuffle(inds)
train_loader = torch.utils.data.DataLoader(train_data, batchsize,
sampler=torch.utils.data.SubsetRandomSampler(inds[:-valid_size]))
valid_loader = torch.utils.data.DataLoader(train_data, batchsize,
sampler=torch.utils.data.SubsetRandomSampler(inds[-valid_size:]))
# log header
with open(log_file, 'w') as fp:
writer = csv.writer(fp, lineterminator='\n')
header_list = ['epoch', 'iteration', 'train_time', 'lr', 'train_loss', 'valid_loss', 'test_MSE', 'test_PSNR',
'test_SSIM', 'model_params']
header_list += model.asng.log_header(theta_log=True)
writer.writerow(header_list)
train_time = train_loss = valid_loss = 0.
epoch = ite = n = 0
losses = np.zeros(lam)
while ite < period['max_ite']:
epoch += 1
for train_batch, valid_batch in zip(train_loader, valid_loader):
ite_start = time.time()
ite += 1
# ---------- One iteration of the training loop ----------
model.train()
lr_scheduler.step()
X, _ = train_batch
Xv, _ = valid_batch
if gpu_id >= 0:
X = X.cuda(gpu_id)
Xv = Xv.cuda(gpu_id)
optimizer.zero_grad() # Clear gradient
loss_mean = 0.
# Sampling
c_cat, c_int = model.asng.sampling(lam)
# Update weights
for i in range(lam):
# Calculate the prediction of the network
Y = model(c_cat[i], c_int[i], corrupt_func(X))
loss = loss_func(Y, X) # Calculate the MSE loss
loss_mean += loss / lam
train_loss += loss.item()
loss_mean.backward() # Calculate the gradient
del loss_mean
nn.utils.clip_grad_norm_(model.parameters(), clip_value) # Gradient clipping
optimizer.step() # Update the trainable parameters
# Sampling
c_cat, c_int = model.asng.sampling(lam)
# Update theta
with torch.no_grad():
for i in range(lam):
Y = model(c_cat[i], c_int[i], corrupt_func(Xv))
loss = loss_func(Y, Xv).item()
valid_loss += loss
losses[i] = loss
model.p_model_update([c_cat, c_int], losses)
n += lam
train_time += time.time() - ite_start
# --------------------- until here ---------------------
is_save = ite % period['save'] == 0
if ite % period['verbose_ite'] == 0 or is_save or ite == period['max_ite'] or ite == 1:
# Display the training loss
print('epoch:{} iteration:{} elapse_time:{:.04f} lr:{:e} cur_train_loss:{:.04f} cur_valid_loss:{:.04f} '
'delta:{} theta_cat_converge:{:.04f}'
.format(epoch, ite, (time.time() - start) / 60, lr_scheduler.get_lr()[0],
train_loss / n, valid_loss / n, model.asng.delta, model.asng.theta_cat.max(axis=1).mean()))
# Check the test loss
if is_save or ite == period['max_ite']:
# Testing
test_res = evaluate(model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize)
max_params = sum(int(np.prod(param.size())) for param in model.parameters())  # builtin sum: np.sum over a generator is unreliable
model_params = model.get_params_mle()
print('test_MSE:{:.04f} test_PSNR:{:.04f} test_SSIM:{:.04f} param_num:{} param_ratio:{:.04f} active_num:{}'.
format(test_res['MLE_MSE'], test_res['MLE_PSNR'], test_res['MLE_SSIM'], model_params,
model_params / max_params, int(model.is_active.sum())))
print(model.mle_network_string(sep=' ') + '\n')
# Save log
with open(log_file, 'a') as fp:
writer = csv.writer(fp, lineterminator='\n')
log_list = [epoch, ite, train_time, lr_scheduler.get_lr()[0], train_loss / n, valid_loss / n,
test_res['MLE_MSE'], test_res['MLE_PSNR'], test_res['MLE_SSIM'], model_params]
log_list += model.asng.log(theta_log=True)
writer.writerow(log_list)
train_loss = valid_loss = 0.
n = 0
if ite >= period['max_ite']:
break
# Save model
if out_model is not None:
torch.save(model.state_dict(), out_model)
return model, train_time
def train(model, train_data, test_data, corrupt_func, optimizer, lr_scheduler, clip_value=1., batchsize=16,
gpu_id=0, period=None, out_model='out_model.model', log_file='log.csv'):
if period is None:
period = {'max_ite': 200000, 'save': 4000, 'verbose_ite': 100}
start = time.time()
if gpu_id >= 0:
model = model.cuda(gpu_id)
loss_func = nn.MSELoss()
# Data loader
train_loader = torch.utils.data.DataLoader(train_data, batchsize, shuffle=True, drop_last=False)
# log header
with open(log_file, 'w') as fp:
writer = csv.writer(fp, lineterminator='\n')
header_list = ['epoch', 'iteration', 'train_time', 'lr', 'train_loss', 'test_MSE', 'test_PSNR', 'test_SSIM',
'model_params']
writer.writerow(header_list)
train_time = train_loss = 0.
epoch = ite = n = 0
while ite < period['max_ite']:
epoch += 1
for X, _ in train_loader:
ite_start = time.time()
ite += 1
# ---------- One iteration of the training loop ----------
model.train()
lr_scheduler.step()
if gpu_id >= 0:
X = X.cuda(gpu_id)
optimizer.zero_grad() # Clear gradient
# Calculate the prediction of the network
Y = model.forward_mle(corrupt_func(X))
loss = loss_func(Y, X) # Calculate the MSE loss
train_loss += loss.item()
n += 1
loss.backward() # Calculate the gradient
nn.utils.clip_grad_norm_(model.parameters(), clip_value) # Gradient clipping
optimizer.step() # Update the trainable parameters
train_time += time.time() - ite_start
# --------------------- until here ---------------------
is_save = ite % period['save'] == 0
if ite % period['verbose_ite'] == 0 or is_save or ite == period['max_ite'] or ite == 1:
# Display the training loss
print('epoch:{} iteration:{} elapse_time:{:.04f} lr:{:e} cur_loss:{:.04f}'
.format(epoch, ite, (time.time() - start) / 60, lr_scheduler.get_lr()[0], loss))
# Check the test loss
if is_save or ite == period['max_ite']:
# Testing
test_res = evaluate(model, test_data, corrupt_func, gpu_id=gpu_id, batchsize=batchsize)
model_params = model.get_params_mle()
print('test_MSE:{:.04f} test_PSNR:{:.04f} test_SSIM:{:.04f} param_num:{} active_num:{}'.
format(test_res['MLE_MSE'], test_res['MLE_PSNR'], test_res['MLE_SSIM'], model_params,
int(model.is_active.sum())))
print(model.mle_network_string(sep=' ') + '\n')
# Save log
with open(log_file, 'a') as fp:
writer = csv.writer(fp, lineterminator='\n')
log_list = [epoch, ite, train_time, lr_scheduler.get_lr()[0], train_loss / n, test_res['MLE_MSE'],
test_res['MLE_PSNR'], test_res['MLE_SSIM'], model_params]
writer.writerow(log_list)
train_loss = 0.
n = 0
if ite >= period['max_ite']:
break
# Save model
if out_model is not None:
torch.save(model.state_dict(), out_model)
return model, train_time
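# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). The project-specific model
# class must expose asng / forward_mle / get_params_mle / mle_network_string, so no
# stand-in model is built here and the call itself is left commented out; what is
# illustrated is the corruption function, optimizer, scheduler, and period wiring.
# The same wiring applies to arch_search_valid, which additionally uses model.asng.
# ---------------------------------------------------------------------------
# import torch.optim as optim
# from torch.utils.data import TensorDataset
#
# corrupt = lambda x: x + 0.1 * torch.randn_like(x)   # additive Gaussian noise (assumption)
# images = torch.rand(64, 3, 64, 64)                  # toy tensors, illustration only
# train_data = TensorDataset(images, torch.zeros(64))
# test_data = TensorDataset(images[:16], torch.zeros(16))
# optimizer = optim.Adam(model.parameters(), lr=1e-3)
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50000, gamma=0.5)
# period = {'max_ite': 1000, 'save': 500, 'verbose_ite': 100}
# model, train_time = train(model, train_data, test_data, corrupt, optimizer, scheduler,
#                           gpu_id=-1, period=period, out_model=None, log_file='log.csv')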
| StarcoderdataPython |
3357465 | """Pipeline code to run alignments and prepare BAM files.
This works as part of the lane/flowcell process step of the pipeline.
"""
from collections import namedtuple
import os
import toolz as tz
from bcbio import bam, utils
from bcbio.bam import cram
from bcbio.ngsalign import (bowtie, bwa, tophat, bowtie2,
novoalign, snap, star)
# Define a next-generation sequencing tool to plug in:
# align_fn -- runs an aligner and generates SAM output
# galaxy_loc_file -- name of a Galaxy location file to retrieve
# the genome index location
# bam_align_fn -- runs an aligner on a BAM file
# remap_index_fn -- Function that will take the location provided
# from galaxy_loc_file and find the actual location of the index file.
# This is useful for indexes that don't have an associated location file
# but are stored in the same directory structure.
NgsTool = namedtuple("NgsTool", ["align_fn", "bam_align_fn",
"galaxy_loc_file", "remap_index_fn"])
BASE_LOCATION_FILE = "sam_fa_indices.loc"
TOOLS = {
"bowtie": NgsTool(bowtie.align, None, bowtie.galaxy_location_file, None),
"bowtie2": NgsTool(bowtie2.align, None,
bowtie2.galaxy_location_file, bowtie2.remap_index_fn),
"bwa": NgsTool(bwa.align_pipe, bwa.align_bam, bwa.galaxy_location_file, None),
"novoalign": NgsTool(novoalign.align_pipe, novoalign.align_bam,
novoalign.galaxy_location_file, novoalign.remap_index_fn),
"tophat": NgsTool(tophat.align, None,
bowtie2.galaxy_location_file, bowtie2.remap_index_fn),
"samtools": NgsTool(None, None, BASE_LOCATION_FILE, None),
"snap": NgsTool(snap.align, snap.align_bam, snap.galaxy_location_file, snap.remap_index_fn),
"star": NgsTool(star.align, None, None, star.remap_index_fn),
"tophat2": NgsTool(tophat.align, None,
bowtie2.galaxy_location_file, bowtie2.remap_index_fn)}
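# Hedged illustration (not part of bcbio): a new aligner would be wired in by adding one
# more NgsTool entry with the four fields documented above; `myaligner` is a hypothetical
# module name.
#
#   from bcbio.ngsalign import myaligner
#   TOOLS["myaligner"] = NgsTool(myaligner.align,                 # align_fn: FASTQ -> SAM
#                                None,                            # bam_align_fn: BAM input not supported
#                                myaligner.galaxy_location_file,  # galaxy_loc_file
#                                myaligner.remap_index_fn)        # remap_index_fn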
metadata = {"support_bam": [k for k, v in TOOLS.iteritems() if v.bam_align_fn is not None]}
def align_to_sort_bam(fastq1, fastq2, aligner, data):
"""Align to the named genome build, returning a sorted BAM file.
"""
names = data["rgnames"]
align_dir_parts = [data["dirs"]["work"], "align", names["sample"]]
if data.get("disambiguate"):
align_dir_parts.append(data["disambiguate"]["genome_build"])
align_dir = utils.safe_makedir(os.path.join(*align_dir_parts))
aligner_indexes = os.path.commonprefix(tz.get_in(("reference", aligner, "indexes"), data))
if aligner_indexes.endswith("."):
aligner_indexes = aligner_indexes[:-1]
ref_file = tz.get_in(("reference", "fasta", "base"), data)
if fastq1.endswith(".bam"):
data = _align_from_bam(fastq1, aligner, aligner_indexes, ref_file,
names, align_dir, data)
else:
data = _align_from_fastq(fastq1, fastq2, aligner, aligner_indexes, ref_file,
names, align_dir, data)
if data["work_bam"] and utils.file_exists(data["work_bam"]):
bam.index(data["work_bam"], data["config"])
for extra in ["-sr", "-disc"]:
extra_bam = utils.append_stem(data['work_bam'], extra)
if utils.file_exists(extra_bam):
bam.index(extra_bam, data["config"])
return data
def _align_from_bam(fastq1, aligner, align_ref, sam_ref, names, align_dir, data):
assert not data.get("align_split"), "Do not handle split alignments with BAM yet"
config = data["config"]
qual_bin_method = config["algorithm"].get("quality_bin")
if (qual_bin_method == "prealignment" or
(isinstance(qual_bin_method, list) and "prealignment" in qual_bin_method)):
out_dir = utils.safe_makedir(os.path.join(align_dir, "qualbin"))
fastq1 = cram.illumina_qual_bin(fastq1, sam_ref, out_dir, config)
align_fn = TOOLS[aligner].bam_align_fn
if align_fn is None:
raise NotImplementedError("Do not yet support BAM alignment with %s" % aligner)
out = align_fn(fastq1, align_ref, names, align_dir, data)
if isinstance(out, dict):
assert "work_bam" in out
return out
else:
data["work_bam"] = out
return data
def _align_from_fastq(fastq1, fastq2, aligner, align_ref, sam_ref, names,
align_dir, data):
"""Align from fastq inputs, producing sorted BAM output.
"""
config = data["config"]
align_fn = TOOLS[aligner].align_fn
out = align_fn(fastq1, fastq2, align_ref, names, align_dir, data)
# handle align functions that update the main data dictionary in place
if isinstance(out, dict):
assert "work_bam" in out
return out
# handle output of raw SAM files that need to be converted to BAM
else:
work_bam = bam.sam_to_bam(out, config)
data["work_bam"] = bam.sort(work_bam, config)
return data
| StarcoderdataPython |
1786696 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import needed libraries
import glob
import hashlib
import logging.config
import os
import os.path
import pickle
from rdflib import Graph, Namespace, BNode, Literal, URIRef # type: ignore
from rdflib.namespace import RDF, RDFS, OWL # type: ignore
from tqdm import tqdm # type: ignore
from typing import Any, Dict, IO, List, Optional, Tuple, Union
from pkt_kg.utils import *
# set global attributes
obo = Namespace('http://purl.obolibrary.org/obo/')
pkt = Namespace('https://github.com/callahantiff/PheKnowLator/pkt/')
pkt_bnode = Namespace('https://github.com/callahantiff/PheKnowLator/pkt/bnode/')
# logging
log_dir, log, log_config = 'builds/logs', 'pkt_build_log.log', glob.glob('**/logging.ini', recursive=True)
try:
if not os.path.exists(log_dir): os.mkdir(log_dir)
except FileNotFoundError:
log_dir, log_config = '../builds/logs', glob.glob('../builds/logging.ini', recursive=True)
if not os.path.exists(log_dir): os.mkdir(log_dir)
logger = logging.getLogger(__name__)
logging.config.fileConfig(log_config[0], disable_existing_loggers=False, defaults={'log_file': log_dir + '/' + log})
class KGConstructionApproach(object):
"""Class stores different methods that can be used to construct knowledge graph edges.
Two Construction Approaches:
(1) Instance-based: Adds edge data that is not from an ontology by connecting each non-ontology data node to
an instance of an existing ontology class node;
(2) Subclass-based: Adds edge data that is not from an ontology by connecting each non-ontology data node to
an existing ontology class node.
Attributes:
write_location: A string pointing to the 'resources' directory.
Raises:
TypeError: If graph is not an rdflib.graph object.
TypeError: If edge_info and edge_dict are not dictionary objects.
ValueError: If graph, edge_info, edge_dict, or subclass_dict files are empty.
OSError: If there is no subclass_dict file in the resources/construction_approach directory.
"""
def __init__(self, write_location: str) -> None:
self.subclass_dict: Dict = dict()
self.subclass_error: Dict = dict()
# WRITE LOCATION
if write_location is None:
log_str = 'write_location must contain a valid filepath.'; logger.error('ValueError: ' + log_str)
raise ValueError(log_str)
else: self.write_location = write_location
# LOADING SUBCLASS DICTIONARY
file_name = self.write_location + '/construction_*/*.pkl'
if len(glob.glob(file_name)) == 0:
log_str = 'subclass_construction_map.pkl does not exist!'; logger.error('OSError: ' + log_str)
raise OSError(log_str)
elif os.stat(glob.glob(file_name)[0]).st_size == 0:
log_str = 'The input file: {} is empty'.format(glob.glob(file_name)[0])
logger.error('TypeError: ' + log_str); raise TypeError(log_str)
else:
with open(glob.glob(file_name)[0], 'rb') as filepath: # type: IO[Any]
self.subclass_dict = pickle.load(filepath, encoding='bytes')
def maps_node_to_class(self, edge_type: str, entity: str) -> Optional[List]:
"""Takes an entity and checks whether or not it exists in a dictionary of subclass content, such that keys
are non-class entity identifiers (e.g. Reactome identifiers) and values are sets of ontology class identifiers
mapped to that non-class entity. For example:
{'R-HSA-5601843': {'PW_0000001'}, 'R-HSA-77584': {'PW_0000001', 'GO_0008334'}}
Checking whether a non-ontology node identifier is missing from the subclass_map helps catch errors in the
edge sources used to generate the edges. For example, some genes taken from CTD were tagged in the
downloaded data as human when they were not actually human; that error was caught by checking the
identifiers against the subclass_map dict.
Args:
edge_type: A string containing the edge_type (e.g. "gene-pathway").
entity: A string containing a node identifier (e.g. "R-HSA-5601843").
Returns:
None if the non-class entity is not in the subclass_dict; otherwise, the collection of ontology class
identifiers mapped to that non-class entity.
"""
if entity not in self.subclass_dict.keys():
if self.subclass_error and edge_type in self.subclass_error.keys():
if entity not in self.subclass_error[edge_type]: self.subclass_error[edge_type] += [entity]
else: self.subclass_error[edge_type] = [entity]
subclass_map = None
else: subclass_map = self.subclass_dict[entity]
return subclass_map
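# Illustrative behaviour, using the docstring's example mapping ('R-HSA-0000000' is a
# made-up identifier used only to show the miss case):
#   self.subclass_dict = {'R-HSA-5601843': {'PW_0000001'}}
#   self.maps_node_to_class('gene-pathway', 'R-HSA-5601843')  ->  {'PW_0000001'}
#   self.maps_node_to_class('gene-pathway', 'R-HSA-0000000')  ->  None, and the identifier
#   is recorded under self.subclass_error['gene-pathway']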
@staticmethod
def subclass_core_constructor(node1: URIRef, node2: URIRef, relation: URIRef, inv_relation: URIRef) -> Tuple:
"""Core subclass-based edge construction method. Constructs a single edge between to ontology classes as well as
verifies if the user wants an inverse edge created and if so, then this edge is also added to the knowledge
graph. Note that a Bnode is used for subclass construction versus the UUID hash + pkt namespace that is used
for instance-based construction.
Note. We explicitly type each node and each relation/inverse relation. This may seem redundant, but it is
needed in order to ensure consistency between the data after applying the OWL API to reformat the data.
Args:
node1: A URIRef or BNode object containing a subject node.
node2: A URIRef or BNode object containing an object node.
relation: A URIRef object containing an owl:ObjectProperty.
inv_relation: A string containing an inverse relation identifier (i.e. RO_0002200) or None (i.e.
indicating no inverse relation).
Returns:
A list of tuples representing new edges to add to the knowledge graph.
"""
rel_core = n3(node1) + n3(relation) + n3(node2)
u1 = URIRef(pkt + 'N' + hashlib.md5(rel_core.encode()).hexdigest())
u2 = URIRef(pkt_bnode + 'N' + hashlib.md5((rel_core + n3(OWL.Restriction)).encode()).hexdigest())
new_edge_inverse_rel: Tuple = tuple()
new_edge_rel_only: Tuple = ((node1, RDF.type, OWL.Class),
(u1, RDFS.subClassOf, node1),
(u1, RDF.type, OWL.Class),
(u1, RDFS.subClassOf, u2),
(u2, RDF.type, OWL.Restriction),
(u2, OWL.someValuesFrom, node2),
(node2, RDF.type, OWL.Class),
(u2, OWL.onProperty, relation),
(relation, RDF.type, OWL.ObjectProperty))
if inv_relation:
inv_rel_core = n3(node2) + n3(inv_relation) + n3(node1)
u3 = URIRef(pkt + 'N' + hashlib.md5(inv_rel_core.encode()).hexdigest())
u4 = URIRef(pkt_bnode + 'N' + hashlib.md5((inv_rel_core + n3(OWL.Restriction)).encode()).hexdigest())
new_edge_inverse_rel = ((node2, RDF.type, OWL.Class),
(u3, RDFS.subClassOf, node2),
(u3, RDF.type, OWL.Class),
(u3, RDFS.subClassOf, u4),
(u4, RDF.type, OWL.Restriction),
(u4, OWL.someValuesFrom, node1),
(node1, RDF.type, OWL.Class),
(u4, OWL.onProperty, inv_relation),
(inv_relation, RDF.type, OWL.ObjectProperty))
return new_edge_rel_only + new_edge_inverse_rel
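# Worked illustration (hypothetical CHEBI/DOID edge with relation RO_0002606) of the
# forward-direction pattern emitted above, in Turtle-like shorthand; <h1>/<h2> stand for
# the two md5-based hashes:
#   obo:CHEBI_81395   rdf:type owl:Class .
#   pkt:N<h1>         rdf:type owl:Class ; rdfs:subClassOf obo:CHEBI_81395 , pkt_bnode:N<h2> .
#   pkt_bnode:N<h2>   rdf:type owl:Restriction ; owl:onProperty obo:RO_0002606 ;
#                     owl:someValuesFrom obo:DOID_12858 .
#   obo:DOID_12858    rdf:type owl:Class .
#   obo:RO_0002606    rdf:type owl:ObjectProperty .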
def subclass_constructor(self, edge_info: Dict, edge_type: str) -> List:
"""Adds edges for the subclass construction approach.
Assumption: All ontology class nodes use the obo namespace.
Note. We explicitly type each node as a owl:Class and each relation/inverse relation as a owl:ObjectProperty.
This may seem redundant, but it is needed in order to ensure consistency between the data after applying the
OWL API to reformat the data.
Args:
edge_info: A dict of information needed to add edge to graph, for example:
{'n1': 'class', 'n2': 'class','rel': 'RO_0002606', 'inv_rel': 'RO_0002615',
'uri': ['https://www.ncbi.nlm.nih.gov/gene/', 'http://purl.obolibrary.org/obo/'],
'edges': ['CHEBI_81395', 'DOID_12858']}
edge_type: A string containing the name of the edge_type (e.g. "gene-disease", "chemical-gene").
Returns:
edges: A list of tuples containing new edges to add to the knowledge graph.
"""
res = finds_node_type(edge_info); uri1, uri2 = edge_info['uri']; edges: List = []
rel = URIRef(obo + edge_info['rel'])
irel = URIRef(obo + edge_info['inv_rel']) if edge_info['inv_rel'] is not None else None
if res['cls1'] and res['cls2']: # class-class edges
edges = list(self.subclass_core_constructor(URIRef(res['cls1']), URIRef(res['cls2']), rel, irel))
elif res['cls1'] and res['ent1']: # entity-class/class-entity edges
x = res['ent1'].replace(uri2, '') if edge_info['n1'] == 'class' else res['ent1'].replace(uri1, '')
mapped_node = self.maps_node_to_class(edge_type, x)
if mapped_node: # get entity mappings to current classes from subclass_construction_map
edges = [x for y in [((URIRef(res['ent1']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) for i in mapped_node] for x in y]
ent_order = ['cls1', 'ent1'] if edge_info['n1'] == 'class' else ['ent1', 'cls1'] # determine node order
edges += self.subclass_core_constructor(URIRef(res[ent_order[0]]), URIRef(res[ent_order[1]]), rel, irel)
else: # entity-entity edges
mapped_node1 = self.maps_node_to_class(edge_type, res['ent1'].replace(uri1, ''))
mapped_node2 = self.maps_node_to_class(edge_type, res['ent2'].replace(uri2, ''))
if mapped_node1 and mapped_node2: # get entity mappings to current classes from subclass_construction_map
edges += [x for y in [((URIRef(res['ent1']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) for i in mapped_node1] for x in y]
edges += [x for y in [((URIRef(res['ent2']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) for i in mapped_node2] for x in y]
edges += self.subclass_core_constructor(URIRef(res['ent1']), URIRef(res['ent2']), rel, irel)
return edges
@staticmethod
def instance_core_constructor(node1: URIRef, node2: URIRef, relation: URIRef, inv_relation: URIRef) -> Tuple:
"""Core instance-based edge construction method. Constructs a single edge between two ontology classes as
well as verifies if the user wants an inverse edge created and if so, then this edge is also added to the
knowledge graph.
Note. We explicitly type each node and each relation/inverse relation. This may seem redundant, but it is
needed in order to ensure consistency between the data after applying the OWL API to reformat the data.
Args:
node1: A URIRef or BNode object containing a subject node.
node2: A URIRef or BNode object containing an object node.
relation: A URIRef object containing an owl:ObjectProperty.
inv_relation: A string containing the identifier for an inverse relation (i.e. RO_0002200) or None
(i.e. indicator of no inverse relation).
Returns:
A list of tuples representing new edges to add to the knowledge graph.
"""
# select hash relation - if rel and inv rel take first in alphabetical order else use rel
rels = sorted([relation, inv_relation])[0] if inv_relation is not None else [relation][0]
rel_core = n3(node1) + n3(rels) + n3(node2)
u1 = URIRef(pkt + 'N' + hashlib.md5((rel_core + 'subject').encode()).hexdigest())
u2 = URIRef(pkt + 'N' + hashlib.md5((rel_core + 'object').encode()).hexdigest())
new_edge_inverse_rel: Tuple = tuple()
new_edge_rel_only: Tuple = ((u1, RDF.type, node1), (u1, RDF.type, OWL.NamedIndividual),
(u2, RDF.type, node2), (u2, RDF.type, OWL.NamedIndividual),
(u1, relation, u2), (relation, RDF.type, OWL.ObjectProperty))
if inv_relation: new_edge_inverse_rel = ((u2, inv_relation, u1), (inv_relation, RDF.type, OWL.ObjectProperty))
return new_edge_rel_only + new_edge_inverse_rel
def instance_constructor(self, edge_info: Dict, edge_type: str) -> List:
"""Adds edges for the instance construction approach.
Assumption: All ontology class nodes use the obo namespace.
Args:
edge_info: A dict of information needed to add edge to graph, for example:
{'n1': 'class', 'n2': 'class','rel': 'RO_0002606', 'inv_rel': 'RO_0002615',
'uri': ['https://www.ncbi.nlm.nih.gov/gene/', 'http://purl.obolibrary.org/obo/'],
'edges': ['CHEBI_81395', 'DOID_12858']}
edge_type: A string containing the name of the edge_type (e.g. "gene-disease", "chemical-gene").
Returns:
edges: A list of tuples containing new edges to add to the knowledge graph.
"""
res = finds_node_type(edge_info); uri1, uri2 = edge_info['uri']; edges: List = []
rel = URIRef(obo + edge_info['rel'])
irel = URIRef(obo + edge_info['inv_rel']) if edge_info['inv_rel'] is not None else None
if res['cls1'] and res['cls2']: # class-class edges
edges = list(self.instance_core_constructor(URIRef(res['cls1']), URIRef(res['cls2']), rel, irel))
elif res['cls1'] and res['ent1']: # class-entity/entity-class edges
x = res['ent1'].replace(uri2, '') if edge_info['n1'] == 'class' else res['ent1'].replace(uri1, '')
mapped_node = self.maps_node_to_class(edge_type, x)
if mapped_node: # get entity mappings to current classes from subclass_construction_map
edges = [x for y in [((URIRef(res['ent1']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) +
((URIRef(res['ent1']), RDF.type, OWL.Class),) for i in mapped_node] for x in y]
ent_order = ['cls1', 'ent1'] if edge_info['n1'] == 'class' else ['ent1', 'cls1'] # determine node order
edges += self.instance_core_constructor(URIRef(res[ent_order[0]]), URIRef(res[ent_order[1]]), rel, irel)
else: # entity-entity edges
mapped_node1 = self.maps_node_to_class(edge_type, res['ent1'].replace(uri1, ''))
mapped_node2 = self.maps_node_to_class(edge_type, res['ent2'].replace(uri2, ''))
if mapped_node1 and mapped_node2: # get entity mappings to current classes from subclass_construction_map
edges += [x for y in [((URIRef(res['ent1']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) +
((URIRef(res['ent1']), RDF.type, OWL.Class),) for i in mapped_node1] for x in y]
edges += [x for y in [((URIRef(res['ent2']), RDFS.subClassOf, URIRef(obo + i)),) +
((URIRef(obo + i), RDF.type, OWL.Class),) +
((URIRef(res['ent2']), RDF.type, OWL.Class),) for i in mapped_node2] for x in y]
edges += self.instance_core_constructor(URIRef(res['ent1']), URIRef(res['ent2']), rel, irel)
return edges
| StarcoderdataPython |
3239848 | <filename>src/decks/migrations/0007_auto_20200804_2018.py
# Generated by Django 3.0.8 on 2020-08-04 20:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('decks', '0006_auto_20200804_1827'),
]
operations = [
migrations.AlterField(
model_name='deck',
name='publish_status',
field=models.CharField(choices=[('x', 'Private'), ('f', 'Followers Only'), ('o', 'Everyone')], default='x', max_length=1),
),
]
| StarcoderdataPython |
44871 | <filename>crop_video.py<gh_stars>10-100
import cv2
import numpy as np
'''
Loads two videos and provides an interface to crop them to equal length
and sync them in time.
Specify:
path1: path to first video
path2: path to second video
vidname: name of the instance to be created
'''
path1 = "videos_original\out_a_full.mp4"
path2 = "videos_original\out_b_full.mp4"
vidname = 'outside'
cap1 = cv2.VideoCapture(path1)
nbr_frames1 = int(cap1.get(cv2.CAP_PROP_FRAME_COUNT))-1
cap2 = cv2.VideoCapture(path2)
nbr_frames2 = int(cap2.get(cv2.CAP_PROP_FRAME_COUNT))-1
_, f1 = cap1.read()
_, f2 = cap2.read()
height,width,channels = f1.shape
# Find a starting point
while True:
f = np.hstack((f1, f2))
f = cv2.resize(f, (0, 0), fx=0.5, fy=0.5)
cv2.imshow('',f)
k = cv2.waitKey(0) & 0xFF
if k==49: # 1 is pressed
_, f1 = cap1.read()
nbr_frames1 -= 1
elif k==50: # 2 is pressed
_, f2 = cap2.read()
nbr_frames2 -= 1
else:
break
# Create and save two equally long clips
clip1 = cv2.VideoWriter('videos/' + vidname + '1.mp4',cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (width,height))
clip2 = cv2.VideoWriter('videos/' + vidname + '2.mp4',cv2.VideoWriter_fourcc(*'mp4v'), 30.0, (width,height))
for i in range(min([nbr_frames1, nbr_frames2])):
_, f1 = cap1.read()
_, f2 = cap2.read()
clip1.write(f1)
clip2.write(f2)
clip1.release()
clip2.release()
cap1.release()
cap2.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
3222165 | from django import forms
from .models import Image, Profile, Comment
class NewImageForm(forms.ModelForm):
class Meta:
model = Image
exclude = ['user', 'post_date', 'liker', 'profile']
class NewProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user', 'followers', 'following']
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ['user', 'image']
| StarcoderdataPython |
4834989 | import warnings
from pathlib import Path
from threading import RLock
from typing import Union
import dask.dataframe as dd
from cachetools import LRUCache, cached
from cbgen import bgen_file, bgen_metafile
from cbgen.typing import Partition
from dask.delayed import delayed
from pandas import DataFrame
from ._environment import BGEN_READER_CACHE_HOME
from ._file import (
assert_file_exist,
assert_file_readable,
is_file_writable,
path_to_filename,
)
def create_metafile(
bgen_filepath: Union[str, Path],
metafile_filepath: Union[str, Path],
verbose: bool = True,
):
"""
Create variants metadata file.
Variants metadata file helps speed up subsequent reads of the associated
bgen file.
Parameters
----------
bgen_filepath : str
Bgen file path.
metafile_file : str
Metafile file path.
verbose : bool
``True`` to show progress; ``False`` otherwise.
Examples
--------
.. doctest::
>>> import os
>>> from bgen_reader import create_metafile, example_filepath
>>>
>>> filepath = example_filepath("example.32bits.bgen")
>>> metafile_filepath = filepath.with_suffix(".metafile")
>>>
>>> try:
... create_metafile(filepath, metafile_filepath, verbose=False)
... finally:
... if metafile_filepath.exists():
... os.remove(metafile_filepath)
"""
bgen_filepath = Path(bgen_filepath)
metafile_filepath = Path(metafile_filepath)
assert_file_exist(bgen_filepath)
assert_file_readable(bgen_filepath)
if metafile_filepath.exists():
raise ValueError(f"File {metafile_filepath} already exists.")
with bgen_file(bgen_filepath) as bgen:
bgen.create_metafile(metafile_filepath, verbose)
_metafile_nowrite_dir = """\
You don't have permission to write `{filepath}`. This might prevent speeding-up the reading process
in future runs.
"""
def infer_metafile_filepath(bgen_filepath: Path, suffix: str = ".metafile") -> Path:
"""
Infer metafile filepath.
The resulting file name will be the file name of ``bgen_filepath`` with the appended ``suffix``.
The root directory of the resulting filepath will be the directory of ``bgen_filepath`` if
the user has appropriate permissions. It falls back to the directory
BGEN_READER_CACHE_HOME / "metafile"
if necessary.
"""
metafile = bgen_filepath.with_suffix(bgen_filepath.suffix + suffix)
if metafile.exists():
try:
assert_file_readable(metafile)
return metafile
except RuntimeError as e:
warnings.warn(str(e), UserWarning)
return BGEN_READER_CACHE_HOME / "metafile" / path_to_filename(metafile)
else:
if is_file_writable(metafile):
return metafile
warnings.warn(_metafile_nowrite_dir.format(filepath=metafile), UserWarning)
return BGEN_READER_CACHE_HOME / "metafile" / path_to_filename(metafile)
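# Illustration (paths are hypothetical): for Path("/data/example.bgen") this returns
# Path("/data/example.bgen.metafile") when that location is readable/writable; otherwise it
# falls back to BGEN_READER_CACHE_HOME / "metafile" / <encoded file name>, where the
# encoding is whatever path_to_filename produces.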
def create_variants(filepath: Path, nvariants: int, npartitions: int, part_size: int):
dfs = []
index_base = 0
divisions = []
for i in range(npartitions):
divisions.append(index_base)
d = delayed(_read_partition)(filepath, i)
dfs.append(d)
index_base += part_size
divisions.append(nvariants - 1)
meta = [
("id", str),
("rsid", str),
("chrom", str),
("pos", int),
("nalleles", int),
("allele_ids", str),
("vaddr", int),
]
df = dd.from_delayed(dfs, meta=dd.utils.make_meta(meta), divisions=divisions)
return df
cache = LRUCache(maxsize=3)
lock = RLock()
@cached(cache, lock=lock)
def _read_partition(filepath: Path, partition: int) -> DataFrame:
with bgen_metafile(filepath) as mf:
part: Partition = mf.read_partition(partition)
v = part.variants
data = {
"id": v.id.astype(str),
"rsid": v.rsid.astype(str),
"chrom": v.chromosome.astype(str),
"pos": v.position.astype(int),
"nalleles": v.nalleles.astype(int),
"allele_ids": v.allele_ids.astype(str),
"vaddr": v.offset.astype(int),
}
df = DataFrame(data)
return df[["id", "rsid", "chrom", "pos", "nalleles", "allele_ids", "vaddr"]]
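# Hedged sketch of how create_variants is typically driven. The bgen_metafile attribute
# names below (npartitions, nvariants, partition_size) are assumptions based on how
# bgen-reader uses cbgen elsewhere; verify them against your cbgen version.
#   mfile = Path("example.bgen.metafile")
#   with bgen_metafile(mfile) as mf:
#       variants = create_variants(mfile, mf.nvariants, mf.npartitions, mf.partition_size)
#   print(variants.compute().head())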
| StarcoderdataPython |
4822478 | """
2015-2016 <NAME> <EMAIL>
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from dataset.data_utils import get_cifar10_data
from classifiers import Softmax
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
if __name__ == "__main__":
X_train, y_train, X_val, y_val, X_test, y_test = get_cifar10_data()
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]
def accuracy(ethalon, pred):
return np.average(ethalon == pred)
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained softmax classifer in best_softmax. #
################################################################################
"""
best_val = 0.
for rate in np.linspace(learning_rates[0], learning_rates[1], 3):
for reg_str in np.linspace(regularization_strengths[0], regularization_strengths[1], 3):
print('rate: {}, reg: {}'.format(rate, reg_str))
sm = Softmax()
sm.train(X_train, y_train, rate, reg_str)
pred_train, pred_test = None, None
pred_train = sm.predict(X_train)
pred_val = sm.predict(X_val)
if best_val < accuracy(y_val, pred_val):
best_val = accuracy(y_val, pred_val)
results[(rate, reg_str)] = (accuracy(y_train, pred_train), accuracy(y_val, pred_val))
"""
################################################################################
# END OF YOUR CODE #
################################################################################
"""
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
"""
rate = 5e-7
reg_str = 5e4
print('rate: {}, reg: {}'.format(rate, reg_str))
sm = Softmax()
sm.train(X_train, y_train, rate, reg_str)
pred_train, pred_test = None, None
pred_train = sm.predict(X_train)
pred_val = sm.predict(X_val)
print('train acc: {}, val acc: {}'.format(accuracy(y_train, pred_train), accuracy(y_val, pred_val))) | StarcoderdataPython |
98054 | <reponame>Snewmy/swordie<gh_stars>1-10
# Knight Stronghold: Secret Grove
# Quest: Rescue Neinhart
from net.swordie.ms.enums import WeatherEffNoticeType
KNIGHT_DISTRICT_4 = 271030400
WATCHMAN = 8610016
ENEMY_SPAWNS = [(635, 208), (159, 208), (59, 208), (-313, 208)]
sm.showWeatherNotice("Defeat all the monsters surrounding Neinheart to rescue him!", WeatherEffNoticeType.SnowySnowAndSprinkledFlowerAndSoapBubbles, 10000)
sm.setInstanceTime(600, KNIGHT_DISTRICT_4, 3)
for coords in ENEMY_SPAWNS:
for z in range(3):
sm.spawnMob(WATCHMAN, coords[0] + z, coords[1], False) # we add z so they dont spawn all clumped together | StarcoderdataPython |
1666137 | #!/usr/bin/python
#-!- coding: utf-8 -!-
""" This is WiFi Car control class. """
import pigpio,thread,time
import RPi.GPIO as GPIO
class WifiCar:
distance=None # The distance is measured approximately 10 times per second.
PG=None # pigpio object.
LED_RED_PIN=12
LED_BLUE_PIN=16
LED_GREEN_PIN=20
SERVO_POWER_PIN=13
SERVO_UPDOWN_PIN=5
SERVO_LEFTRIGHT_PIN=6
SERVO_CENTER=(1500,1620) # Left/Right, Up/Down center
SERVO_RANGE=((700,-700),(300,-600)) # servo move range.
L298N_INPUT1_PIN=27
L298N_INPUT2_PIN=22
L298N_INPUT3_PIN=23
L298N_INPUT4_PIN=24
L298N_ENABLEA_PIN=18
L298N_ENABLEB_PIN=25
ULTRASONIC_TRIG_PIN=4
ULTRASONIC_ECHO_PIN=17
def __init__(self, address=None ):
if address:
self.PG=pigpio.pi( address )
else:
self.PG=pigpio.pi()
# set L298N state.
self.PG.set_PWM_dutycycle( self.L298N_ENABLEA_PIN, 0 )
self.PG.set_PWM_frequency( self.L298N_ENABLEA_PIN, 400)
self.PG.set_PWM_range( self.L298N_ENABLEA_PIN, 1000 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEB_PIN, 0 )
self.PG.set_PWM_frequency( self.L298N_ENABLEB_PIN, 400)
self.PG.set_PWM_range( self.L298N_ENABLEB_PIN, 1000 )
self.forward(0)
# GPIO for ultrasonic distance measure.
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.ULTRASONIC_TRIG_PIN, GPIO.OUT)
GPIO.setup(self.ULTRASONIC_ECHO_PIN, GPIO.IN)
GPIO.output(self.ULTRASONIC_TRIG_PIN, GPIO.HIGH)
# Read the distance in front of the car every 0.1 seconds.
thread.start_new_thread( self.check_distance, () )
daemon_running=True # Mark of thread running.
def __del__(self):
self.close()
def feed_dog(self ,pin ):
'''Use a virtual GPIO pin as a watchdog. '''
self.PG.write( pin,not self.PG.read(pin) )
def check_distance(self ):
run_counter=0
counter_max=5;
while self.daemon_running:
if not self.ultrasonic_lock.locked():
self.ultrasonic_distance()
# Flash the green LED as a distance indicator: beyond 2 meters it flashes slowly; the nearer the obstacle, the faster it flashes.
if not self.distance or self.distance>150:
counter_max=12
elif self.distance>60:
counter_max=6
elif self.distance>30:
counter_max=4
elif self.distance>15:
counter_max=2
# If car is forwarding. then slow down the car.
# self.slowdown()
else:
counter_max=1
# If car is forwarding. stop the car.
self.stop(forward=True)
if run_counter==0:
self.led('green',True)
else:
self.led('green',False)
run_counter+=1
if run_counter>=counter_max: run_counter=0
time.sleep(0.1)
def reset(self ):
# power on servo and set it to the center.
self.PG.set_servo_pulsewidth( self.SERVO_LEFTRIGHT_PIN, self.SERVO_CENTER[0] )
self.PG.set_servo_pulsewidth( self.SERVO_UPDOWN_PIN, self.SERVO_CENTER[1] )
self.PG.write(self.SERVO_POWER_PIN, 0) # power on servo.
self.forward(0)# stop the car.
self.led('red',False)
self.led('blue',False)
self.led('green',False)
def led(self, color, light=True):
''' color is one of 'red','blue','green' '''
if color and 'red'==color.lower():
self.PG.write( self.LED_RED_PIN, not light )
if color and 'blue'==color.lower():
self.PG.write( self.LED_BLUE_PIN, not light )
if color and 'green'==color.lower():
self.PG.write( self.LED_GREEN_PIN, not light )
def inverse_led(self, color):
''' Change the LED light on or off '''
if color and 'red'==color.lower():
self.PG.write( self.LED_RED_PIN, not self.PG.read( self.LED_RED_PIN ) )
if color and 'blue'==color.lower():
self.PG.write( self.LED_BLUE_PIN, not self.PG.read( self.LED_BLUE_PIN ) )
if color and 'green'==color.lower():
self.PG.write( self.LED_GREEN_PIN, not self.PG.read( self.LED_GREEN_PIN ) )
# A thread lock for single run of ultrasonic.
ultrasonic_lock=thread.allocate_lock()
def ultrasonic_distance(self ):
''' Return distance in centimetre. '''
self.ultrasonic_lock.acquire()
GPIO.output( self.ULTRASONIC_TRIG_PIN, GPIO.LOW ) # High pulse 10uS
time.sleep( 0.00001 )
GPIO.output( self.ULTRASONIC_TRIG_PIN, GPIO.HIGH )
start=time.time();stop=None; i=0 # Wait for the echo to rise.
while GPIO.input( self.ULTRASONIC_ECHO_PIN )==0 and i<100:start=time.time();i+=1
# Wait for the echo to fall. A count of 1500 is roughly 6 meters, i.e. out of sensor range.
while GPIO.input( self.ULTRASONIC_ECHO_PIN )==1 and i<1500:stop=time.time();i+=1
if stop and i<1500: # Valid echo received
self.distance=(stop-start)*34000/2 # Speed of sound ~340 m/s; no temperature calibration applied.
else:
self.distance=None
self.ultrasonic_lock.release()
return self.distance
def left_wheel(self, speed):
if speed>1000:speed=1000
if speed<-1000:speed=-1000
if speed==0:
self.PG.write( self.L298N_INPUT3_PIN, 0 )
self.PG.write( self.L298N_INPUT4_PIN, 0 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEB_PIN, 1000 )
elif speed>0:
self.PG.write( self.L298N_INPUT3_PIN, 0 )
self.PG.write( self.L298N_INPUT4_PIN, 1 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEB_PIN, abs(speed) )
else:
self.PG.write( self.L298N_INPUT3_PIN, 1 )
self.PG.write( self.L298N_INPUT4_PIN, 0 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEB_PIN, abs(speed) )
def right_wheel(self, speed):
if speed>1000:speed=1000
if speed<-1000:speed=-1000
if speed==0:
self.PG.write( self.L298N_INPUT1_PIN, 0 )
self.PG.write( self.L298N_INPUT2_PIN, 0 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEA_PIN, 1000 )
elif speed>0:
self.PG.write( self.L298N_INPUT1_PIN, 0 )
self.PG.write( self.L298N_INPUT2_PIN, 1 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEA_PIN, abs(speed) )
else:
self.PG.write( self.L298N_INPUT1_PIN, 1 )
self.PG.write( self.L298N_INPUT2_PIN, 0 )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEA_PIN, abs(speed) )
def is_forwarding(self ):
l=self.PG.get_PWM_dutycycle( self.L298N_ENABLEA_PIN )
r=self.PG.get_PWM_dutycycle( self.L298N_ENABLEB_PIN )
if self.PG.read( self.L298N_INPUT3_PIN )==1 and self.PG.read( self.L298N_INPUT4_PIN )==0:
l=-l # left move backward
if self.PG.read( self.L298N_INPUT1_PIN )==1 and self.PG.read( self.L298N_INPUT2_PIN )==0:
r=-r # right move backward
return l+r>0
def slowdown(self ):
du=self.PG.get_PWM_dutycycle( self.L298N_ENABLEB_PIN )
self.PG.set_PWM_dutycycle( self.L298N_ENABLEB_PIN, du*0.5 )
def forward(self, speed=350, direction=0 ): # speed from -1000 to 1000; 0 means stop.
''' Move car forward. '''
if speed<=0 or self.distance>30:
self.left_wheel ( speed+direction )
self.right_wheel( speed-direction )
else: # Before hit something. slow down or stop.
if self.distance<15:
speed=0
self.stop()
elif self.distance<30:
speed*=(self.distance-15)/15
if speed<0:speed=0
self.left_wheel ( speed+direction )
self.right_wheel( speed-direction )
def stop(self, forward=False ):
if forward:
if self.is_forwarding():
self.stop()
else:
self.left_wheel(0)
self.right_wheel(0)
def backward(self, speed=350 ): # from -1000~1000
self.forward(-speed)
def rotate(self, degree ):
""" Rotate the car in degree """
pass
def servo_horizontal(self, hor ):
''' hor from -1000 to 1000 '''
if hor<-1000: hor=-1000
if hor> 1000: hor= 1000
if hor==0:
self.PG.set_servo_pulsewidth( self.SERVO_LEFTRIGHT_PIN, self.SERVO_CENTER[0] )
elif hor>0:
self.PG.set_servo_pulsewidth( self.SERVO_LEFTRIGHT_PIN, self.SERVO_CENTER[0]+int(self.SERVO_RANGE[0][1]*float(hor)/1000) )
else:
self.PG.set_servo_pulsewidth( self.SERVO_LEFTRIGHT_PIN, self.SERVO_CENTER[0]-int(self.SERVO_RANGE[0][0]*float(hor)/1000) )
def servo_vertical(self, ver ):
if ver<-1000: ver=-1000
if ver> 1000: ver= 1000
if ver==0:
self.PG.set_servo_pulsewidth( self.SERVO_UPDOWN_PIN, self.SERVO_CENTER[1] )
elif ver>0:
self.PG.set_servo_pulsewidth( self.SERVO_UPDOWN_PIN, self.SERVO_CENTER[1]+int(self.SERVO_RANGE[1][1]*float(ver)/1000) )
else:
self.PG.set_servo_pulsewidth( self.SERVO_UPDOWN_PIN, self.SERVO_CENTER[1]-int(self.SERVO_RANGE[1][0]*float(ver)/1000) )
gpio_cleaned=False
def close(self ):
self.daemon_running=False
if not self.gpio_cleaned:
self.gpio_cleaned=True
self.ultrasonic_lock.acquire()
GPIO.cleanup()
self.ultrasonic_lock.release()
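# Hedged usage sketch (not part of the original module): creep forward until the ultrasonic
# sensor reports an obstacle closer than about 30 cm, then stop and release the GPIOs.
# Assumes a local pigpiod daemon; pass an address string to WifiCar() to use a remote one.
if __name__ == '__main__':
    car = WifiCar()
    car.reset()
    try:
        while car.distance is None or car.distance > 30:
            car.forward(400)   # forward() itself slows and stops as the obstacle gets close
            time.sleep(0.1)
    finally:
        car.stop()
        car.close()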
| StarcoderdataPython |
150203 | <gh_stars>0
# Copyright 2012-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
from requestbuilder import Arg, MutuallyExclusiveArgList
from requestbuilder.exceptions import ArgumentError
from euca2ools.commands.ec2 import EC2Request, parse_ports
class _ModifySecurityGroupRule(EC2Request):
"""
The basis for security group-editing commands
"""
ARGS = [Arg('group', metavar='GROUP', route_to=None,
help='name or ID of the security group to modify (required)'),
Arg('--egress', action='store_true', route_to=None,
help='''[VPC only] manage an egress rule, which controls
traffic leaving the group'''),
Arg('-P', '--protocol', dest='IpPermissions.1.IpProtocol',
metavar='PROTOCOL', default='tcp',
help='''the protocol to affect (Non-VPC: tcp, udp, icmp)
(VPC only: tcp, udp, icmp, -1/all, other protocol numbers)
(default: tcp)'''),
Arg('-p', '--port-range', dest='port_range', metavar='RANGE',
route_to=None, help='''range of ports (specified as "from-to")
or a single port number (required for tcp and udp)'''),
Arg('-t', '--icmp-type-code', dest='icmp_type_code',
metavar='TYPE:CODE', route_to=None, help='''ICMP type and
code (specified as "type:code") (required for icmp)'''),
MutuallyExclusiveArgList(
Arg('-s', '--cidr', metavar='CIDR',
dest='IpPermissions.1.IpRanges.1.CidrIp',
help='''IP range (default: 0.0.0.0/0)'''),
# ^ default is added by main()
Arg('-o', dest='target_group', metavar='GROUP', route_to=None,
help='''[Non-VPC only] name of a security group with which
to affect network communication''')),
Arg('-u', metavar='ACCOUNT',
dest='IpPermissions.1.Groups.1.UserId',
help='''ID of the account that owns the security group
specified with -o''')]
def process_cli_args(self):
self.process_port_cli_args()
# noinspection PyExceptionInherit
def configure(self):
EC2Request.configure(self)
if (self.args['group'].startswith('sg-') and
len(self.args['group']) == 11):
# The check could probably be a little better, but meh. Fix if
# needed.
self.params['GroupId'] = self.args['group']
else:
if self.args['egress']:
raise ArgumentError('egress rules must use group IDs, not '
'names')
self.params['GroupName'] = self.args['group']
target_group = self.args.get('target_group')
if target_group is not None:
if target_group.startswith('sg-') and len(target_group) == 11:
# Same note as above
self.params['IpPermissions.1.Groups.1.GroupId'] = target_group
else:
if self.args['egress']:
raise ArgumentError('argument -o: egress rules must use '
'group IDs, not names')
self.params['IpPermissions.1.Groups.1.GroupName'] = target_group
protocol = self.args.get('IpPermissions.1.IpProtocol')
if str(protocol).lower() in ('icmp', 'tcp', 'udp', '1', '6', '17'):
from_port, to_port = parse_ports(
protocol, self.args.get('port_range'),
self.args.get('icmp_type_code'))
self.params['IpPermissions.1.FromPort'] = from_port
self.params['IpPermissions.1.ToPort'] = to_port
elif str(protocol).lower() in ('all', '-1'):
self.params['IpPermissions.1.IpProtocol'] = -1
elif not str(protocol).isdigit():
try:
self.params['IpPermissions.1.IpProtocol'] = \
socket.getprotobyname(protocol)
except socket.error:
raise ArgumentError('argument -P: no such protocol: {0}'
.format(protocol))
if (not self.args.get('IpPermissions.1.IpRanges.1.GroupName') and
not self.args.get('IpPermissions.1.IpRanges.1.CidrIp')):
# Default rule target is the entire Internet
self.params['IpPermissions.1.IpRanges.1.CidrIp'] = '0.0.0.0/0'
if (self.params.get('IpPermissions.1.Groups.1.GroupName') and
not self.args.get('IpPermissions.1.Groups.1.UserId')):
raise ArgumentError('argument -u is required when -o names a '
'security group by name')
def print_result(self, _):
print self.tabify(['GROUP', self.args.get('group')])
perm_str = ['PERMISSION', self.args.get('group'), 'ALLOWS',
self.params.get('IpPermissions.1.IpProtocol'),
self.params.get('IpPermissions.1.FromPort'),
self.params.get('IpPermissions.1.ToPort')]
if self.params.get('IpPermissions.1.Groups.1.UserId'):
perm_str.append('USER')
perm_str.append(self.params.get('IpPermissions.1.Groups.1.UserId'))
if self.params.get('IpPermissions.1.Groups.1.GroupName'):
perm_str.append('NAME')
perm_str.append(self.params.get(
'IpPermissions.1.Groups.1.GroupName'))
if self.params.get('IpPermissions.1.Groups.1.GroupId'):
perm_str.append('ID')
perm_str.append(self.params.get(
'IpPermissions.1.Groups.1.GroupId'))
if self.params.get('IpPermissions.1.IpRanges.1.CidrIp'):
perm_str.extend(['FROM', 'CIDR'])
perm_str.append(self.params.get(
'IpPermissions.1.IpRanges.1.CidrIp'))
print self.tabify(perm_str)
class AuthorizeSecurityGroupRule(_ModifySecurityGroupRule):
DESCRIPTION = 'Add a rule to a security group that allows traffic to pass'
@property
def action(self):
if self.args['egress']:
return 'AuthorizeSecurityGroupEgress'
else:
return 'AuthorizeSecurityGroupIngress'
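# Example invocation of the resulting command-line tool (the flags map directly onto the
# ARGS defined above; the command name follows euca2ools' classic naming, and the group
# name and CIDR are placeholders):
#   euca-authorize -P tcp -p 22 -s 203.0.113.0/24 my-security-group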
class RevokeSecurityGroupRule(_ModifySecurityGroupRule):
DESCRIPTION = 'Remove a rule from a security group'
@property
def action(self):
if self.args['egress']:
return 'RevokeSecurityGroupEgress'
else:
return 'RevokeSecurityGroupIngress'
| StarcoderdataPython |
29383 | import pytest
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
('nodejs'),
])
def test_packages_are_installed(host, name):
package = host.package(name)
assert package.is_installed
@pytest.mark.parametrize('path,user,group', [
('/usr/bin/node', 'root', 'root'),
('/usr/bin/ncu', 'root', 'root'),
('/usr/bin/yarn', 'root', 'root'),
])
def test_binaries_are_installed(host, path, user, group):
binary = host.file(path)
assert binary.exists
assert binary.is_file
assert binary.user == user
assert binary.group == group
| StarcoderdataPython |
4832298 | from tornado_jinja2 import Jinja2Loader
import unittest
import jinja2
class LoaderTest(unittest.TestCase):
templates_path = 'test/templates/'
def setUp(self):
self.jinja2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(self.templates_path))
self.loader = Jinja2Loader(self.jinja2_env)
self.template_obj = self.loader.load('page.html')
def test_load_template(self):
self.assertIsInstance(self.template_obj, jinja2.Template)
def test_generate_html(self):
html_code = self.template_obj.generate(name='hi')
self.assertIn('hi', html_code)
def test_get_jinja2_environment(self):
self.assertIs(self.loader.jinja2_environment, self.loader._jinja2_env)
self.assertIs(self.loader.jinja2_environment, self.template_obj.environment)
def test_set_jinja2_environment(self):
env = jinja2.Environment()
env.loader = jinja2.FileSystemLoader(self.templates_path)
self.loader.jinja2_environment = env
template_obj2 = self.loader.load('page.html')
self.assertIs(template_obj2.environment, env)
def test_cached_jinja2_environment(self):
template_obj2 = self.loader.load('page.html')
self.assertIs(self.template_obj.environment, template_obj2.environment)
def test_check_jinja2_environment(self):
self.loader.jinja2_environment = None
self.assertRaises(TypeError, self.loader.load, 'page.html')
class LoaderLegacyTest(LoaderTest):
def setUp(self):
self.loader = Jinja2Loader(self.templates_path)
self.template_obj = self.loader.load('page.html')
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
156588 | <gh_stars>1-10
from torch.utils.model_zoo import load_url
from .model import Model
model_urls = {
"dfl": "https://github.com/zheniu/stochastic-cslr-ckpt/raw/main/dfl.pth",
"sfl": "https://github.com/zheniu/stochastic-cslr-ckpt/raw/main/sfl.pth",
}
def load_model(use_sfl=True, pretrained=True):
model = Model(
vocab_size=1232,
dim=512,
max_num_states=5 if use_sfl else 2,
use_sfl=use_sfl,
)
if pretrained:
model.load_state_dict(load_url(model_urls["sfl" if use_sfl else "dfl"]))
return model
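# Hedged usage sketch (not from the repository): load the pretrained SFL checkpoint and
# report its size; the __main__ guard keeps the download from running on import.
if __name__ == "__main__":
    m = load_model(use_sfl=True, pretrained=True)
    m.eval()
    print(sum(p.numel() for p in m.parameters()), "parameters")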
| StarcoderdataPython |
1635923 | <gh_stars>1-10
"""FezHat 1.1 tools."""
import smbus
from RPi import GPIO
class Pins(object):
"""Store the address of the sensors."""
SWITCH_LEFT = 18
SWITCH_RIGHT = 22
ANALOG_1 = 1
ANALOG_2 = 2
ANALOG_3 = 3
ANALOG_6 = 6
ANALOG_7 = 7
LED = 24
class Fezhat(object):
"""Access to most sensors on the Fez Hat board."""
def __init__(self, bus_id, address):
"""Access the bus on the board."""
self._bus_id = bus_id
self._address = address
self._bus = smbus.SMBus(bus_id)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(Pins.LED, GPIO.OUT)
GPIO.setup(Pins.SWITCH_LEFT, GPIO.IN)
GPIO.setup(Pins.SWITCH_RIGHT, GPIO.IN)
def read(self, channel):
"""Read data on the channel."""
chn_adr = 0b10000100 | ((channel // 2) if (channel % 2 == 0) else ((channel // 2) + 4)) << 4  # integer division so the shift operand stays an int
self._bus.write_byte(self._address, chn_adr)
return self._bus.read_byte(self._address)
@property
def temperature(self):
"""Get temperature in Celsius."""
return (((3300 / 255) * self.read(4)) - 400) / 19.5
@property
def light(self):
"""Get light sensor value 0.0-1.0."""
return self.read(5) / 255.0
@property
def buttons(self):
"""Check which button is pressed."""
switch_0 = GPIO.input(Pins.SWITCH_LEFT) == GPIO.LOW
switch_1 = GPIO.input(Pins.SWITCH_RIGHT) == GPIO.LOW
if switch_0 and switch_1:
return 3
elif switch_0:
return 1
elif switch_1:
return 2
elif not switch_0 and not switch_1:
return 0
else:
raise Exception('Something went wrong with the buttons.')
def led(self, state):
"""Change the state of the LED."""
if not state in (0, 1):
raise Exception('LED can only be set to 1 or 0.')
if state == 1:
GPIO.output(Pins.LED, GPIO.HIGH)
else:
GPIO.output(Pins.LED, GPIO.LOW)
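# Hedged usage sketch (not part of the original module). The I2C bus id and ADC address are
# assumptions (bus 1 and 0x48 are typical for the FEZ HAT's ADS7830 on a Raspberry Pi), so
# check your board, e.g. with i2cdetect, before running.
if __name__ == '__main__':
    hat = Fezhat(1, 0x48)
    print('temperature: %.1f C' % hat.temperature)
    print('light: %.2f' % hat.light)
    hat.led(1 if hat.buttons else 0)  # LED on while any button is held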
| StarcoderdataPython |
1780409 | <gh_stars>0
#!/usr/bin/env python
import os, base64, tempfile, io
from os import path
from setuptools import setup, Command
from distutils.command.build_scripts import build_scripts
from setuptools.dist import Distribution as _Distribution
LONG="""
Versioneer is a tool to automatically update version strings (in setup.py and
the conventional 'from PROJECT import _version' pattern) by asking your
version-control system about the current tree.
"""
# as nice as it'd be to versioneer ourselves, that sounds messy.
VERSION = "0.16+dev"
def ver(s):
return s.replace("@VERSIONEER-VERSION@", VERSION)
def get(fn, add_ver=False, unquote=False, do_strip=False, do_readme=False):
with open(fn) as f:
text = f.read()
# If we're in Python <3 and have a separate Unicode type, we would've
# read a non-unicode string. Else, all strings will be unicode strings.
try:
__builtins__.unicode
except AttributeError:
pass
else:
text = text.decode('ASCII')
if add_ver:
text = ver(text)
if unquote:
text = text.replace("%", "%%")
if do_strip:
lines = [line for line in text.split("\n")
if not line.endswith("# --STRIP DURING BUILD")]
text = "\n".join(lines)
if do_readme:
text = text.replace("@README@", get("README.md"))
return text
def u(s): # so u("foo") yields unicode on all of py2.6/py2.7/py3.2/py3.3
return s.encode("ascii").decode("ascii")
def get_vcs_list():
project_path = path.join(path.abspath(path.dirname(__file__)), 'src')
return [filename
for filename
in os.listdir(project_path)
if path.isdir(path.join(project_path, filename))]
def generate_long_version_py(VCS):
s = io.StringIO()
s.write(get("src/%s/long_header.py" % VCS, add_ver=True, do_strip=True))
for piece in ["src/subprocess_helper.py",
"src/from_parentdir.py",
"src/%s/from_keywords.py" % VCS,
"src/%s/from_vcs.py" % VCS,
"src/render.py",
"src/%s/long_get_versions.py" % VCS]:
s.write(get(piece, unquote=True, do_strip=True))
return s.getvalue()
def generate_versioneer_py():
s = io.StringIO()
s.write(get("src/header.py", add_ver=True, do_readme=True))
s.write(get("src/subprocess_helper.py", do_strip=True))
for VCS in get_vcs_list():
s.write(u("LONG_VERSION_PY['%s'] = '''\n" % VCS))
s.write(generate_long_version_py(VCS))
s.write(u("'''\n"))
s.write(get("src/%s/from_keywords.py" % VCS, do_strip=True))
s.write(get("src/%s/from_vcs.py" % VCS, do_strip=True))
s.write(get("src/%s/install.py" % VCS, do_strip=True))
s.write(get("src/from_parentdir.py", do_strip=True))
s.write(get("src/from_file.py", add_ver=True, do_strip=True))
s.write(get("src/render.py", do_strip=True))
s.write(get("src/get_versions.py", do_strip=True))
s.write(get("src/cmdclass.py", do_strip=True))
s.write(get("src/setupfunc.py", do_strip=True))
return s.getvalue().encode("utf-8")
class make_versioneer(Command):
description = "create standalone versioneer.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
with open("versioneer.py", "w") as f:
f.write(generate_versioneer_py().decode("utf8"))
return 0
class make_long_version_py_git(Command):
description = "create standalone _version.py (for git)"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
assert os.path.exists("versioneer.py")
long_version = generate_long_version_py("git")
with open("git_version.py", "w") as f:
f.write(long_version %
{"DOLLAR": "$",
"STYLE": "pep440",
"TAG_PREFIX": "tag-",
"PARENTDIR_PREFIX": "parentdir_prefix",
"VERSIONFILE_SOURCE": "versionfile_source",
})
return 0
class my_build_scripts(build_scripts):
def run(self):
v = generate_versioneer_py()
v_b64 = base64.b64encode(v).decode("ascii")
lines = [v_b64[i:i+60] for i in range(0, len(v_b64), 60)]
v_b64 = "\n".join(lines)+"\n"
with open("src/installer.py") as f:
s = f.read()
s = ver(s.replace("@VERSIONEER-INSTALLER@", v_b64))
tempdir = tempfile.mkdtemp()
installer = os.path.join(tempdir, "versioneer")
with open(installer, "w") as f:
f.write(s)
self.scripts = [installer]
rc = build_scripts.run(self)
os.unlink(installer)
os.rmdir(tempdir)
return rc
# python's distutils treats module-less packages as binary-specific (not
# "pure"), so "setup.py bdist_wheel" creates binary-specific wheels. Override
# this so we get cross-platform wheels instead. More info at:
# https://bitbucket.org/pypa/wheel/issue/116/packages-with-only-filesdata_files-get
class Distribution(_Distribution):
def is_pure(self): return True
setup(
name = "versioneer",
license = "public domain",
version = VERSION,
description = "Easy VCS-based management of project version strings",
author = "<NAME>",
author_email = "<EMAIL>",
url = "https://github.com/warner/python-versioneer",
# "fake" is replaced with versioneer-installer in build_scripts. We need
# a non-empty list to provoke "setup.py build" into making scripts,
# otherwise it skips that step.
scripts = ["fake"],
long_description = LONG,
distclass=Distribution,
cmdclass = { "build_scripts": my_build_scripts,
"make_versioneer": make_versioneer,
"make_long_version_py_git": make_long_version_py_git,
},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
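# Example invocations (a sketch, assuming this file is run as the project's
# setup.py with the src/ tree described above in place):
#
#   python setup.py make_versioneer            # write a standalone versioneer.py
#   python setup.py make_long_version_py_git   # write git_version.py from the git template
#   python setup.py build_scripts              # embed the base64-encoded versioneer.py
#                                              # into src/installer.py as the shipped script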
| StarcoderdataPython |
3227519 | import sys
from pyautocad import Autocad
from win32com import client
import math
import os
import psutil
# filepath = "E:\work\软件\软件\图档\8GBY112_C.dwg"
filepath = sys.argv[1]
def getAllPid():
pid_dict={}
pids = psutil.pids()
for pid in pids:
p = psutil.Process(pid)
pid_dict[pid]=p.name()
return pid_dict
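# Example (a sketch): list every running process id and name. Note that
# psutil.Process can raise NoSuchProcess or AccessDenied for short-lived or
# protected processes, so a more robust caller may want to guard each lookup.
#
#   for pid, name in getAllPid().items():
#       print(pid, name)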
# def kill(pid):
# try:
# kill_pid = os.kill(pid, signal.SIGABRT)
#         print('Killed process with pid %s, return value: %s' % (pid, kill_pid))
# except Exception as e:
#         print('No such process!!!')
if __name__ == "__main__":
    print(sys.argv)
    # Log the launch arguments for debugging; a context manager keeps the file
    # handle from leaking if the CAD automation below fails.
    with open("E:\\aa.txt", mode='a+') as f:
        f.write(str(sys.argv))
# wincad = client.Dispatch("AutoCAD.Application")
cadWorker = Autocad(create_if_not_exists=True)
print(filepath)
cadWorker.Application.Documents.Open(filepath)
# cadWorker.ActiveDocument.SaveAs("E:\work\软件\软件\图档\8GBY112_d.dwg", 60)
# dic = getAllPid()
# os.system("taskkill /F /IM acad.exe")
| StarcoderdataPython |
3262063 | <gh_stars>0
class Equipment:
ID = 0
def __init__(self, name):
self.name = name
Equipment.ID += 1
self.id = Equipment.ID
def __repr__(self):
return f"Equipment <{self.id}> {self.name}"
@staticmethod
def get_next_id():
return Equipment.ID + 1
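# A minimal usage sketch: every new Equipment receives the next sequential id,
# and get_next_id() lets callers peek at the id the next instance will get.
if __name__ == "__main__":
    drill = Equipment("drill")
    saw = Equipment("saw")
    print(drill)                    # Equipment <1> drill
    print(saw)                      # Equipment <2> saw
    print(Equipment.get_next_id())  # 3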
| StarcoderdataPython |
1656168 | import struct
class APP_ID:
APP_CS_LOGIN = 0xB0
APP_SC_LOGIN_OK = 0xA0
APP_SC_LOGIN_NOK = 0xA1
app_cs_structs = {
APP_ID.APP_CS_LOGIN : 'sBs', # username, hashed?, password
}
app_sc_structs = {
APP_ID.APP_SC_LOGIN_OK : '', # token
APP_ID.APP_SC_LOGIN_NOK : '',
}
class ID:
#generic constants
HON_CHAT_PORT = 11031
HON_PROTOCOL_VERSION = 64 # 0x3F (WAS 63 BEFORE)
HON_STATUS_OFFLINE = 0
HON_STATUS_ONLINE = 3
HON_STATUS_INLOBBY = 4
HON_STATUS_INGAME = 5
HON_FLAGS_PREPURCHASED =0x40
HON_FLAGS_CHAT_NONE =0x00
HON_FLAGS_CHAT_OFFICER =0x01
HON_FLAGS_CHAT_LEADER =0x02
HON_FLAGS_CHAT_ADMINISTRATOR =0x03
HON_FLAGS_CHAT_STAFF =0x04
#- Client -> Server
HON_CS_PONG = 0x2A01
HON_CS_CHANNEL_MSG = 0x03
HON_CS_WHISPER = 0x08
HON_CS_AUTH_INFO = 0x0C00
HON_CS_BUDDY_ADD_NOTIFY = 0x0D
HON_CS_JOIN_GAME = 0x10
HON_CS_CLAN_MESSAGE = 0x13
HON_CS_PM = 0x1C
HON_CS_JOIN_CHANNEL = 0x1E
HON_CS_WHISPER_BUDDIES = 0x20
HON_CS_LEAVE_CHANNEL = 0x22
HON_CS_USER_INFO = 0x2A
HON_CS_UPDATE_TOPIC = 0x30
HON_CS_CHANNEL_KICK = 0x31
HON_CS_CHANNEL_BAN = 0x32
HON_CS_CHANNEL_UNBAN = 0x33
HON_CS_CHANNEL_SILENCE_USER = 0x38
HON_CS_CHANNEL_PROMOTE = 0x3A
HON_CS_CHANNEL_DEMOTE = 0x3B
HON_CS_CHANNEL_AUTH_ENABLE = 0x3E
HON_CS_CHANNEL_AUTH_DISABLE = 0x3F
HON_CS_CHANNEL_AUTH_ADD = 0x40
HON_CS_CHANNEL_AUTH_DELETE = 0x41
HON_CS_CHANNEL_AUTH_LIST = 0x42
HON_CS_CHANNEL_SET_PASSWORD = <PASSWORD>
HON_CS_JOIN_CHANNEL_PASSWORD = <PASSWORD>
HON_CS_CLAN_ADD_MEMBER = 0x47
HON_CS_CLAN_REMOVE_MEMBER = 0x17
HON_CS_CHANNEL_EMOTE = 0x65
HON_CS_CHANNEL_ROLL = 0x64
HON_CS_BUDDY_ACCEPT = 0xB3
HON_CS_START_MM_GROUP = 0x0C0A
HON_CS_INVITE_TO_MM = 0x0C0D
HON_CS_KICK_FROM_MM = 0x0D00
HON_CS_GLOBAL_MESSAGE = 0x39
#- Server -> Client
HON_SC_AUTH_ACCEPTED = 0x1c00
HON_SC_PING = 0x2A00
HON_SC_CHANNEL_MSG = 0x03
HON_SC_CHANGED_CHANNEL = 0x04
HON_SC_JOINED_CHANNEL = 0x05
HON_SC_LEFT_CHANNEL = 0x06
HON_SC_WHISPER = 0x08
HON_SC_WHISPER_FAILED = 0x09
HON_SC_INITIAL_STATUS = 0x0B
HON_SC_UPDATE_STATUS = 0xC
HON_SC_CLAN_MESSAGE = 0x13
HON_SC_LOOKING_FOR_CLAN = 0x18
HON_SC_PM = 0x1C
HON_SC_PM_FAILED = 0x1D
HON_SC_WHISPER_BUDDIES = 0x20
HON_SC_MAX_CHANNELS = 0x21
HON_SC_USER_INFO_NO_EXIST = 0x2B
HON_SC_USER_INFO_OFFLINE = 0x2C
HON_SC_USER_INFO_ONLINE = 0x2D
HON_SC_USER_INFO_IN_GAME = 0x2E
HON_SC_CHANNEL_UPDATE = 0x2F
HON_SC_UPDATE_TOPIC = 0x30
HON_SC_CHANNEL_KICK = 0x31
HON_SC_CHANNEL_BAN = 0x32
HON_SC_CHANNEL_UNBAN = 0x33
HON_SC_CHANNEL_BANNED = 0x34
HON_SC_CHANNEL_SILENCED = 0x35
HON_SC_CHANNEL_SILENCE_LIFTED = 0x36
HON_SC_CHANNEL_SILENCE_PLACED = 0x37
HON_SC_GLOBAL_MESSAGE = 0x39
HON_SC_CHANNEL_PROMOTE = 0x3A
HON_SC_CHANNEL_DEMOTE = 0x3B
HON_SC_CHANNEL_AUTH_ENABLE = 0x3E
HON_SC_CHANNEL_AUTH_DISABLE = 0x3F
HON_SC_CHANNEL_AUTH_ADD = 0x40
HON_SC_CHANNEL_AUTH_DELETE = 0x41
HON_SC_CHANNEL_AUTH_LIST = 0x42
HON_SC_CHANNEL_PASSWORD_CHANGED = 0x43
HON_SC_CHANNEL_ADD_AUTH_FAIL = 0x44
HON_SC_CHANNEL_DEL_AUTH_FAIL = 0x45
HON_SC_JOIN_CHANNEL_PASSWORD = 0x46
HON_SC_CLAN_MEMBER_ADDED = 0x4E #not sure about that
HON_SC_CLAN_MEMBER_CHANGE = 0x50
HON_SC_NAME_CHANGE = 0x5A
HON_SC_CHANNEL_EMOTE = 0x65
HON_SC_CHANNEL_ROLL = 0x64
HON_SC_TOTAL_ONLINE = 0x68
HON_SC_REQUEST_NOTIFICATION = 0xB2
HON_SC_NOTIFICATION = 0xB4
HON_SC_TMM_GROUP_JOIN = 0xC0E
HON_SC_TMM_GROUP_CHANGE = 0xD03
FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
def dump(src, length=16):
N=0; result=''
while src:
s,src = src[:length],src[length:]
hexa = ' '.join(["%02X"%ord(x) for x in s])
s = s.translate(FILTER)
result += "%04X %-*s %s\n" % (N, length*3, hexa, s)
N+=length
return result
chat_packets = [ID.HON_SC_PM,ID.HON_SC_WHISPER,ID.HON_SC_CHANNEL_MSG,ID.HON_SC_CHANNEL_ROLL,ID.HON_SC_CHANNEL_EMOTE]
cs_structs = {
ID.HON_CS_AUTH_INFO : 'IsssIIsBBBBBss',
ID.HON_CS_PONG : '',
ID.HON_CS_JOIN_CHANNEL : 's',
ID.HON_CS_PM : 'ss',
ID.HON_CS_WHISPER : 'ss',
ID.HON_CS_CHANNEL_MSG : 'sI',
ID.HON_CS_CHANNEL_EMOTE : 'sI',
ID.HON_CS_CHANNEL_ROLL : 'sI',
ID.HON_CS_JOIN_CHANNEL : 's',
ID.HON_CS_LEAVE_CHANNEL : 's',
ID.HON_CS_USER_INFO : 's',
ID.HON_CS_START_MM_GROUP : 'sHsssH',
ID.HON_CS_INVITE_TO_MM : 's',
ID.HON_CS_CHANNEL_KICK : 'II',
ID.HON_CS_CHANNEL_BAN : 'Is',
ID.HON_CS_CHANNEL_UNBAN : 'Is',
ID.HON_CS_CHANNEL_SILENCE_USER : 'IsI',
ID.HON_CS_UPDATE_TOPIC : 'Is',
ID.HON_CS_CHANNEL_AUTH_ENABLE : 'I',
ID.HON_CS_CHANNEL_AUTH_DISABLE : 'I',
ID.HON_CS_CHANNEL_AUTH_ADD : 'Is',
ID.HON_CS_CHANNEL_AUTH_DELETE : 'Is',
ID.HON_CS_CHANNEL_PROMOTE : 'II',
ID.HON_CS_CHANNEL_DEMOTE : 'II',
ID.HON_CS_CLAN_ADD_MEMBER : 's',
ID.HON_CS_CLAN_MESSAGE : 's',
ID.HON_CS_GLOBAL_MESSAGE : 's',
ID.HON_CS_CLAN_REMOVE_MEMBER : 'I',
ID.HON_CS_KICK_FROM_MM : 'B',
}
sc_structs = {
ID.HON_SC_PING : '',
ID.HON_SC_PM : None, # parse_pm,
ID.HON_SC_WHISPER : 'ss',
ID.HON_SC_CHANNEL_MSG : 'IIs',
ID.HON_SC_CHANNEL_EMOTE : 'IIs',
ID.HON_SC_CHANNEL_ROLL : 'IIs',
ID.HON_SC_CHANGED_CHANNEL : None, # parse_channel_join,
ID.HON_SC_INITIAL_STATUS : None, # parse_initiall_statuses,
ID.HON_SC_UPDATE_STATUS : None, # parse_user_status,
ID.HON_SC_JOINED_CHANNEL : 'IsIBBsss', #chat_id,nick,id,status,flags,chatsymbol,shield,icon
ID.HON_SC_CLAN_MEMBER_ADDED : 'I',
ID.HON_SC_CLAN_MEMBER_CHANGE : 'IBI', #whom,wat,who (theli, promoted to officer, by visions)
ID.HON_SC_NAME_CHANGE : 'Is',
ID.HON_SC_CLAN_MESSAGE : 'Is',
ID.HON_SC_LEFT_CHANNEL : 'II',
ID.HON_SC_TOTAL_ONLINE : 'Is',
ID.HON_SC_USER_INFO_NO_EXIST : 's',
ID.HON_SC_USER_INFO_OFFLINE : 'ss',
ID.HON_SC_USER_INFO_IN_GAME : 'sss',
ID.HON_SC_CHANNEL_PROMOTE : 'III',
ID.HON_SC_CHANNEL_DEMOTE : 'III'
}
def recv_packet(sock):
print "RECV"
    length = sock.recv(2)
    length = struct.unpack('<H', length)[0]  # little-endian, matching app_send_* above
packet = sock.recv(length)
print dump(packet)
return packet
def app_send_client(sock, *args):
packet = app_pack_client(*args)
print "SEND"
sock.sendall(struct.pack('<H', len(packet)))
sock.sendall(packet)
print dump(packet)
def app_send_srv(sock, *args):
packet = app_pack_srv(*args)
print "SEND"
sock.sendall(struct.pack('<H', len(packet)))
sock.sendall(packet)
print dump(packet)
def app_pack_srv(packet_id, *args):
return pack_common(app_sc_structs, packet_id, *args)
def app_pack_client(packet_id, *args):
return pack_common(app_cs_structs, packet_id, *args)
def pack_common(s, packet_id, *args):
args = list(args)
fmt = list(s[packet_id])
for i, f in enumerate(fmt):
if f == 's':
#print (args[i].__class__.__name__)
if isinstance(args[i], unicode):
args[i] = args[i].encode('utf-8')
fmt[i] = '{0}s'.format(1 + len(args[i]))
fmt = ''.join(fmt)
packet = struct.pack('<H' + fmt, packet_id, *args)
return packet
def pack(packet_id, *args):
args = list(args)
fmt = list(cs_structs[packet_id])
for i, f in enumerate(fmt):
if f == 's':
#print (args[i].__class__.__name__)
if isinstance(args[i], unicode):
args[i] = args[i].encode('utf-8')
fmt[i] = '{0}s'.format(1 + len(args[i]))
fmt = ''.join(fmt)
packet = struct.pack('<H' + fmt, packet_id, *args)
return packet
def parse_part(data,fmt):
res = []
for f in fmt:
if f == 's':
i = data.index('\0')
res.append(data[:i].decode("utf-8"))
#print res
data = data[i+1:]
else:
f = '<' + f
i = struct.calcsize(f)
res.append(struct.unpack(f,data[:i])[0])
data = data[i:]
return res,data
def parse_packet(data):
packet_id,data = parse_part(data,'H')
packet_id = packet_id[0]
origin = [packet_id,None,None]
if packet_id in sc_structs:
if hasattr(sc_structs[packet_id],'__call__'):
return sc_structs[packet_id](packet_id,data)
fmt = list(sc_structs[packet_id])
res,data = parse_part(data,fmt)
data = res
if packet_id in chat_packets:
origin[1] = data[0]
if packet_id in [ID.HON_SC_CHANNEL_MSG,ID.HON_SC_CHANNEL_EMOTE,ID.HON_SC_CHANNEL_ROLL]:
origin[2] = data[1]
data = data[2]
else:
data = data[1]
#else:
#print 'unknown packet'
#print(origin)
#print(dump(data))
#except:pass
return origin,data
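# A small round-trip sketch (Python 2, like the rest of this module): pack a
# client-side private message and parse a hand-built server-side whisper. The
# payload strings are made up for illustration.
if __name__ == '__main__':
    pm = pack(ID.HON_CS_PM, u'someuser', u'hello')
    print dump(pm)
    whisper = struct.pack('<H', ID.HON_SC_WHISPER) + 'sender\0hi there\0'
    origin, text = parse_packet(whisper)
    print origin, text  # [8, u'sender', None] hi there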
| StarcoderdataPython |
3380428 | <gh_stars>10-100
from signalflowgrapher.commands.command_handler import Command
from signalflowgrapher.model.model import ObservableGraph, CurvedBranch
class ChangeBranchWeightCommand(Command):
def __init__(self,
branch: CurvedBranch,
weight: str,
graph: ObservableGraph):
self.__branch = branch
self.__old_weight = branch.weight
self.__new_weight = weight
self.__graph = graph
def undo(self):
self.__graph.set_branch_weight(self.__branch, self.__old_weight)
def redo(self):
self.__graph.set_branch_weight(self.__branch, self.__new_weight)
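# Usage sketch (hypothetical branch/graph/handler objects; the real ones come
# from signalflowgrapher's model and command handler): the command captures the
# old weight at construction time, so undo/redo simply replay the two values.
#
#   cmd = ChangeBranchWeightCommand(branch, "2*s", graph)
#   command_handler.add_command(cmd)  # assuming the handler applies it via redo()
#   cmd.undo()                        # restores the previous weight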
| StarcoderdataPython |
193709 | <gh_stars>1-10
import unittest
import mock
import requests
import responses
from kerlescan import inventory_service_interface
from kerlescan.exceptions import ItemNotReturned, ServiceError
from drift import app
from . import fixtures
class InventoryServiceTests(unittest.TestCase):
def setUp(self):
test_connexion_app = app.create_app()
test_flask_app = test_connexion_app.app
self.client = test_flask_app.test_client()
self.mock_logger = mock.Mock()
self.mock_counters = {
"systems_compared_no_sysprofile": mock.MagicMock(),
"inventory_service_requests": mock.MagicMock(),
"inventory_service_exceptions": mock.MagicMock(),
}
def _create_response_for_systems(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s"
responses.add(
responses.GET,
url_template % (service_hostname, system_uuids),
body=fixtures.FETCH_SYSTEMS_INV_SVC,
status=requests.codes.ok,
content_type="application/json",
)
def _create_response_for_system_tags(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s/tags"
responses.add(
responses.GET,
url_template % (service_hostname, system_uuids),
body=fixtures.FETCH_SYSTEM_TAGS,
status=requests.codes.ok,
content_type="application/json",
)
def _create_response_for_system_profiles(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s/system_profile"
responses.add(
responses.GET,
url_template % (service_hostname, system_uuids),
body=fixtures.FETCH_SYSTEM_PROFILES_INV_SVC,
status=requests.codes.ok,
content_type="application/json",
)
def _create_500_response_for_systems(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s"
responses.add(
responses.GET,
url_template % (service_hostname, system_uuids),
body="I am error",
status=requests.codes.INTERNAL_SERVER_ERROR,
content_type="application/json",
)
def _create_500_response_for_system_profiles(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s/system_profile"
responses.add(
responses.GET,
url_template % (service_hostname, system_uuids),
body="I am error",
status=requests.codes.INTERNAL_SERVER_ERROR,
content_type="application/json",
)
@responses.activate
def test_fetch_systems_with_profiles(self):
systems_to_fetch = [
"243926fa-262f-11e9-a632-c85b761454fa",
"264fb5b2-262f-11e9-9b12-c85b761454fa",
]
self._create_response_for_systems(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
self._create_response_for_system_profiles(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
self._create_response_for_system_tags(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
systems = inventory_service_interface.fetch_systems_with_profiles(
systems_to_fetch, "my-auth-key", self.mock_logger, self.mock_counters
)
found_system_ids = {system["id"] for system in systems}
self.assertSetEqual(found_system_ids, set(systems_to_fetch))
@responses.activate
def test_fetch_systems_missing_system(self):
systems_to_fetch = [
"243926fa-262f-11e9-a632-c85b761454fa",
"264fb5b2-262f-11e9-9b12-c85b761454fa",
"269a3da8-262f-11e9-8ee5-c85b761454fa",
]
self._create_response_for_systems(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
self._create_response_for_system_profiles(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
self._create_response_for_system_tags(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
with self.assertRaises(ItemNotReturned) as cm:
inventory_service_interface.fetch_systems_with_profiles(
systems_to_fetch, "my-auth-key", self.mock_logger, self.mock_counters
)
self.assertEqual(
cm.exception.message,
"ids [269a3da8-262f-11e9-8ee5-c85b761454fa] not available to display",
)
@responses.activate
def test_fetch_systems_backend_service_error(self):
systems_to_fetch = [
"243926fa-262f-11e9-a632-c85b761454fa",
"264fb5b2-262f-11e9-9b12-c85b761454fa",
"269a3da8-262f-11e9-8ee5-c85b761454fa",
]
self._create_500_response_for_systems(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
self._create_500_response_for_system_profiles(
"inventory_svc_url_is_not_set", ",".join(systems_to_fetch)
)
with self.assertRaises(ServiceError) as cm:
inventory_service_interface.fetch_systems_with_profiles(
systems_to_fetch, "my-auth-key", self.mock_logger, self.mock_counters
)
self.assertEqual(cm.exception.message, "Error received from backend service")
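# Outside the test harness the same interface call looks roughly like this
# (a sketch; auth_key, logger and the counter dict are supplied by the caller
# and mirror the mocks used above):
#
#   systems = inventory_service_interface.fetch_systems_with_profiles(
#       ["243926fa-262f-11e9-a632-c85b761454fa"], auth_key, logger, counters)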
| StarcoderdataPython |
3379031 | <gh_stars>1-10
"""conf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from common.views import SidebarView
from django.conf import settings
# from drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView
from django.conf.urls.static import static
from django.contrib import admin
from django.conf.urls import url
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
SchemaView = get_schema_view(
openapi.Info(
title="Progress Tracker API",
default_version="1.0.0",
description="Tracks the progress of past interns",
# terms_of_service="https://www.google.com/policies/terms/",
# contact=openapi.Contact(email="<EMAIL>"),
# license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path("admin/", admin.site.urls),
path("api/v1/", include("common.urls")),
path("api/v1/", include("syncapp.urls")),
path("api/v1/", include("deals.urls")),
path("api/v1/", include("prospect.urls")),
path("api/v1/", include("email_template.urls")),
# path("api/v1/api-auth/", include("rest_framework.urls")),
path("sidebar", SidebarView.as_view(), name="sidebar"),
# DOCUMENTATION
# path('api/v1/schema/', SpectacularAPIView.as_view(), name='schema'),
# path('api/v1/swagger-docs/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
# path('api/v1/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),
]
# urlpatterns += static("zuri-root-config.js", document_root="react-spa/dist/zuri-root-config.js")
# urlpatterns += static("/static/zuri-zuri-plugin-company-sales-prospects.js", document_root="react-spa/epictetus/dist/zuri-zuri-plugin-company-sales-prospects.js")
urlpatterns += [
url(
r"^swagger(?P<format>\.json|\.yaml)$",
SchemaView.without_ui(cache_timeout=0),
name="schema-json",
),
url(
r"^api/v1/docs/$",
SchemaView.with_ui("swagger", cache_timeout=0),
name="schema-swagger-ui",
),
url(
r"^redoc/$", SchemaView.with_ui("redoc", cache_timeout=0), name="schema-redoc"
),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += [re_path(r"^.*", TemplateView.as_view(template_name="index.html"))]
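# Ordering note (a sketch): the catch-all re_path above must stay last, or it
# would shadow every API route registered earlier. A new app would be wired in
# before it, e.g.
#
#   path("api/v1/", include("hypothetical_app.urls")),  # added inside the first urlpatterns list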
| StarcoderdataPython |
3218274 | <reponame>pl8787/textnet-release
#-*-coding:utf8-*-
import copy, os
from gen_conf_file import *
from dataset_cfg import *
def gen_gate_bilstm(d_mem, init, lr, dataset):
# print "ORC: left & right lstm share parameters"
is_share = False
net = {}
# dataset = 'tb_fine'
# dataset = 'mr'
if dataset == 'mr':
net['cross_validation'] = 10
ds = DatasetCfg(dataset)
g_filler = gen_uniform_filter_setting(init)
zero_filler = gen_zero_filter_setting()
# g_updater = gen_adadelta_setting()
g_updater = gen_adagrad_setting(lr = lr, l2 = 0., batch_size = ds.train_batch_size)
g_layer_setting = {}
g_layer_setting['no_bias'] = True
g_layer_setting['w_filler'] = g_filler
g_layer_setting['u_filler'] = g_filler
g_layer_setting['b_filler'] = zero_filler
g_layer_setting['w_updater'] = g_updater
g_layer_setting['u_updater'] = g_updater
g_layer_setting['b_updater'] = g_updater
net['net_name'] = 'gate_bilstm'
net['need_reshape'] = True
net_cfg_train, net_cfg_valid, net_cfg_test = {}, {}, {}
net['net_config'] = [net_cfg_train, net_cfg_valid, net_cfg_test]
net_cfg_train["tag"] = "Train"
net_cfg_train["max_iters"] = (ds.n_train * 10)/ ds.train_batch_size
net_cfg_train["display_interval"] = (ds.n_train/ds.train_batch_size)/300
net_cfg_train["out_nodes"] = ['acc']
net_cfg_valid["tag"] = "Valid"
net_cfg_valid["max_iters"] = int(ds.n_valid/ds.valid_batch_size)
net_cfg_valid["display_interval"] = (ds.n_train/ds.train_batch_size)/3
net_cfg_valid["out_nodes"] = ['acc']
net_cfg_test["tag"] = "Test"
net_cfg_test["max_iters"] = int(ds.n_test/ds.test_batch_size)
net_cfg_test["display_interval"] = (ds.n_train/ds.train_batch_size)/3
net_cfg_test["out_nodes"] = ['acc']
layers = []
net['layers'] = layers
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'train_data'
layer['layer_type'] = 72
layer['tag'] = ['Train']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['batch_size'] = ds.train_batch_size
setting['data_file'] = ds.train_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'valid_data'
layer['layer_type'] = 72
layer['tag'] = ['Valid']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['batch_size'] = ds.valid_batch_size
setting['data_file'] = ds.valid_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = []
layer['top_nodes'] = ['y', 'x']
layer['layer_name'] = 'test_data'
layer['layer_type'] = 72
layer['tag'] = ['Test']
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['batch_size'] = ds.test_batch_size
setting['data_file'] = ds.test_data_file
setting['max_doc_len'] = ds.max_doc_len
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['x']
layer['top_nodes'] = ['word_rep_seq']
layer['layer_name'] = 'embedding'
layer['layer_type'] = 21
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['embedding_file'] = ds.embedding_file
setting['feat_size'] = ds.d_word_rep
setting['word_count'] = ds.vocab_size
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['l_lstm_seq']
layer['layer_name'] = 'l_lstm'
layer['layer_type'] = 24
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['d_mem'] = d_mem
setting['reverse'] = False
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['word_rep_seq']
layer['top_nodes'] = ['r_lstm_seq']
layer['layer_name'] = 'r_lstm'
layer['layer_type'] = 24
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['d_mem'] = d_mem
setting['reverse'] = True
if is_share:
print "ORC: share parameters."
share_setting_w = {}
share_setting_w['param_id'] = 0
share_setting_w['source_layer_name'] = 'l_lstm'
share_setting_w['source_param_id'] = 0
share_setting_u = {}
share_setting_u['param_id'] = 1
share_setting_u['source_layer_name'] = 'l_lstm'
share_setting_u['source_param_id'] = 1
share_setting_b = {}
share_setting_b['param_id'] = 2
share_setting_b['source_layer_name'] = 'l_lstm'
share_setting_b['source_param_id'] = 2
setting['share'] = [share_setting_w, share_setting_u, share_setting_b]
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['l_lstm_seq', 'r_lstm_seq']
layer['top_nodes'] = ['bi_lstm_seq']
layer['layer_name'] = 'concat'
layer['layer_type'] = 18
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['bottom_node_num'] = 2
setting['concat_dim_index'] = 3
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['bi_lstm_seq']
layer['top_nodes'] = ['drop_rep']
layer['layer_name'] = 'dropout'
layer['layer_type'] = 13
setting = {'rate':ds.dp_rate}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['drop_rep']
layer['top_nodes'] = ['pos_score']
layer['layer_name'] = 'dim_reduction_for_softmax'
layer['layer_type'] = 28
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = ds.num_class
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['pos_score']
layer['top_nodes'] = ['pos_prob']
layer['layer_name'] = 'softmax_func'
layer['layer_type'] = 37 # softmax_func
setting = {"axis":3}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['drop_rep']
layer['top_nodes'] = ['pos_weight_score']
layer['layer_name'] = 'dim_reduction_for_weight'
layer['layer_type'] = 28
setting = copy.deepcopy(g_layer_setting)
layer['setting'] = setting
setting['num_hidden'] = 1
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['pos_weight_score']
layer['top_nodes'] = ['pos_weight_prob']
layer['layer_name'] = 'weight_softmax'
layer['layer_type'] = 38 # softmax_func_var_len
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['pos_weight_prob', 'pos_prob']
layer['top_nodes'] = ['pos_prob_reweight']
layer['layer_name'] = 'weight_product'
layer['layer_type'] = 35
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['pos_prob_reweight']
layer['top_nodes'] = ['final_prob']
layer['layer_name'] = 'sum'
layer['layer_type'] = 39
setting = {"axis":2}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['final_prob', 'y']
layer['top_nodes'] = ['loss']
layer['layer_name'] = 'cross_entropy'
layer['layer_type'] = 57
setting = {}
layer['setting'] = setting
layer = {}
layers.append(layer)
layer['bottom_nodes'] = ['final_prob', 'y']
layer['top_nodes'] = ['acc']
layer['layer_name'] = 'accuracy'
layer['layer_type'] = 56
setting = {'topk':1}
layer['setting'] = setting
return net
for dataset in ['tb_binary']:
for d_mem in [50]:
idx = 0
for init in [0.03]:
# for lr in [0.3, 0.1, 0.03, 0.01, 0.003, 0.001]:
for lr in [0.001]:
net = gen_gate_bilstm(d_mem = d_mem, init = init, lr =lr, dataset=dataset)
net['log'] = 'log.bilstm.max.{0}.d{1}.{2}'.format(dataset, str(d_mem), str(idx))
# gen_conf_file(net, '/home/wsx/exp/tb/log/run.3/bilstm.max.tb_fine.model.' + str(idx))
gen_conf_file(net, '/home/wsx/exp/gate/lstm/run.10/model.bilstm.gate.{0}.d{1}.{2}'.format(dataset, str(d_mem), str(idx)))
idx += 1
# os.system("../bin/textnet ../bin/conv_lstm_simulation.model > ../bin/simulation/neg.gen.train.{0}".format(d_mem))
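# Quick sanity check (a sketch, independent of gen_conf_file; assumes the filler
# and updater settings are plain dicts): the generated config is a nested dict,
# so it can be dumped as JSON for inspection.
#
#   import json
#   net = gen_gate_bilstm(d_mem=50, init=0.03, lr=0.001, dataset='tb_binary')
#   print json.dumps(net, indent=2)[:500]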
| StarcoderdataPython |
1778921 | from enum import Enum
import tkinter as tk
from ..translator import Translator
from ..resources import get_resource_path
from ..updater.SimpleSemVer import SimpleSemVer
class UpdateType(Enum):
App = 0
Skills = 1
AppLanguage = 2
SkillCorrections = 3
class AskUpdate(tk.Toplevel):
def __init__(
self,
parent,
_: Translator,
update_type: UpdateType,
local: SimpleSemVer,
remote: SimpleSemVer,
):
super().__init__(parent)
self._ = _
self.title(_("new-version"))
try:
icon = get_resource_path("ICON")
self.iconbitmap(icon)
        except Exception:
            # The icon is optional; iconbitmap can fail (e.g. on non-Windows platforms).
            pass
self.answer = False
self.message = self.build_message(update_type, local, remote, self._)
self.lang_lbl = tk.Label(self, text=self.message)
self.yes_btn = tk.Button(self, text=_("yes"), command=self.yes)
self.no_btn = tk.Button(self, text=_("no"), command=self.no)
self.lang_lbl.grid(row=0, columnspan=2)
self.yes_btn.grid(row=1, column=0, sticky="e")
self.no_btn.grid(row=1, column=1, sticky="w")
def build_message(self, update_type, local, remote, _):
return _(
{
UpdateType.App: "new-app-update",
UpdateType.Skills: "new-skill-update",
UpdateType.AppLanguage: "new-app-language-update",
UpdateType.SkillCorrections: "new-skill-correction-update",
}[update_type]
).format(local, remote)
def yes(self):
self.answer = True
self.destroy()
def no(self):
self.answer = False
self.destroy()
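# Usage sketch (hypothetical call site; the real parent window and Translator
# come from the surrounding application): the caller blocks on the dialog and
# then reads the answer flag.
#
#   dlg = AskUpdate(root, translator, UpdateType.Skills, local_ver, remote_ver)
#   root.wait_window(dlg)
#   if dlg.answer:
#       start_update()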
| StarcoderdataPython |
1738986 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import re
import requests
import responses
from ibm_cloud_networking_services.zone_rate_limits_v1 import *
crn = 'testString'
zone_identifier = 'testString'
service = ZoneRateLimitsV1(
authenticator=NoAuthAuthenticator(),
crn=crn,
zone_identifier=zone_identifier
)
base_url = 'https://api.cis.cloud.ibm.com'
service.set_service_url(base_url)
##############################################################################
# Start of Service: ZoneRateLimits
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_all_zone_rate_limits
#-----------------------------------------------------------------------------
class TestListAllZoneRateLimits():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# list_all_zone_rate_limits()
#--------------------------------------------------------
@responses.activate
def test_list_all_zone_rate_limits_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}], "result_info": {"page": 1, "per_page": 10, "count": 1, "total_count": 1}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
page = 38
per_page = 5
# Invoke method
response = service.list_all_zone_rate_limits(
page=page,
per_page=per_page,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate query params
query_string = responses.calls[0].request.url.split('?',1)[1]
query_string = requests.utils.unquote(query_string)
assert 'page={}'.format(page) in query_string
assert 'per_page={}'.format(per_page) in query_string
#--------------------------------------------------------
# test_list_all_zone_rate_limits_required_params()
#--------------------------------------------------------
@responses.activate
def test_list_all_zone_rate_limits_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}], "result_info": {"page": 1, "per_page": 10, "count": 1, "total_count": 1}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_all_zone_rate_limits()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_list_all_zone_rate_limits_value_error()
#--------------------------------------------------------
@responses.activate
def test_list_all_zone_rate_limits_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": [{"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}], "result_info": {"page": 1, "per_page": 10, "count": 1, "total_count": 1}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.list_all_zone_rate_limits(**req_copy)
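# Outside the test harness the same listing call runs against the live API
# (a sketch; crn, zone_identifier and an IAM authenticator are supplied by the
# caller when constructing ZoneRateLimitsV1):
#
#   limits = service.list_all_zone_rate_limits(page=1, per_page=5).get_result()
#   for rule in limits["result"]:
#       print(rule["id"], rule["threshold"], rule["period"])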
#-----------------------------------------------------------------------------
# Test Class for create_zone_rate_limits
#-----------------------------------------------------------------------------
class TestCreateZoneRateLimits():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# create_zone_rate_limits()
#--------------------------------------------------------
@responses.activate
def test_create_zone_rate_limits_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a RatelimitInputActionResponse model
ratelimit_input_action_response_model = {}
ratelimit_input_action_response_model['content_type'] = 'text/plain'
ratelimit_input_action_response_model['body'] = 'This request has been rate-limited.'
# Construct a dict representation of a RatelimitInputAction model
ratelimit_input_action_model = {}
ratelimit_input_action_model['mode'] = 'simulate'
ratelimit_input_action_model['timeout'] = 60
ratelimit_input_action_model['response'] = ratelimit_input_action_response_model
# Construct a dict representation of a RatelimitInputMatchResponseHeadersItem model
ratelimit_input_match_response_headers_item_model = {}
ratelimit_input_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_input_match_response_headers_item_model['op'] = 'ne'
ratelimit_input_match_response_headers_item_model['value'] = 'HIT'
# Construct a dict representation of a RatelimitInputMatchRequest model
ratelimit_input_match_request_model = {}
ratelimit_input_match_request_model['methods'] = ['GET']
ratelimit_input_match_request_model['schemes'] = ['HTTP']
ratelimit_input_match_request_model['url'] = '*.example.org/path*'
# Construct a dict representation of a RatelimitInputMatchResponse model
ratelimit_input_match_response_model = {}
ratelimit_input_match_response_model['status'] = [403]
ratelimit_input_match_response_model['headers'] = [ratelimit_input_match_response_headers_item_model]
ratelimit_input_match_response_model['origin_traffic'] = False
# Construct a dict representation of a RatelimitInputMatch model
ratelimit_input_match_model = {}
ratelimit_input_match_model['request'] = ratelimit_input_match_request_model
ratelimit_input_match_model['response'] = ratelimit_input_match_response_model
# Construct a dict representation of a RatelimitInputBypassItem model
ratelimit_input_bypass_item_model = {}
ratelimit_input_bypass_item_model['name'] = 'url'
ratelimit_input_bypass_item_model['value'] = 'api.example.com/*'
# Construct a dict representation of a RatelimitInputCorrelate model
ratelimit_input_correlate_model = {}
ratelimit_input_correlate_model['by'] = 'nat'
# Set up parameter values
threshold = 1000
period = 60
action = ratelimit_input_action_model
match = ratelimit_input_match_model
disabled = False
description = 'Prevent multiple login failures to mitigate brute force attacks'
bypass = [ratelimit_input_bypass_item_model]
correlate = ratelimit_input_correlate_model
# Invoke method
response = service.create_zone_rate_limits(
threshold=threshold,
period=period,
action=action,
match=match,
disabled=disabled,
description=description,
bypass=bypass,
correlate=correlate,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['threshold'] == 1000
assert req_body['period'] == 60
assert req_body['action'] == ratelimit_input_action_model
assert req_body['match'] == ratelimit_input_match_model
assert req_body['disabled'] == False
assert req_body['description'] == 'Prevent multiple login failures to mitigate brute force attacks'
assert req_body['bypass'] == [ratelimit_input_bypass_item_model]
assert req_body['correlate'] == ratelimit_input_correlate_model
#--------------------------------------------------------
# test_create_zone_rate_limits_required_params()
#--------------------------------------------------------
@responses.activate
def test_create_zone_rate_limits_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.create_zone_rate_limits()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_create_zone_rate_limits_value_error()
#--------------------------------------------------------
@responses.activate
def test_create_zone_rate_limits_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.create_zone_rate_limits(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for delete_zone_rate_limit
#-----------------------------------------------------------------------------
class TestDeleteZoneRateLimit():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# delete_zone_rate_limit()
#--------------------------------------------------------
@responses.activate
def test_delete_zone_rate_limit_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "f1aba936b94213e5b8dca0c0dbf1f9cc"}}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Invoke method
response = service.delete_zone_rate_limit(
rate_limit_identifier,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_delete_zone_rate_limit_value_error()
#--------------------------------------------------------
@responses.activate
def test_delete_zone_rate_limit_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "f1aba936b94213e5b8dca0c0dbf1f9cc"}}'
responses.add(responses.DELETE,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"rate_limit_identifier": rate_limit_identifier,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.delete_zone_rate_limit(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for get_rate_limit
#-----------------------------------------------------------------------------
class TestGetRateLimit():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# get_rate_limit()
#--------------------------------------------------------
@responses.activate
def test_get_rate_limit_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Invoke method
response = service.get_rate_limit(
rate_limit_identifier,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_rate_limit_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_rate_limit_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"rate_limit_identifier": rate_limit_identifier,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.get_rate_limit(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for update_rate_limit
#-----------------------------------------------------------------------------
class TestUpdateRateLimit():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# update_rate_limit()
#--------------------------------------------------------
@responses.activate
def test_update_rate_limit_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a RatelimitInputActionResponse model
ratelimit_input_action_response_model = {}
ratelimit_input_action_response_model['content_type'] = 'text/plain'
ratelimit_input_action_response_model['body'] = 'This request has been rate-limited.'
# Construct a dict representation of a RatelimitInputAction model
ratelimit_input_action_model = {}
ratelimit_input_action_model['mode'] = 'simulate'
ratelimit_input_action_model['timeout'] = 60
ratelimit_input_action_model['response'] = ratelimit_input_action_response_model
# Construct a dict representation of a RatelimitInputMatchResponseHeadersItem model
ratelimit_input_match_response_headers_item_model = {}
ratelimit_input_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_input_match_response_headers_item_model['op'] = 'ne'
ratelimit_input_match_response_headers_item_model['value'] = 'HIT'
# Construct a dict representation of a RatelimitInputMatchRequest model
ratelimit_input_match_request_model = {}
ratelimit_input_match_request_model['methods'] = ['GET']
ratelimit_input_match_request_model['schemes'] = ['HTTP']
ratelimit_input_match_request_model['url'] = '*.example.org/path*'
# Construct a dict representation of a RatelimitInputMatchResponse model
ratelimit_input_match_response_model = {}
ratelimit_input_match_response_model['status'] = [403]
ratelimit_input_match_response_model['headers'] = [ratelimit_input_match_response_headers_item_model]
ratelimit_input_match_response_model['origin_traffic'] = False
# Construct a dict representation of a RatelimitInputMatch model
ratelimit_input_match_model = {}
ratelimit_input_match_model['request'] = ratelimit_input_match_request_model
ratelimit_input_match_model['response'] = ratelimit_input_match_response_model
# Construct a dict representation of a RatelimitInputBypassItem model
ratelimit_input_bypass_item_model = {}
ratelimit_input_bypass_item_model['name'] = 'url'
ratelimit_input_bypass_item_model['value'] = 'api.example.com/*'
# Construct a dict representation of a RatelimitInputCorrelate model
ratelimit_input_correlate_model = {}
ratelimit_input_correlate_model['by'] = 'nat'
# Set up parameter values
rate_limit_identifier = 'testString'
threshold = 1000
period = 60
action = ratelimit_input_action_model
match = ratelimit_input_match_model
disabled = False
description = 'Prevent multiple login failures to mitigate brute force attacks'
bypass = [ratelimit_input_bypass_item_model]
correlate = ratelimit_input_correlate_model
# Invoke method
response = service.update_rate_limit(
rate_limit_identifier,
threshold=threshold,
period=period,
action=action,
match=match,
disabled=disabled,
description=description,
bypass=bypass,
correlate=correlate,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['threshold'] == 1000
assert req_body['period'] == 60
assert req_body['action'] == ratelimit_input_action_model
assert req_body['match'] == ratelimit_input_match_model
assert req_body['disabled'] == False
assert req_body['description'] == 'Prevent multiple login failures to mitigate brute force attacks'
assert req_body['bypass'] == [ratelimit_input_bypass_item_model]
assert req_body['correlate'] == ratelimit_input_correlate_model
#--------------------------------------------------------
# test_update_rate_limit_required_params()
#--------------------------------------------------------
@responses.activate
def test_update_rate_limit_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Invoke method
response = service.update_rate_limit(
rate_limit_identifier,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_update_rate_limit_value_error()
#--------------------------------------------------------
@responses.activate
def test_update_rate_limit_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/rate_limits/testString')
mock_response = '{"success": true, "errors": [["[]"]], "messages": [["[]"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "disabled": false, "description": "Prevent multiple login failures to mitigate brute force attacks", "bypass": [{"name": "url", "value": "example.com/*"}], "threshold": 1000, "period": 60, "correlate": {"by": "nat"}, "action": {"mode": "simulate", "timeout": 60, "response": {"content_type": "text/plain", "body": "This request has been rate-limited."}}, "match": {"request": {"methods": ["_ALL_"], "schemes": ["_ALL_"], "url": "*.example.org/path*"}, "response": {"status": [403], "headers": [{"name": "Cf-Cache-Status", "op": "ne", "value": "HIT"}], "origin_traffic": false}}}}'
responses.add(responses.PUT,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
rate_limit_identifier = 'testString'
# Pass in all but one required param and check for a ValueError
req_param_dict = {
"rate_limit_identifier": rate_limit_identifier,
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.update_rate_limit(**req_copy)
# endregion
##############################################################################
# End of Service: ZoneRateLimits
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for DeleteRateLimitRespResult
#-----------------------------------------------------------------------------
class TestDeleteRateLimitRespResult():
#--------------------------------------------------------
# Test serialization/deserialization for DeleteRateLimitRespResult
#--------------------------------------------------------
def test_delete_rate_limit_resp_result_serialization(self):
# Construct a json representation of a DeleteRateLimitRespResult model
delete_rate_limit_resp_result_model_json = {}
delete_rate_limit_resp_result_model_json['id'] = 'f1aba936b94213e5b8dca0c0dbf1f9cc'
# Construct a model instance of DeleteRateLimitRespResult by calling from_dict on the json representation
delete_rate_limit_resp_result_model = DeleteRateLimitRespResult.from_dict(delete_rate_limit_resp_result_model_json)
assert delete_rate_limit_resp_result_model != False
# Construct a model instance of DeleteRateLimitRespResult by calling from_dict on the json representation
delete_rate_limit_resp_result_model_dict = DeleteRateLimitRespResult.from_dict(delete_rate_limit_resp_result_model_json).__dict__
delete_rate_limit_resp_result_model2 = DeleteRateLimitRespResult(**delete_rate_limit_resp_result_model_dict)
# Verify the model instances are equivalent
assert delete_rate_limit_resp_result_model == delete_rate_limit_resp_result_model2
# Convert model instance back to dict and verify no loss of data
delete_rate_limit_resp_result_model_json2 = delete_rate_limit_resp_result_model.to_dict()
assert delete_rate_limit_resp_result_model_json2 == delete_rate_limit_resp_result_model_json
#-----------------------------------------------------------------------------
# Test Class for ListRatelimitRespResultInfo
#-----------------------------------------------------------------------------
class TestListRatelimitRespResultInfo():
#--------------------------------------------------------
# Test serialization/deserialization for ListRatelimitRespResultInfo
#--------------------------------------------------------
def test_list_ratelimit_resp_result_info_serialization(self):
# Construct a json representation of a ListRatelimitRespResultInfo model
list_ratelimit_resp_result_info_model_json = {}
list_ratelimit_resp_result_info_model_json['page'] = 1
list_ratelimit_resp_result_info_model_json['per_page'] = 10
list_ratelimit_resp_result_info_model_json['count'] = 1
list_ratelimit_resp_result_info_model_json['total_count'] = 1
# Construct a model instance of ListRatelimitRespResultInfo by calling from_dict on the json representation
list_ratelimit_resp_result_info_model = ListRatelimitRespResultInfo.from_dict(list_ratelimit_resp_result_info_model_json)
assert list_ratelimit_resp_result_info_model != False
# Construct a second ListRatelimitRespResultInfo instance from the dict form of the first
list_ratelimit_resp_result_info_model_dict = ListRatelimitRespResultInfo.from_dict(list_ratelimit_resp_result_info_model_json).__dict__
list_ratelimit_resp_result_info_model2 = ListRatelimitRespResultInfo(**list_ratelimit_resp_result_info_model_dict)
# Verify the model instances are equivalent
assert list_ratelimit_resp_result_info_model == list_ratelimit_resp_result_info_model2
# Convert model instance back to dict and verify no loss of data
list_ratelimit_resp_result_info_model_json2 = list_ratelimit_resp_result_info_model.to_dict()
assert list_ratelimit_resp_result_info_model_json2 == list_ratelimit_resp_result_info_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputAction
#-----------------------------------------------------------------------------
class TestRatelimitInputAction():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputAction
#--------------------------------------------------------
def test_ratelimit_input_action_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_input_action_response_model = {} # RatelimitInputActionResponse
ratelimit_input_action_response_model['content_type'] = 'text/plain'
ratelimit_input_action_response_model['body'] = 'This request has been rate-limited.'
# Construct a json representation of a RatelimitInputAction model
ratelimit_input_action_model_json = {}
ratelimit_input_action_model_json['mode'] = 'simulate'
ratelimit_input_action_model_json['timeout'] = 60
ratelimit_input_action_model_json['response'] = ratelimit_input_action_response_model
# Construct a model instance of RatelimitInputAction by calling from_dict on the json representation
ratelimit_input_action_model = RatelimitInputAction.from_dict(ratelimit_input_action_model_json)
assert ratelimit_input_action_model != False
# Construct a second RatelimitInputAction instance from the dict form of the first
ratelimit_input_action_model_dict = RatelimitInputAction.from_dict(ratelimit_input_action_model_json).__dict__
ratelimit_input_action_model2 = RatelimitInputAction(**ratelimit_input_action_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_action_model == ratelimit_input_action_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_action_model_json2 = ratelimit_input_action_model.to_dict()
assert ratelimit_input_action_model_json2 == ratelimit_input_action_model_json
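# Sketch of an equivalent direct construction (assuming the constructor accepts
# the same keyword arguments that from_dict populates, as the **dict call above
# suggests):
#   response = RatelimitInputActionResponse(content_type='text/plain',
#                                           body='This request has been rate-limited.')
#   action = RatelimitInputAction(mode='simulate', timeout=60, response=response)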
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputActionResponse
#-----------------------------------------------------------------------------
class TestRatelimitInputActionResponse():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputActionResponse
#--------------------------------------------------------
def test_ratelimit_input_action_response_serialization(self):
# Construct a json representation of a RatelimitInputActionResponse model
ratelimit_input_action_response_model_json = {}
ratelimit_input_action_response_model_json['content_type'] = 'text/plain'
ratelimit_input_action_response_model_json['body'] = 'This request has been rate-limited.'
# Construct a model instance of RatelimitInputActionResponse by calling from_dict on the json representation
ratelimit_input_action_response_model = RatelimitInputActionResponse.from_dict(ratelimit_input_action_response_model_json)
assert ratelimit_input_action_response_model != False
# Construct a second RatelimitInputActionResponse instance from the dict form of the first
ratelimit_input_action_response_model_dict = RatelimitInputActionResponse.from_dict(ratelimit_input_action_response_model_json).__dict__
ratelimit_input_action_response_model2 = RatelimitInputActionResponse(**ratelimit_input_action_response_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_action_response_model == ratelimit_input_action_response_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_action_response_model_json2 = ratelimit_input_action_response_model.to_dict()
assert ratelimit_input_action_response_model_json2 == ratelimit_input_action_response_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputBypassItem
#-----------------------------------------------------------------------------
class TestRatelimitInputBypassItem():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputBypassItem
#--------------------------------------------------------
def test_ratelimit_input_bypass_item_serialization(self):
# Construct a json representation of a RatelimitInputBypassItem model
ratelimit_input_bypass_item_model_json = {}
ratelimit_input_bypass_item_model_json['name'] = 'url'
ratelimit_input_bypass_item_model_json['value'] = 'api.example.com/*'
# Construct a model instance of RatelimitInputBypassItem by calling from_dict on the json representation
ratelimit_input_bypass_item_model = RatelimitInputBypassItem.from_dict(ratelimit_input_bypass_item_model_json)
assert ratelimit_input_bypass_item_model != False
# Construct a second RatelimitInputBypassItem instance from the dict form of the first
ratelimit_input_bypass_item_model_dict = RatelimitInputBypassItem.from_dict(ratelimit_input_bypass_item_model_json).__dict__
ratelimit_input_bypass_item_model2 = RatelimitInputBypassItem(**ratelimit_input_bypass_item_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_bypass_item_model == ratelimit_input_bypass_item_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_bypass_item_model_json2 = ratelimit_input_bypass_item_model.to_dict()
assert ratelimit_input_bypass_item_model_json2 == ratelimit_input_bypass_item_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputCorrelate
#-----------------------------------------------------------------------------
class TestRatelimitInputCorrelate():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputCorrelate
#--------------------------------------------------------
def test_ratelimit_input_correlate_serialization(self):
# Construct a json representation of a RatelimitInputCorrelate model
ratelimit_input_correlate_model_json = {}
ratelimit_input_correlate_model_json['by'] = 'nat'
# Construct a model instance of RatelimitInputCorrelate by calling from_dict on the json representation
ratelimit_input_correlate_model = RatelimitInputCorrelate.from_dict(ratelimit_input_correlate_model_json)
assert ratelimit_input_correlate_model != False
# Construct a second RatelimitInputCorrelate instance from the dict form of the first
ratelimit_input_correlate_model_dict = RatelimitInputCorrelate.from_dict(ratelimit_input_correlate_model_json).__dict__
ratelimit_input_correlate_model2 = RatelimitInputCorrelate(**ratelimit_input_correlate_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_correlate_model == ratelimit_input_correlate_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_correlate_model_json2 = ratelimit_input_correlate_model.to_dict()
assert ratelimit_input_correlate_model_json2 == ratelimit_input_correlate_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputMatch
#-----------------------------------------------------------------------------
class TestRatelimitInputMatch():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputMatch
#--------------------------------------------------------
def test_ratelimit_input_match_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_input_match_response_headers_item_model = {} # RatelimitInputMatchResponseHeadersItem
ratelimit_input_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_input_match_response_headers_item_model['op'] = 'ne'
ratelimit_input_match_response_headers_item_model['value'] = 'HIT'
ratelimit_input_match_request_model = {} # RatelimitInputMatchRequest
ratelimit_input_match_request_model['methods'] = ['GET']
ratelimit_input_match_request_model['schemes'] = ['HTTP']
ratelimit_input_match_request_model['url'] = '*.example.org/path*'
ratelimit_input_match_response_model = {} # RatelimitInputMatchResponse
ratelimit_input_match_response_model['status'] = [403]
ratelimit_input_match_response_model['headers'] = [ratelimit_input_match_response_headers_item_model]
ratelimit_input_match_response_model['origin_traffic'] = False
# Construct a json representation of a RatelimitInputMatch model
ratelimit_input_match_model_json = {}
ratelimit_input_match_model_json['request'] = ratelimit_input_match_request_model
ratelimit_input_match_model_json['response'] = ratelimit_input_match_response_model
# Construct a model instance of RatelimitInputMatch by calling from_dict on the json representation
ratelimit_input_match_model = RatelimitInputMatch.from_dict(ratelimit_input_match_model_json)
assert ratelimit_input_match_model != False
# Construct a second RatelimitInputMatch instance from the dict form of the first
ratelimit_input_match_model_dict = RatelimitInputMatch.from_dict(ratelimit_input_match_model_json).__dict__
ratelimit_input_match_model2 = RatelimitInputMatch(**ratelimit_input_match_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_match_model == ratelimit_input_match_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_match_model_json2 = ratelimit_input_match_model.to_dict()
assert ratelimit_input_match_model_json2 == ratelimit_input_match_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputMatchRequest
#-----------------------------------------------------------------------------
class TestRatelimitInputMatchRequest():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputMatchRequest
#--------------------------------------------------------
def test_ratelimit_input_match_request_serialization(self):
# Construct a json representation of a RatelimitInputMatchRequest model
ratelimit_input_match_request_model_json = {}
ratelimit_input_match_request_model_json['methods'] = ['GET']
ratelimit_input_match_request_model_json['schemes'] = ['HTTP']
ratelimit_input_match_request_model_json['url'] = '*.example.org/path*'
# Construct a model instance of RatelimitInputMatchRequest by calling from_dict on the json representation
ratelimit_input_match_request_model = RatelimitInputMatchRequest.from_dict(ratelimit_input_match_request_model_json)
assert ratelimit_input_match_request_model != False
# Construct a second RatelimitInputMatchRequest instance from the dict form of the first
ratelimit_input_match_request_model_dict = RatelimitInputMatchRequest.from_dict(ratelimit_input_match_request_model_json).__dict__
ratelimit_input_match_request_model2 = RatelimitInputMatchRequest(**ratelimit_input_match_request_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_match_request_model == ratelimit_input_match_request_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_match_request_model_json2 = ratelimit_input_match_request_model.to_dict()
assert ratelimit_input_match_request_model_json2 == ratelimit_input_match_request_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputMatchResponse
#-----------------------------------------------------------------------------
class TestRatelimitInputMatchResponse():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputMatchResponse
#--------------------------------------------------------
def test_ratelimit_input_match_response_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_input_match_response_headers_item_model = {} # RatelimitInputMatchResponseHeadersItem
ratelimit_input_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_input_match_response_headers_item_model['op'] = 'ne'
ratelimit_input_match_response_headers_item_model['value'] = 'HIT'
# Construct a json representation of a RatelimitInputMatchResponse model
ratelimit_input_match_response_model_json = {}
ratelimit_input_match_response_model_json['status'] = [403]
ratelimit_input_match_response_model_json['headers'] = [ratelimit_input_match_response_headers_item_model]
ratelimit_input_match_response_model_json['origin_traffic'] = False
# Construct a model instance of RatelimitInputMatchResponse by calling from_dict on the json representation
ratelimit_input_match_response_model = RatelimitInputMatchResponse.from_dict(ratelimit_input_match_response_model_json)
assert ratelimit_input_match_response_model != False
# Construct a second RatelimitInputMatchResponse instance from the dict form of the first
ratelimit_input_match_response_model_dict = RatelimitInputMatchResponse.from_dict(ratelimit_input_match_response_model_json).__dict__
ratelimit_input_match_response_model2 = RatelimitInputMatchResponse(**ratelimit_input_match_response_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_match_response_model == ratelimit_input_match_response_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_match_response_model_json2 = ratelimit_input_match_response_model.to_dict()
assert ratelimit_input_match_response_model_json2 == ratelimit_input_match_response_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitInputMatchResponseHeadersItem
#-----------------------------------------------------------------------------
class TestRatelimitInputMatchResponseHeadersItem():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitInputMatchResponseHeadersItem
#--------------------------------------------------------
def test_ratelimit_input_match_response_headers_item_serialization(self):
# Construct a json representation of a RatelimitInputMatchResponseHeadersItem model
ratelimit_input_match_response_headers_item_model_json = {}
ratelimit_input_match_response_headers_item_model_json['name'] = 'Cf-Cache-Status'
ratelimit_input_match_response_headers_item_model_json['op'] = 'ne'
ratelimit_input_match_response_headers_item_model_json['value'] = 'HIT'
# Construct a model instance of RatelimitInputMatchResponseHeadersItem by calling from_dict on the json representation
ratelimit_input_match_response_headers_item_model = RatelimitInputMatchResponseHeadersItem.from_dict(ratelimit_input_match_response_headers_item_model_json)
assert ratelimit_input_match_response_headers_item_model != False
# Construct a second RatelimitInputMatchResponseHeadersItem instance from the dict form of the first
ratelimit_input_match_response_headers_item_model_dict = RatelimitInputMatchResponseHeadersItem.from_dict(ratelimit_input_match_response_headers_item_model_json).__dict__
ratelimit_input_match_response_headers_item_model2 = RatelimitInputMatchResponseHeadersItem(**ratelimit_input_match_response_headers_item_model_dict)
# Verify the model instances are equivalent
assert ratelimit_input_match_response_headers_item_model == ratelimit_input_match_response_headers_item_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_input_match_response_headers_item_model_json2 = ratelimit_input_match_response_headers_item_model.to_dict()
assert ratelimit_input_match_response_headers_item_model_json2 == ratelimit_input_match_response_headers_item_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectAction
#-----------------------------------------------------------------------------
class TestRatelimitObjectAction():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectAction
#--------------------------------------------------------
def test_ratelimit_object_action_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_action_response_model = {} # RatelimitObjectActionResponse
ratelimit_object_action_response_model['content_type'] = 'text/plain'
ratelimit_object_action_response_model['body'] = 'This request has been rate-limited.'
# Construct a json representation of a RatelimitObjectAction model
ratelimit_object_action_model_json = {}
ratelimit_object_action_model_json['mode'] = 'simulate'
ratelimit_object_action_model_json['timeout'] = 60
ratelimit_object_action_model_json['response'] = ratelimit_object_action_response_model
# Construct a model instance of RatelimitObjectAction by calling from_dict on the json representation
ratelimit_object_action_model = RatelimitObjectAction.from_dict(ratelimit_object_action_model_json)
assert ratelimit_object_action_model != False
# Construct a second RatelimitObjectAction instance from the dict form of the first
ratelimit_object_action_model_dict = RatelimitObjectAction.from_dict(ratelimit_object_action_model_json).__dict__
ratelimit_object_action_model2 = RatelimitObjectAction(**ratelimit_object_action_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_action_model == ratelimit_object_action_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_action_model_json2 = ratelimit_object_action_model.to_dict()
assert ratelimit_object_action_model_json2 == ratelimit_object_action_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectActionResponse
#-----------------------------------------------------------------------------
class TestRatelimitObjectActionResponse():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectActionResponse
#--------------------------------------------------------
def test_ratelimit_object_action_response_serialization(self):
# Construct a json representation of a RatelimitObjectActionResponse model
ratelimit_object_action_response_model_json = {}
ratelimit_object_action_response_model_json['content_type'] = 'text/plain'
ratelimit_object_action_response_model_json['body'] = 'This request has been rate-limited.'
# Construct a model instance of RatelimitObjectActionResponse by calling from_dict on the json representation
ratelimit_object_action_response_model = RatelimitObjectActionResponse.from_dict(ratelimit_object_action_response_model_json)
assert ratelimit_object_action_response_model != False
# Construct a second RatelimitObjectActionResponse instance from the dict form of the first
ratelimit_object_action_response_model_dict = RatelimitObjectActionResponse.from_dict(ratelimit_object_action_response_model_json).__dict__
ratelimit_object_action_response_model2 = RatelimitObjectActionResponse(**ratelimit_object_action_response_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_action_response_model == ratelimit_object_action_response_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_action_response_model_json2 = ratelimit_object_action_response_model.to_dict()
assert ratelimit_object_action_response_model_json2 == ratelimit_object_action_response_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectBypassItem
#-----------------------------------------------------------------------------
class TestRatelimitObjectBypassItem():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectBypassItem
#--------------------------------------------------------
def test_ratelimit_object_bypass_item_serialization(self):
# Construct a json representation of a RatelimitObjectBypassItem model
ratelimit_object_bypass_item_model_json = {}
ratelimit_object_bypass_item_model_json['name'] = 'url'
ratelimit_object_bypass_item_model_json['value'] = 'example.com/*'
# Construct a model instance of RatelimitObjectBypassItem by calling from_dict on the json representation
ratelimit_object_bypass_item_model = RatelimitObjectBypassItem.from_dict(ratelimit_object_bypass_item_model_json)
assert ratelimit_object_bypass_item_model != False
# Construct a second RatelimitObjectBypassItem instance from the dict form of the first
ratelimit_object_bypass_item_model_dict = RatelimitObjectBypassItem.from_dict(ratelimit_object_bypass_item_model_json).__dict__
ratelimit_object_bypass_item_model2 = RatelimitObjectBypassItem(**ratelimit_object_bypass_item_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_bypass_item_model == ratelimit_object_bypass_item_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_bypass_item_model_json2 = ratelimit_object_bypass_item_model.to_dict()
assert ratelimit_object_bypass_item_model_json2 == ratelimit_object_bypass_item_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectCorrelate
#-----------------------------------------------------------------------------
class TestRatelimitObjectCorrelate():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectCorrelate
#--------------------------------------------------------
def test_ratelimit_object_correlate_serialization(self):
# Construct a json representation of a RatelimitObjectCorrelate model
ratelimit_object_correlate_model_json = {}
ratelimit_object_correlate_model_json['by'] = 'nat'
# Construct a model instance of RatelimitObjectCorrelate by calling from_dict on the json representation
ratelimit_object_correlate_model = RatelimitObjectCorrelate.from_dict(ratelimit_object_correlate_model_json)
assert ratelimit_object_correlate_model != False
# Construct a second RatelimitObjectCorrelate instance from the dict form of the first
ratelimit_object_correlate_model_dict = RatelimitObjectCorrelate.from_dict(ratelimit_object_correlate_model_json).__dict__
ratelimit_object_correlate_model2 = RatelimitObjectCorrelate(**ratelimit_object_correlate_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_correlate_model == ratelimit_object_correlate_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_correlate_model_json2 = ratelimit_object_correlate_model.to_dict()
assert ratelimit_object_correlate_model_json2 == ratelimit_object_correlate_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectMatch
#-----------------------------------------------------------------------------
class TestRatelimitObjectMatch():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectMatch
#--------------------------------------------------------
def test_ratelimit_object_match_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_match_response_headers_item_model = {} # RatelimitObjectMatchResponseHeadersItem
ratelimit_object_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model['op'] = 'ne'
ratelimit_object_match_response_headers_item_model['value'] = 'HIT'
ratelimit_object_match_request_model = {} # RatelimitObjectMatchRequest
ratelimit_object_match_request_model['methods'] = ['_ALL_']
ratelimit_object_match_request_model['schemes'] = ['_ALL_']
ratelimit_object_match_request_model['url'] = '*.example.org/path*'
ratelimit_object_match_response_model = {} # RatelimitObjectMatchResponse
ratelimit_object_match_response_model['status'] = [403]
ratelimit_object_match_response_model['headers'] = [ratelimit_object_match_response_headers_item_model]
ratelimit_object_match_response_model['origin_traffic'] = False
# Construct a json representation of a RatelimitObjectMatch model
ratelimit_object_match_model_json = {}
ratelimit_object_match_model_json['request'] = ratelimit_object_match_request_model
ratelimit_object_match_model_json['response'] = ratelimit_object_match_response_model
# Construct a model instance of RatelimitObjectMatch by calling from_dict on the json representation
ratelimit_object_match_model = RatelimitObjectMatch.from_dict(ratelimit_object_match_model_json)
assert ratelimit_object_match_model != False
# Construct a second RatelimitObjectMatch instance from the dict form of the first
ratelimit_object_match_model_dict = RatelimitObjectMatch.from_dict(ratelimit_object_match_model_json).__dict__
ratelimit_object_match_model2 = RatelimitObjectMatch(**ratelimit_object_match_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_match_model == ratelimit_object_match_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_match_model_json2 = ratelimit_object_match_model.to_dict()
assert ratelimit_object_match_model_json2 == ratelimit_object_match_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectMatchRequest
#-----------------------------------------------------------------------------
class TestRatelimitObjectMatchRequest():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectMatchRequest
#--------------------------------------------------------
def test_ratelimit_object_match_request_serialization(self):
# Construct a json representation of a RatelimitObjectMatchRequest model
ratelimit_object_match_request_model_json = {}
ratelimit_object_match_request_model_json['methods'] = ['_ALL_']
ratelimit_object_match_request_model_json['schemes'] = ['_ALL_']
ratelimit_object_match_request_model_json['url'] = '*.example.org/path*'
# Construct a model instance of RatelimitObjectMatchRequest by calling from_dict on the json representation
ratelimit_object_match_request_model = RatelimitObjectMatchRequest.from_dict(ratelimit_object_match_request_model_json)
assert ratelimit_object_match_request_model != False
# Construct a second RatelimitObjectMatchRequest instance from the dict form of the first
ratelimit_object_match_request_model_dict = RatelimitObjectMatchRequest.from_dict(ratelimit_object_match_request_model_json).__dict__
ratelimit_object_match_request_model2 = RatelimitObjectMatchRequest(**ratelimit_object_match_request_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_match_request_model == ratelimit_object_match_request_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_match_request_model_json2 = ratelimit_object_match_request_model.to_dict()
assert ratelimit_object_match_request_model_json2 == ratelimit_object_match_request_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectMatchResponse
#-----------------------------------------------------------------------------
class TestRatelimitObjectMatchResponse():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectMatchResponse
#--------------------------------------------------------
def test_ratelimit_object_match_response_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_match_response_headers_item_model = {} # RatelimitObjectMatchResponseHeadersItem
ratelimit_object_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model['op'] = 'ne'
ratelimit_object_match_response_headers_item_model['value'] = 'HIT'
# Construct a json representation of a RatelimitObjectMatchResponse model
ratelimit_object_match_response_model_json = {}
ratelimit_object_match_response_model_json['status'] = [403]
ratelimit_object_match_response_model_json['headers'] = [ratelimit_object_match_response_headers_item_model]
ratelimit_object_match_response_model_json['origin_traffic'] = False
# Construct a model instance of RatelimitObjectMatchResponse by calling from_dict on the json representation
ratelimit_object_match_response_model = RatelimitObjectMatchResponse.from_dict(ratelimit_object_match_response_model_json)
assert ratelimit_object_match_response_model != False
# Construct a second RatelimitObjectMatchResponse instance from the dict form of the first
ratelimit_object_match_response_model_dict = RatelimitObjectMatchResponse.from_dict(ratelimit_object_match_response_model_json).__dict__
ratelimit_object_match_response_model2 = RatelimitObjectMatchResponse(**ratelimit_object_match_response_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_match_response_model == ratelimit_object_match_response_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_match_response_model_json2 = ratelimit_object_match_response_model.to_dict()
assert ratelimit_object_match_response_model_json2 == ratelimit_object_match_response_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObjectMatchResponseHeadersItem
#-----------------------------------------------------------------------------
class TestRatelimitObjectMatchResponseHeadersItem():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObjectMatchResponseHeadersItem
#--------------------------------------------------------
def test_ratelimit_object_match_response_headers_item_serialization(self):
# Construct a json representation of a RatelimitObjectMatchResponseHeadersItem model
ratelimit_object_match_response_headers_item_model_json = {}
ratelimit_object_match_response_headers_item_model_json['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model_json['op'] = 'ne'
ratelimit_object_match_response_headers_item_model_json['value'] = 'HIT'
# Construct a model instance of RatelimitObjectMatchResponseHeadersItem by calling from_dict on the json representation
ratelimit_object_match_response_headers_item_model = RatelimitObjectMatchResponseHeadersItem.from_dict(ratelimit_object_match_response_headers_item_model_json)
assert ratelimit_object_match_response_headers_item_model != False
# Construct a second RatelimitObjectMatchResponseHeadersItem instance from the dict form of the first
ratelimit_object_match_response_headers_item_model_dict = RatelimitObjectMatchResponseHeadersItem.from_dict(ratelimit_object_match_response_headers_item_model_json).__dict__
ratelimit_object_match_response_headers_item_model2 = RatelimitObjectMatchResponseHeadersItem(**ratelimit_object_match_response_headers_item_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_match_response_headers_item_model == ratelimit_object_match_response_headers_item_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_match_response_headers_item_model_json2 = ratelimit_object_match_response_headers_item_model.to_dict()
assert ratelimit_object_match_response_headers_item_model_json2 == ratelimit_object_match_response_headers_item_model_json
#-----------------------------------------------------------------------------
# Test Class for DeleteRateLimitResp
#-----------------------------------------------------------------------------
class TestDeleteRateLimitResp():
#--------------------------------------------------------
# Test serialization/deserialization for DeleteRateLimitResp
#--------------------------------------------------------
def test_delete_rate_limit_resp_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
delete_rate_limit_resp_result_model = {} # DeleteRateLimitRespResult
delete_rate_limit_resp_result_model['id'] = 'f1aba936b94213e5b8dca0c0dbf1f9cc'
# Construct a json representation of a DeleteRateLimitResp model
delete_rate_limit_resp_model_json = {}
delete_rate_limit_resp_model_json['success'] = True
delete_rate_limit_resp_model_json['errors'] = [['[]']]
delete_rate_limit_resp_model_json['messages'] = [['[]']]
delete_rate_limit_resp_model_json['result'] = delete_rate_limit_resp_result_model
# Construct a model instance of DeleteRateLimitResp by calling from_dict on the json representation
delete_rate_limit_resp_model = DeleteRateLimitResp.from_dict(delete_rate_limit_resp_model_json)
assert delete_rate_limit_resp_model != False
# Construct a second DeleteRateLimitResp instance from the dict form of the first
delete_rate_limit_resp_model_dict = DeleteRateLimitResp.from_dict(delete_rate_limit_resp_model_json).__dict__
delete_rate_limit_resp_model2 = DeleteRateLimitResp(**delete_rate_limit_resp_model_dict)
# Verify the model instances are equivalent
assert delete_rate_limit_resp_model == delete_rate_limit_resp_model2
# Convert model instance back to dict and verify no loss of data
delete_rate_limit_resp_model_json2 = delete_rate_limit_resp_model.to_dict()
assert delete_rate_limit_resp_model_json2 == delete_rate_limit_resp_model_json
#-----------------------------------------------------------------------------
# Test Class for ListRatelimitResp
#-----------------------------------------------------------------------------
class TestListRatelimitResp():
#--------------------------------------------------------
# Test serialization/deserialization for ListRatelimitResp
#--------------------------------------------------------
def test_list_ratelimit_resp_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_match_response_headers_item_model = {} # RatelimitObjectMatchResponseHeadersItem
ratelimit_object_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model['op'] = 'ne'
ratelimit_object_match_response_headers_item_model['value'] = 'HIT'
ratelimit_object_action_response_model = {} # RatelimitObjectActionResponse
ratelimit_object_action_response_model['content_type'] = 'text/plain'
ratelimit_object_action_response_model['body'] = 'This request has been rate-limited.'
ratelimit_object_match_request_model = {} # RatelimitObjectMatchRequest
ratelimit_object_match_request_model['methods'] = ['_ALL_']
ratelimit_object_match_request_model['schemes'] = ['_ALL_']
ratelimit_object_match_request_model['url'] = '*.example.org/path*'
ratelimit_object_match_response_model = {} # RatelimitObjectMatchResponse
ratelimit_object_match_response_model['status'] = [403]
ratelimit_object_match_response_model['headers'] = [ratelimit_object_match_response_headers_item_model]
ratelimit_object_match_response_model['origin_traffic'] = False
ratelimit_object_action_model = {} # RatelimitObjectAction
ratelimit_object_action_model['mode'] = 'simulate'
ratelimit_object_action_model['timeout'] = 60
ratelimit_object_action_model['response'] = ratelimit_object_action_response_model
ratelimit_object_bypass_item_model = {} # RatelimitObjectBypassItem
ratelimit_object_bypass_item_model['name'] = 'url'
ratelimit_object_bypass_item_model['value'] = 'example.com/*'
ratelimit_object_correlate_model = {} # RatelimitObjectCorrelate
ratelimit_object_correlate_model['by'] = 'nat'
ratelimit_object_match_model = {} # RatelimitObjectMatch
ratelimit_object_match_model['request'] = ratelimit_object_match_request_model
ratelimit_object_match_model['response'] = ratelimit_object_match_response_model
list_ratelimit_resp_result_info_model = {} # ListRatelimitRespResultInfo
list_ratelimit_resp_result_info_model['page'] = 1
list_ratelimit_resp_result_info_model['per_page'] = 10
list_ratelimit_resp_result_info_model['count'] = 1
list_ratelimit_resp_result_info_model['total_count'] = 1
ratelimit_object_model = {} # RatelimitObject
ratelimit_object_model['id'] = '92f17202ed8bd63d69a66b86a49a8f6b'
ratelimit_object_model['disabled'] = False
ratelimit_object_model['description'] = 'Prevent multiple login failures to mitigate brute force attacks'
ratelimit_object_model['bypass'] = [ratelimit_object_bypass_item_model]
ratelimit_object_model['threshold'] = 1000
ratelimit_object_model['period'] = 60
ratelimit_object_model['correlate'] = ratelimit_object_correlate_model
ratelimit_object_model['action'] = ratelimit_object_action_model
ratelimit_object_model['match'] = ratelimit_object_match_model
# Construct a json representation of a ListRatelimitResp model
list_ratelimit_resp_model_json = {}
list_ratelimit_resp_model_json['success'] = True
list_ratelimit_resp_model_json['errors'] = [['[]']]
list_ratelimit_resp_model_json['messages'] = [['[]']]
list_ratelimit_resp_model_json['result'] = [ratelimit_object_model]
list_ratelimit_resp_model_json['result_info'] = list_ratelimit_resp_result_info_model
# Construct a model instance of ListRatelimitResp by calling from_dict on the json representation
list_ratelimit_resp_model = ListRatelimitResp.from_dict(list_ratelimit_resp_model_json)
assert list_ratelimit_resp_model != False
# Construct a second ListRatelimitResp instance from the dict form of the first
list_ratelimit_resp_model_dict = ListRatelimitResp.from_dict(list_ratelimit_resp_model_json).__dict__
list_ratelimit_resp_model2 = ListRatelimitResp(**list_ratelimit_resp_model_dict)
# Verify the model instances are equivalent
assert list_ratelimit_resp_model == list_ratelimit_resp_model2
# Convert model instance back to dict and verify no loss of data
list_ratelimit_resp_model_json2 = list_ratelimit_resp_model.to_dict()
assert list_ratelimit_resp_model_json2 == list_ratelimit_resp_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitObject
#-----------------------------------------------------------------------------
class TestRatelimitObject():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitObject
#--------------------------------------------------------
def test_ratelimit_object_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_match_response_headers_item_model = {} # RatelimitObjectMatchResponseHeadersItem
ratelimit_object_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model['op'] = 'ne'
ratelimit_object_match_response_headers_item_model['value'] = 'HIT'
ratelimit_object_action_response_model = {} # RatelimitObjectActionResponse
ratelimit_object_action_response_model['content_type'] = 'text/plain'
ratelimit_object_action_response_model['body'] = 'This request has been rate-limited.'
ratelimit_object_match_request_model = {} # RatelimitObjectMatchRequest
ratelimit_object_match_request_model['methods'] = ['_ALL_']
ratelimit_object_match_request_model['schemes'] = ['_ALL_']
ratelimit_object_match_request_model['url'] = '*.example.org/path*'
ratelimit_object_match_response_model = {} # RatelimitObjectMatchResponse
ratelimit_object_match_response_model['status'] = [403]
ratelimit_object_match_response_model['headers'] = [ratelimit_object_match_response_headers_item_model]
ratelimit_object_match_response_model['origin_traffic'] = False
ratelimit_object_action_model = {} # RatelimitObjectAction
ratelimit_object_action_model['mode'] = 'simulate'
ratelimit_object_action_model['timeout'] = 60
ratelimit_object_action_model['response'] = ratelimit_object_action_response_model
ratelimit_object_bypass_item_model = {} # RatelimitObjectBypassItem
ratelimit_object_bypass_item_model['name'] = 'url'
ratelimit_object_bypass_item_model['value'] = 'example.com/*'
ratelimit_object_correlate_model = {} # RatelimitObjectCorrelate
ratelimit_object_correlate_model['by'] = 'nat'
ratelimit_object_match_model = {} # RatelimitObjectMatch
ratelimit_object_match_model['request'] = ratelimit_object_match_request_model
ratelimit_object_match_model['response'] = ratelimit_object_match_response_model
# Construct a json representation of a RatelimitObject model
ratelimit_object_model_json = {}
ratelimit_object_model_json['id'] = '92f17202ed8bd63d69a66b86a49a8f6b'
ratelimit_object_model_json['disabled'] = False
ratelimit_object_model_json['description'] = 'Prevent multiple login failures to mitigate brute force attacks'
ratelimit_object_model_json['bypass'] = [ratelimit_object_bypass_item_model]
ratelimit_object_model_json['threshold'] = 1000
ratelimit_object_model_json['period'] = 60
ratelimit_object_model_json['correlate'] = ratelimit_object_correlate_model
ratelimit_object_model_json['action'] = ratelimit_object_action_model
ratelimit_object_model_json['match'] = ratelimit_object_match_model
# Construct a model instance of RatelimitObject by calling from_dict on the json representation
ratelimit_object_model = RatelimitObject.from_dict(ratelimit_object_model_json)
assert ratelimit_object_model != False
# Construct a second RatelimitObject instance from the dict form of the first
ratelimit_object_model_dict = RatelimitObject.from_dict(ratelimit_object_model_json).__dict__
ratelimit_object_model2 = RatelimitObject(**ratelimit_object_model_dict)
# Verify the model instances are equivalent
assert ratelimit_object_model == ratelimit_object_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_object_model_json2 = ratelimit_object_model.to_dict()
assert ratelimit_object_model_json2 == ratelimit_object_model_json
#-----------------------------------------------------------------------------
# Test Class for RatelimitResp
#-----------------------------------------------------------------------------
class TestRatelimitResp():
#--------------------------------------------------------
# Test serialization/deserialization for RatelimitResp
#--------------------------------------------------------
def test_ratelimit_resp_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
ratelimit_object_match_response_headers_item_model = {} # RatelimitObjectMatchResponseHeadersItem
ratelimit_object_match_response_headers_item_model['name'] = 'Cf-Cache-Status'
ratelimit_object_match_response_headers_item_model['op'] = 'ne'
ratelimit_object_match_response_headers_item_model['value'] = 'HIT'
ratelimit_object_action_response_model = {} # RatelimitObjectActionResponse
ratelimit_object_action_response_model['content_type'] = 'text/plain'
ratelimit_object_action_response_model['body'] = 'This request has been rate-limited.'
ratelimit_object_match_request_model = {} # RatelimitObjectMatchRequest
ratelimit_object_match_request_model['methods'] = ['_ALL_']
ratelimit_object_match_request_model['schemes'] = ['_ALL_']
ratelimit_object_match_request_model['url'] = '*.example.org/path*'
ratelimit_object_match_response_model = {} # RatelimitObjectMatchResponse
ratelimit_object_match_response_model['status'] = [403]
ratelimit_object_match_response_model['headers'] = [ratelimit_object_match_response_headers_item_model]
ratelimit_object_match_response_model['origin_traffic'] = False
ratelimit_object_action_model = {} # RatelimitObjectAction
ratelimit_object_action_model['mode'] = 'simulate'
ratelimit_object_action_model['timeout'] = 60
ratelimit_object_action_model['response'] = ratelimit_object_action_response_model
ratelimit_object_bypass_item_model = {} # RatelimitObjectBypassItem
ratelimit_object_bypass_item_model['name'] = 'url'
ratelimit_object_bypass_item_model['value'] = 'example.com/*'
ratelimit_object_correlate_model = {} # RatelimitObjectCorrelate
ratelimit_object_correlate_model['by'] = 'nat'
ratelimit_object_match_model = {} # RatelimitObjectMatch
ratelimit_object_match_model['request'] = ratelimit_object_match_request_model
ratelimit_object_match_model['response'] = ratelimit_object_match_response_model
ratelimit_object_model = {} # RatelimitObject
ratelimit_object_model['id'] = '92f17202ed8bd63d69a66b86a49a8f6b'
ratelimit_object_model['disabled'] = False
ratelimit_object_model['description'] = 'Prevent multiple login failures to mitigate brute force attacks'
ratelimit_object_model['bypass'] = [ratelimit_object_bypass_item_model]
ratelimit_object_model['threshold'] = 1000
ratelimit_object_model['period'] = 60
ratelimit_object_model['correlate'] = ratelimit_object_correlate_model
ratelimit_object_model['action'] = ratelimit_object_action_model
ratelimit_object_model['match'] = ratelimit_object_match_model
# Construct a json representation of a RatelimitResp model
ratelimit_resp_model_json = {}
ratelimit_resp_model_json['success'] = True
ratelimit_resp_model_json['errors'] = [['[]']]
ratelimit_resp_model_json['messages'] = [['[]']]
ratelimit_resp_model_json['result'] = ratelimit_object_model
# Construct a model instance of RatelimitResp by calling from_dict on the json representation
ratelimit_resp_model = RatelimitResp.from_dict(ratelimit_resp_model_json)
assert ratelimit_resp_model != False
# Construct a second RatelimitResp instance from the dict form of the first
ratelimit_resp_model_dict = RatelimitResp.from_dict(ratelimit_resp_model_json).__dict__
ratelimit_resp_model2 = RatelimitResp(**ratelimit_resp_model_dict)
# Verify the model instances are equivalent
assert ratelimit_resp_model == ratelimit_resp_model2
# Convert model instance back to dict and verify no loss of data
ratelimit_resp_model_json2 = ratelimit_resp_model.to_dict()
assert ratelimit_resp_model_json2 == ratelimit_resp_model_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
# --- Separate snippet: flag and password strings ---
# The flag text is Hinglish; roughly: "HEY, go get 4 plates of cheese maggi and
# patties from Emerald... and tell them to put the bill on my account!!"
flag="OYE, jaa 4 plate emarald se cheese maggi aur patties le kar aa... aur bolna kharcha mere khate mie likh dene!! ISM: TOH_SANDEEP_KO_BULANA_PADTA_HAI"
binary_password="<PASSWORD>"
# --- Separate snippet: Counter-based greedy solution for the minimum set size problem ---
from typing import List
from collections import Counter
class Solution:
def minSetSize(self, arr: List[int]) -> int:
freq = Counter(arr)
freq = freq.most_common()
numRequired = len(arr) // 2
start = 0
while numRequired > 0:
numRequired -= freq[start][1]
start += 1
return start
sol = Solution()
print(sol.minSetSize(arr=[3, 3, 3, 3, 5, 5, 5, 2, 2, 7]))
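# Greedy reasoning: removing the most frequent values first minimizes how many
# distinct values must be deleted to remove at least half of the elements.
# For the sample input above the frequencies are {3: 4, 5: 3, 2: 2, 7: 1};
# removing value 3 (4 items) and then value 5 (3 more) clears 7 of 10 elements,
# so the expected printed result is 2.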
# --- Separate snippet (repo: wmeueleleyb/Leibniz-series-plotted-): animated Leibniz approximation of pi ---
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import random
x = []
y = []
pi = 4
j = 0
plt.style.use('fivethirtyeight')
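# Each animation frame below adds one more term of the Leibniz series
# pi = 4*(1 - 1/3 + 1/5 - 1/7 + ...): the j-th update applies 4/(2*j + 3)
# with alternating sign and re-plots the running estimate.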
def animate(i):
global pi, j
if j % 2 == 0: pi -= (4/(j*2 + 3))
else: pi += (4/(j*2 + 3))
j += 1
x.append(j)
y.append(pi)
plt.cla()
plt.plot(x,y)
plt.title("pi = " + str(pi))
plt.xlabel('Iterations')
animation = FuncAnimation(plt.gcf(), animate, interval = 10)
plt.tight_layout()
plt.show()
# --- Separate snippet: Google App Engine library vendoring setup ---
__author__ = 'lorenzo'
#
# http://stackoverflow.com/a/29681061/2536357
#
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
# run from the project root:
# pip install -t lib -r requirements.txt
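# After vendor.add('lib'), imports of third-party packages installed there
# (e.g. via the pip command above) are resolved from the local "lib" folder.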
# Uncomment to enable appstats recording
#def webapp_add_wsgi_middleware(app):
# from google.appengine.ext.appstats import recording
# app = recording.appstats_wsgi_middleware(app)
# return app
# --- Separate snippet: Django management command stub ---
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Clean all entries for http451'
def add_arguments(self, parser):
pass
# parser.add_argument('dump', nargs='+', type=int)
def handle(self, *args, **options):
self.stdout.write(self.style.ERROR('Command not yet implemented'))
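# A possible eventual implementation (sketch only; the app's models are not
# shown here, so Http451Entry is a hypothetical name):
#
# from http451.models import Http451Entry
#
# def handle(self, *args, **options):
#     deleted, _ = Http451Entry.objects.all().delete()
#     self.stdout.write(self.style.SUCCESS('Removed %d entries' % deleted))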
# --- Separate file: yacg/model/model.py ---
# Attention, this file is generated. Manual changes get lost with the next
# run of the code generation.
# created by yacg (template: pythonBeans.mako v1.0.0)
from enum import Enum
class Type:
""" Dummy base class to implement strong typed references
"""
def __init__(self):
pass
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
return obj
class IntegerType (Type):
""" integer values
"""
def __init__(self):
super(Type, self).__init__()
#: integer values
self.format = None
#: integer values
self.default = None
#: integer values
self.minimum = None
#: integer values
self.exclusiveMinimum = None
#: integer values
self.maximum = None
#: integer values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.format = IntegerTypeFormatEnum.valueForString(dict.get('format', None))
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class IntegerTypeFormatEnum(Enum):
INT32 = 'int32'
INT64 = 'int64'
@classmethod
def valueForString(cls, stringValue):
lowerStringValue = stringValue.lower() if stringValue is not None else None
if lowerStringValue is None:
return None
elif lowerStringValue == 'int32':
return IntegerTypeFormatEnum.INT32
elif lowerStringValue == 'int64':
return IntegerTypeFormatEnum.INT64
else:
return None
@classmethod
def valueAsString(cls, enumValue):
if enumValue is None:
return ''
elif enumValue == IntegerTypeFormatEnum.INT32:
return 'int32'
elif enumValue == IntegerTypeFormatEnum.INT64:
return 'int64'
else:
return ''
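# Usage sketch for the enum helpers above:
#   IntegerTypeFormatEnum.valueForString('int64') -> IntegerTypeFormatEnum.INT64
#   IntegerTypeFormatEnum.valueAsString(IntegerTypeFormatEnum.INT64) -> 'int64'
# Unknown strings map to None, and a None enum value maps to ''.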
class NumberType (Type):
""" floating point values
"""
def __init__(self):
super(Type, self).__init__()
#: floating point values
self.format = None
#: floating point values
self.default = None
#: floating point values
self.minimum = None
#: floating point values
self.exclusiveMinimum = None
#: floating point values
self.maximum = None
#: floating point values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.format = NumberTypeFormatEnum.valueForString(dict.get('format', None))
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class NumberTypeFormatEnum(Enum):
FLOAT = 'float'
DOUBLE = 'double'
@classmethod
def valueForString(cls, stringValue):
lowerStringValue = stringValue.lower() if stringValue is not None else None
if lowerStringValue is None:
return None
elif lowerStringValue == 'float':
return NumberTypeFormatEnum.FLOAT
elif lowerStringValue == 'double':
return NumberTypeFormatEnum.DOUBLE
else:
return None
@classmethod
def valueAsString(cls, enumValue):
if enumValue is None:
return ''
elif enumValue == NumberTypeFormatEnum.FLOAT:
return 'float'
elif enumValue == NumberTypeFormatEnum.DOUBLE:
return 'double'
else:
return ''
class BooleanType (Type):
""" boolean values
"""
def __init__(self):
super(Type, self).__init__()
#: boolean values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class StringType (Type):
""" integer values
"""
def __init__(self):
super(Type, self).__init__()
#: integer values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class UuidType (Type):
""" UUID values
"""
def __init__(self):
super(Type, self).__init__()
#: UUID values
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class EnumType (Type):
""" type for enum values - fixed value types
"""
def __init__(self):
super(Type, self).__init__()
#: type for enum values - fixed value types
self.version = None
#: type for enum values - fixed value types
self.name = None
#: type for enum values - fixed value types
self.domain = None
#: type for enum values - fixed value types
self.source = None
#: type for enum values - fixed value types
self.description = None
#: type for enum values - fixed value types
self.values = []
#: type for enum values - fixed value types
self.default = None
#: type for enum values - fixed value types
self.tags = []
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.version = dict.get('version', None)
obj.name = dict.get('name', None)
obj.domain = dict.get('domain', None)
obj.source = dict.get('source', None)
obj.description = dict.get('description', None)
arrayValues = dict.get('values', [])
for elemValues in arrayValues:
obj.values.append(elemValues)
obj.default = dict.get('default', None)
arrayTags = dict.get('tags', [])
for elemTags in arrayTags:
obj.tags.append(
Tag.dictToObject(elemTags))
return obj
class Tag:
""" a tag type
"""
def __init__(self):
#: a tag type
self.name = None
#: a tag type
self.value = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.name = dict.get('name', None)
obj.value = dict.get('value', None)
return obj
class DateType (Type):
""" type for date values
"""
def __init__(self):
super(Type, self).__init__()
#: type for date values
self.default = None
#: type for date values
self.minimum = None
#: type for date values
self.exclusiveMinimum = None
#: type for date values
self.maximum = None
#: type for date values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class DateTimeType (Type):
""" type for timestamp values
"""
def __init__(self):
super(Type, self).__init__()
#: type for timestamp values
self.default = None
#: type for timestamp values
self.minimum = None
#: type for timestamp values
self.exclusiveMinimum = None
#: type for timestamp values
self.maximum = None
#: type for timestamp values
self.exclusiveMaximum = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
obj.minimum = dict.get('minimum', None)
obj.exclusiveMinimum = dict.get('exclusiveMinimum', None)
obj.maximum = dict.get('maximum', None)
obj.exclusiveMaximum = dict.get('exclusiveMaximum', None)
return obj
class BytesType (Type):
""" type for byte values, it will usually be rendered to a byte array
"""
def __init__(self):
super(Type, self).__init__()
#: type for byte values, it will usually be rendered to a byte array
self.default = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.default = dict.get('default', None)
return obj
class ComplexType (Type):
""" complex type description
"""
def __init__(self):
super(Type, self).__init__()
#: complex type description
self.version = None
#: complex type description
self.name = None
#: complex type description
self.description = None
#: complex type description
self.domain = None
#: complex type description
self.source = None
#: complex type description
self.extendsType = None
#: complex type description
self.extendedBy = []
#: complex type description
self.referencedBy = []
#: complex type description
self.properties = []
#: complex type description
self.tags = []
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.version = dict.get('version', None)
obj.name = dict.get('name', None)
obj.description = dict.get('description', None)
obj.domain = dict.get('domain', None)
obj.source = dict.get('source', None)
obj.extendsType = ComplexType.dictToObject(dict.get('extendsType', None))
arrayExtendedBy = dict.get('extendedBy', [])
for elemExtendedBy in arrayExtendedBy:
obj.extendedBy.append(
ComplexType.dictToObject(elemExtendedBy))
arrayReferencedBy = dict.get('referencedBy', [])
for elemReferencedBy in arrayReferencedBy:
obj.referencedBy.append(
ComplexType.dictToObject(elemReferencedBy))
arrayProperties = dict.get('properties', [])
for elemProperties in arrayProperties:
obj.properties.append(
Property.dictToObject(elemProperties))
arrayTags = dict.get('tags', [])
for elemTags in arrayTags:
obj.tags.append(
Tag.dictToObject(elemTags))
return obj
class Property:
""" a property of a type
"""
def __init__(self):
        #: name of the property
        self.name = None
        #: True if the property holds an array of values
        self.isArray = False
        #: minimum number of array items, if isArray
        self.arrayMinItems = None
        #: maximum number of array items, if isArray
        self.arrayMaxItems = None
        #: whether array items must be unique, if isArray
        self.arrayUniqueItems = None
        #: the Type of the property value
        self.type = None
        #: tags attached to the property
        self.tags = []
        #: human readable description of the property
        self.description = None
        #: True if the property must be present
        self.required = False
        #: position of the property within its type
        self.ordinal = None
        #: True if the property is part of the key
        self.isKey = False
        #: True if the property is part of the visual key
        self.isVisualKey = False
        #: referenced type, if the property is a foreign key
        self.foreignKey = None
        #: format hint for the property value
        self.format = None
@classmethod
def dictToObject(cls, dict):
if dict is None:
return None
obj = cls()
obj.name = dict.get('name', None)
obj.isArray = dict.get('isArray', False)
obj.arrayMinItems = dict.get('arrayMinItems', None)
obj.arrayMaxItems = dict.get('arrayMaxItems', None)
obj.arrayUniqueItems = dict.get('arrayUniqueItems', None)
obj.type = Type.dictToObject(dict.get('type', None))
arrayTags = dict.get('tags', [])
for elemTags in arrayTags:
obj.tags.append(
Tag.dictToObject(elemTags))
obj.description = dict.get('description', None)
obj.required = dict.get('required', False)
obj.ordinal = dict.get('ordinal', None)
obj.isKey = dict.get('isKey', False)
obj.isVisualKey = dict.get('isVisualKey', False)
obj.foreignKey = Type.dictToObject(dict.get('foreignKey', None))
obj.format = dict.get('format', None)
return obj
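# --- Illustrative usage sketch (not part of the generated model) ---
# A minimal, hedged example of how the dictToObject helpers above rebuild objects
# from plain dictionaries (e.g. parsed JSON). It only uses classes visible in this
# module (ComplexType, Tag); the dictionary layout is an assumption derived from
# the keys each dictToObject reads.
if __name__ == "__main__":
    raw = {
        "name": "Address",
        "description": "a postal address",
        "tags": [{"name": "domain", "value": "customer"}],
    }
    address_type = ComplexType.dictToObject(raw)
    # Prints: Address ['domain']
    print(address_type.name, [tag.name for tag in address_type.tags])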
| StarcoderdataPython |
1756294 | <reponame>realjf/ceph-board-py
from django.contrib import admin
from .models import Article
class ArticleAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'created_time',)
list_display_links = ('title',)
admin.site.register(Article, ArticleAdmin)
| StarcoderdataPython |
from avatar_sgg.config.util import get_config
from avatar_sgg.image_retrieval.evaluation import compute_text_graph_similarity, \
    compute_recall_on_category, compute_recall_johnson_feiefei, run_evaluation
import numpy as np
import os
from avatar_sgg.dataset.ade20k import get_preprocessed_text_text_graphs_for_test
if __name__ == "__main__":
print("Start")
output_dir = os.path.join(get_config()["output_dir"], "image_retrieval")
current = get_preprocessed_text_text_graphs_for_test()
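    # The evaluation presumably sweeps every entry of threshold_list: the leading None
    # is kept as a no-threshold baseline (assumption), and 15 evenly spaced thresholds
    # between 0.85 and 0.99 are appended below before the first run_evaluation call.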
threshold_list = [None]
    def eval_name(caption_type, recall_type):
        return f"{caption_type}_{recall_type}"
ade20k_category_recall = "ade20k_category_recall"
fei_fei_recall = "feifei_johnson_recall"
text_scene_graph_query = "text_text_graph_query_9_epochs"
evaluation_name = eval_name(text_scene_graph_query, fei_fei_recall)
threshold_list.extend(np.linspace(0.85, 0.99, 15))
run_evaluation(evaluation_name, current, compute_text_graph_similarity, threshold_list,
compute_recall_johnson_feiefei,
output_dir)
evaluation_name = eval_name(text_scene_graph_query, ade20k_category_recall)
run_evaluation(evaluation_name, current, compute_text_graph_similarity, threshold_list, compute_recall_on_category,
output_dir)
print("Done")
| StarcoderdataPython |
198436 | <gh_stars>100-1000
# Copyright 2019-present Kensho Technologies, LLC.
import datetime
from typing import Tuple
import unittest
from graphql import print_ast
import pytest
from .. import test_input_data
from ...cost_estimation.analysis import analyze_query_string
from ...cost_estimation.statistics import LocalStatistics
from ...exceptions import GraphQLInvalidArgumentError
from ...global_utils import QueryStringWithParameters
from ...query_pagination import paginate_query
from ...query_pagination.pagination_planning import (
InsufficientQuantiles,
MissingClassCount,
PaginationAdvisory,
PaginationPlan,
VertexPartitionPlan,
get_pagination_plan,
)
from ...query_pagination.parameter_generator import (
_choose_parameter_values,
generate_parameters_for_vertex_partition,
)
from ...query_pagination.query_parameterizer import generate_parameterized_queries
from ...schema.schema_info import EdgeConstraint, QueryPlanningSchemaInfo, UUIDOrdering
from ...schema_generation.graphql_schema import get_graphql_schema_from_schema_graph
from ..test_helpers import compare_graphql, generate_schema_graph, get_function_names_from_module
from ..test_input_data import CommonTestData
# The following TestCase class uses the 'snapshot_orientdb_client' fixture
# which pylint does not recognize as a class member.
# pylint: disable=no-member
@pytest.mark.slow
class QueryPaginationTests(unittest.TestCase):
"""Test the query pagination module."""
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_basic(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
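        # A VertexPartitionPlan is (path to the vertex to split on, field to paginate
        # by, number of splits); here the root Animal vertex is split on uuid.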
expected_plan = PaginationPlan((VertexPartitionPlan(("Animal",), "uuid", number_of_pages),))
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_invalid_extra_args(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{"country": "USA"},
)
with self.assertRaises(GraphQLInvalidArgumentError):
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
get_pagination_plan(analysis, number_of_pages)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_invalid_missing_args(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the correct plan is generated when it's obvious (page the root)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
@filter(op_name: "=", value: ["$animal_name"])
}
}""",
{},
)
with self.assertRaises(GraphQLInvalidArgumentError):
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
get_pagination_plan(analysis, number_of_pages)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_unique_filter(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "=", value: ["$animal_uuid"])
name @output(out_name: "animal_name")
}
}""",
{
"animal_uuid": "40000000-0000-0000-0000-000000000000",
},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
# This is a white box test. We check that we don't paginate on the root when it has a
# unique filter on it. A better plan is to paginate on a different vertex, but that is
# not implemented.
expected_plan = PaginationPlan(tuple())
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_unique_filter_on_many_to_one(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {
"Animal": 1000,
"Animal_FedAt": 10000000,
"FeedingEvent": 100000,
}
statistics = LocalStatistics(class_counts)
edge_constraints = {"Animal_ParentOf": EdgeConstraint.AtMostOneSource}
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
edge_constraints=edge_constraints,
)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
out_Animal_ParentOf {
uuid @filter(op_name: "=", value: ["$animal_uuid"])
}
out_Animal_FedAt {
name @output(out_name: "feeding_event_name")
}
}
}""",
{
"animal_uuid": "40000000-0000-0000-0000-000000000000",
},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
        # This is a white box test. There's a filter on the child, which narrows the
        # number of possible roots down to 1. This makes the root a bad pagination
# vertex. Ideally, we'd paginate on the FeedingEvent node, but that's not implemented.
expected_plan = PaginationPlan(tuple())
expected_advisories: Tuple[PaginationAdvisory, ...] = tuple()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_on_int(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts, field_quantiles={("Species", "limbs"): list(range(100))}
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the paginator generates a plan paginating on an int field
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
expected_plan = PaginationPlan(
(VertexPartitionPlan(("Species",), "limbs", number_of_pages),)
)
expected_advisories: Tuple[PaginationAdvisory, ...] = ()
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_planning_on_int_error(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
class_counts = {"Species": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
# Check that the paginator detects a lack of quantile data for Species.limbs
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
number_of_pages = 10
analysis = analyze_query_string(schema_info, query)
pagination_plan, advisories = get_pagination_plan(analysis, number_of_pages)
expected_plan = PaginationPlan(tuple())
expected_advisories = (InsufficientQuantiles("Species", "limbs", 0, 51),)
self.assertEqual([w.message for w in expected_advisories], [w.message for w in advisories])
self.assertEqual(expected_plan, pagination_plan)
# TODO: These tests can be sped up by having an existing test SchemaGraph object.
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_basic_pagination(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 4,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 1)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
expected_first = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
},
)
# Check that the correct first page and remainder are generated
compare_graphql(self, expected_first.query_string, first.query_string)
self.assertEqual(expected_first.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the first page is estimated to fit into a page
first_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, first_page_cardinality_estimate)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 1)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_second = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "40000000-0000-0000-0000-000000000000",
"__paged_param_1": "80000000-0000-0000-0000-000000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_1": "80000000-0000-0000-0000-000000000000",
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_second.query_string, second.query_string)
self.assertEqual(expected_second.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the second page is estimated to fit into a page
second_page_cardinality_estimate = analyze_query_string(
            schema_info, second
).cardinality_estimate
self.assertAlmostEqual(1, second_page_cardinality_estimate)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_basic_pagination_mssql_uuids(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 4,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 1)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
expected_first = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
},
)
# Check that the correct first page and remainder are generated
compare_graphql(self, expected_first.query_string, first.query_string)
self.assertEqual(expected_first.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the first page is estimated to fit into a page
first_page_cardinality_estimate = analyze_query_string(
schema_info, first
).cardinality_estimate
self.assertAlmostEqual(1, first_page_cardinality_estimate)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 1)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_second = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_0": "00000000-0000-0000-0000-400000000000",
"__paged_param_1": "00000000-0000-0000-0000-800000000000",
},
)
expected_remainder = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "animal")
}
}""",
{
"__paged_param_1": "00000000-0000-0000-0000-800000000000",
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_second.query_string, second.query_string)
self.assertEqual(expected_second.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder.parameters, remainder[0].parameters)
# Check that the second page is estimated to fit into a page
second_page_cardinality_estimate = analyze_query_string(
            schema_info, second
).cardinality_estimate
self.assertAlmostEqual(1, second_page_cardinality_estimate)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_datetime(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Event"] = ("event_date",) # Force pagination on datetime field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Event": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
}
}""",
{},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
        # There are 1000 dates uniformly spread out between year 2000 and 2100, so to get
        # 100 results, we stop at 2010.
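        # (1000 events over 100 one-year quantile buckets is roughly 10 events per year,
        # so a ~100-event page ends at the 2010-01-01 quantile boundary.)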
expected_page_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, first.query_string)
self.assertEqual(expected_page_query.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
# Get the second page
second_page_and_remainder, _ = paginate_query(schema_info, remainder[0], 100)
second = second_page_and_remainder.one_page
remainder = second_page_and_remainder.remainder
expected_page_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
@filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "event_name")
}
}""",
{
                # TODO: the second page boundary lands on 2019-01-01 rather than the
                # expected 2020-01-01; the parameter generator appears to stop one
                # quantile short here.
"__paged_param_0": datetime.datetime(2010, 1, 1, 0, 0),
"__paged_param_1": datetime.datetime(2019, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
event_date @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "event_name")
}
}""",
{
"__paged_param_1": datetime.datetime(2019, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, second.query_string)
self.assertEqual(expected_page_query.parameters, second.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_datetime_existing_filter(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
# We allow pagination on uuid as well and leave it to the pagination planner to decide to
# paginate on event_date to prevent empty pages if the two fields are correlated.
pagination_keys["Event"] = ("uuid", "event_date")
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Event": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
local_datetime = datetime.datetime(2050, 1, 1, 0, 0)
query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$date_lower"])
}
}""",
{"date_lower": local_datetime},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
        # There are 1000 dates uniformly spread out between year 2000 and 2100, so to get
        # 100 results after 2050, we stop at 2059.
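        # (Roughly 10 events per one-year bucket remain at or after 2050, so a ~100-event
        # page reaches the 2059/2060 boundary; the generator picks 2059-01-01.)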
expected_page_query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$date_lower"])
@filter(op_name: "<", value: ["$__paged_param_0"])
}
}""",
{
"date_lower": local_datetime,
"__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
}
}""",
{
"__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, first.query_string)
self.assertEqual(expected_page_query.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_existing_datetime_filter(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Event"] = ("event_date",) # Force pagination on datetime field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Event": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$date_lower"])
}
}""",
{"date_lower": datetime.datetime(2050, 1, 1, 0, 0)},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 100)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# We can't expect anything good when using a tz-aware filter on a tz-naive
# field, but at least we shouldn't error. The current implementation ignores
# the timezone, so this is a white-box test for that behavior.
expected_page_query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$date_lower"])
@filter(op_name: "<", value: ["$__paged_param_0"])
}
}""",
{
"date_lower": datetime.datetime(2050, 1, 1, 0, 0),
"__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
},
)
expected_remainder_query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
event_date @filter(op_name: ">=", value: ["$__paged_param_0"])
}
}""",
{
"__paged_param_0": datetime.datetime(2059, 1, 1, 0, 0),
},
)
# Check that the correct queries are generated
compare_graphql(self, expected_page_query.query_string, first.query_string)
self.assertEqual(expected_page_query.parameters, first.parameters)
self.assertEqual(1, len(remainder))
compare_graphql(self, expected_remainder_query.query_string, remainder[0].query_string)
self.assertEqual(expected_remainder_query.parameters, remainder[0].parameters)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): [i for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [25, 50, 75]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_few_quantiles(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 10000000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): [
0,
10,
20,
30,
],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [10, 20]
self.assertEqual(expected_parameters, list(generated_parameters))
def test_choose_parameter_values(self):
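        # _choose_parameter_values(values, desired_pages) appears to pick up to
        # (desired_pages - 1) roughly evenly spaced cut points from the candidate
        # values, dropping duplicates, as the cases below illustrate.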
self.assertEqual([1], list(_choose_parameter_values([1], 2)))
self.assertEqual([1], list(_choose_parameter_values([1], 3)))
self.assertEqual([1], list(_choose_parameter_values([1, 1], 3)))
self.assertEqual([3], list(_choose_parameter_values([1, 3], 2)))
self.assertEqual([1, 3], list(_choose_parameter_values([1, 3], 3)))
self.assertEqual([1, 3], list(_choose_parameter_values([1, 3], 4)))
self.assertEqual([3], list(_choose_parameter_values([1, 3, 5], 2)))
self.assertEqual([3, 5], list(_choose_parameter_values([1, 3, 5], 3)))
self.assertEqual([1, 3, 5], list(_choose_parameter_values([1, 3, 5], 4)))
self.assertEqual([1, 3, 5], list(_choose_parameter_values([1, 3, 5], 5)))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filters(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): [i for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
limbs @filter(op_name: ">=", value: ["$limbs_lower"])
}
}""",
{"limbs_lower": 25},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [50, 75]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filter_tiny_page(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): list(range(0, 101, 10))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
limbs @filter(op_name: ">=", value: ["$limbs_lower"])
}
}""",
{"limbs_lower": 10},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 10)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
first_parameter = next(generated_parameters)
self.assertTrue(first_parameter > 10)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_int_existing_filters_2(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): [i for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
limbs @filter(op_name: "<", value: ["$limbs_upper"])
}
}""",
{"limbs_upper": 76},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 3)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [25, 50]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_inline_fragment(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): [i for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
out_Entity_Related {
... on Species {
name @output(out_name: "species_name")
}
}
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species", "out_Entity_Related"), "limbs", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [25, 50, 75]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_with_existing_filters(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts, field_quantiles={("Species", "limbs"): list(range(0, 1001, 10))}
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
limbs @filter(op_name: "<", value: ["$num_limbs"])
name @output(out_name: "species_name")
}
}""",
{"num_limbs": 505},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
        # The filter limbs < 505 keeps roughly the first 51 of the 101 quantiles
        # (0, 10, ..., 500); splitting that index range into four pieces gives
        # boundaries near indices 12.75, 25.5 and 38.25, which the bisect-based
        # index arithmetic appears to round up to 13, 26 and 39, i.e. the values
        # 130, 260 and 390 below.
expected_parameters = [130, 260, 390]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_datetime(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Event"] = ("event_date",) # Force pagination on datetime field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Event": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Event", "event_date"): [datetime.datetime(2000 + i, 1, 1) for i in range(101)],
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Event {
name @output(out_name: "event_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Event",), "event_date", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [
datetime.datetime(2025, 1, 1, 0, 0),
datetime.datetime(2050, 1, 1, 0, 0),
datetime.datetime(2075, 1, 1, 0, 0),
]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_uuid(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [
"40000000-0000-0000-0000-000000000000",
"80000000-0000-0000-0000-000000000000",
"c0000000-0000-0000-0000-000000000000",
]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_mssql_uuid(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
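        # LastSixBytesFirst models SQL Server's uniqueidentifier ordering, which compares
        # the last six bytes first, so the evenly spaced split points appear in the final
        # byte group of the UUID.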
expected_parameters = [
"00000000-0000-0000-0000-400000000000",
"00000000-0000-0000-0000-800000000000",
"00000000-0000-0000-0000-c00000000000",
]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_mssql_uuid_with_existing_filter(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LastSixBytesFirst}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Animal": 1000}
statistics = LocalStatistics(class_counts)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Animal {
uuid @filter(op_name: ">=", value: ["$uuid_lower"])
name @output(out_name: "animal_name")
}
}""",
{
"uuid_lower": "00000000-0000-0000-0000-800000000000",
},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Animal",), "uuid", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
expected_parameters = [
"00000000-0000-0000-0000-a00000000000",
"00000000-0000-0000-0000-c00000000000",
"00000000-0000-0000-0000-e00000000000",
]
self.assertEqual(expected_parameters, list(generated_parameters))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_parameter_value_generation_consecutive(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
generated_parameters = generate_parameters_for_vertex_partition(analysis, vertex_partition)
# Check that there are no duplicates
list_parameters = list(generated_parameters)
self.assertEqual(len(list_parameters), len(set(list_parameters)))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
}
}""",
{},
)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
analysis = analyze_query_string(schema_info, query)
next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
expected_next_page = """{
Species {
limbs @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "species_name")
}
}"""
expected_remainder = """{
Species {
limbs @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "species_name")
}
}"""
compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer_name_conflict(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
@filter(op_name: "!=", value: ["$__paged_param_0"])
}
}""",
{"__paged_param_0": "Cow"},
)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
analysis = analyze_query_string(schema_info, query)
next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
expected_next_page = """{
Species {
limbs @filter(op_name: "<", value: ["$__paged_param_1"])
name @output(out_name: "species_name")
@filter(op_name: "!=", value: ["$__paged_param_0"])
}
}"""
expected_remainder = """{
Species {
limbs @filter(op_name: ">=", value: ["$__paged_param_1"])
name @output(out_name: "species_name")
@filter(op_name: "!=", value: ["$__paged_param_0"])
}
}"""
compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_query_parameterizer_filter_deduplication(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): [0 for i in range(1000)] + list(range(101))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
limbs @filter(op_name: ">=", value: ["$limbs_more_than"])
name @output(out_name: "species_name")
}
}""",
{
"limbs_more_than": 100,
},
)
vertex_partition = VertexPartitionPlan(("Species",), "limbs", 4)
analysis = analyze_query_string(schema_info, query)
next_page, remainder = generate_parameterized_queries(analysis, vertex_partition, 100)
expected_next_page = """{
Species {
limbs @filter(op_name: ">=", value: ["$limbs_more_than"])
@filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "species_name")
}
}"""
expected_remainder = """{
Species {
limbs @filter(op_name: ">=", value: ["$__paged_param_0"])
name @output(out_name: "species_name")
}
}"""
compare_graphql(self, expected_next_page, print_ast(next_page.query_ast))
compare_graphql(self, expected_remainder, print_ast(remainder.query_ast))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_no_pagination(self):
"""Ensure pagination is not done when not needed."""
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
original_query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 4,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, original_query, 10)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# No pagination necessary
compare_graphql(self, original_query.query_string, first.query_string)
self.assertEqual(original_query.parameters, first.parameters)
self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination(self):
"""Ensure no unwanted error is raised when pagination is needed but stats are missing."""
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {} # No pagination keys, so the planner has no options
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
original_query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
count_data = {
"Animal": 100000,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, _ = paginate_query(schema_info, original_query, 10)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# Query should be split, but there's no viable pagination method.
compare_graphql(self, original_query.query_string, first.query_string)
self.assertEqual(original_query.parameters, first.parameters)
self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination_strong_filters_few_repeated_quantiles(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000000000000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): list(i for i in range(0, 101, 10) for _ in range(10000))
},
)
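# The quantile list above repeats each of the 11 decile values (0, 10, ..., 100)
# 10000 times, so very few distinct quantile values exist and the narrow
# "limbs between 10 and 14" filter below cannot be subdivided for pagination.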
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
limbs @filter(op_name: "between", value: ["$limbs_lower", "$limbs_upper"])
}
}""",
{
"limbs_lower": 10,
"limbs_upper": 14,
},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 10)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# Query should be split, but there's not enough quantiles
compare_graphql(self, query.query_string, first.query_string)
self.assertEqual(query.parameters, first.parameters)
self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_impossible_pagination_strong_filters_few_quantiles(self):
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000000000000}
statistics = LocalStatistics(
class_counts,
field_quantiles={
("Species", "limbs"): list(i for i in range(0, 101, 10) for _ in range(10000))
},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
name @output(out_name: "species_name")
limbs @filter(op_name: "between", value: ["$limbs_lower", "$limbs_upper"])
}
}""",
{
"limbs_lower": 10,
"limbs_upper": 14,
},
)
first_page_and_remainder, _ = paginate_query(schema_info, query, 10)
first = first_page_and_remainder.one_page
remainder = first_page_and_remainder.remainder
# Query should be split, but there's not enough quantiles
compare_graphql(self, query.query_string, first.query_string)
self.assertEqual(query.parameters, first.parameters)
self.assertEqual(0, len(remainder))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_with_compiler_tests(self):
"""Test that pagination doesn't crash on any of the queries from the compiler tests."""
schema_graph = generate_schema_graph(self.orientdb_client)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
count_data = {vertex_name: 100 for vertex_name in schema_graph.vertex_class_names}
count_data.update({edge_name: 100 for edge_name in schema_graph.edge_class_names})
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
arbitrary_value_for_type = {
"String": "string_1",
"ID": "40000000-0000-0000-0000-000000000000",
"Int": 5,
"Date": datetime.date(2000, 1, 1),
"DateTime": datetime.datetime(2000, 1, 1),
"Decimal": 5.3,
"[String]": ["string_1", "string_2"],
}
for test_name in get_function_names_from_module(test_input_data):
method = getattr(test_input_data, test_name)
if hasattr(method, "__annotations__"):
output_type = method.__annotations__.get("return")
if output_type == CommonTestData:
test_data = method()
query = test_data.graphql_input
args = {
arg_name: arbitrary_value_for_type[str(arg_type)]
for arg_name, arg_type in test_data.expected_input_metadata.items()
}
paginate_query(schema_info, QueryStringWithParameters(query, args), 10)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_vertex_class_count(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
name @output(out_name: "animal")
}
}""",
{},
)
# No class counts provided
statistics = LocalStatistics({})
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)
self.assertTrue(first_page_and_remainder.remainder == tuple())
self.assertEqual(advisories, (MissingClassCount("Animal"),))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_non_root_vertex_class_count(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
out_Animal_LivesIn {
name @output(out_name: "animal")
}
}
}""",
{},
)
# No counts for Location
count_data = {
"Animal": 1000,
"Animal_LivesIn": 1000,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)
self.assertTrue(first_page_and_remainder.remainder == tuple())
self.assertEqual(advisories, (MissingClassCount("Location"),))
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_missing_edge_class_count(self) -> None:
"""Ensure a basic pagination query is handled correctly."""
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
query = QueryStringWithParameters(
"""{
Animal {
out_Animal_LivesIn {
name @output(out_name: "animal")
}
}
}""",
{},
)
# No counts for Animal_LivesIn
count_data = {
"Animal": 1000,
"Location": 10000,
}
statistics = LocalStatistics(count_data)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
first_page_and_remainder, advisories = paginate_query(schema_info, query, 1)
self.assertTrue(first_page_and_remainder.remainder == tuple())
self.assertEqual(advisories, (MissingClassCount("Animal_LivesIn"),))
@pytest.mark.xfail(strict=True, reason="inline fragment not supported", raises=Exception)
@pytest.mark.usefixtures("snapshot_orientdb_client")
def test_pagination_with_inline_fragment(self) -> None:
schema_graph = generate_schema_graph(self.orientdb_client) # type: ignore # from fixture
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(schema_graph)
pagination_keys = {
vertex_name: ("uuid",) for vertex_name in schema_graph.vertex_class_names
}
pagination_keys["Species"] = ("limbs",) # Force pagination on int field
uuid4_field_info = {
vertex_name: {"uuid": UUIDOrdering.LeftToRight}
for vertex_name in schema_graph.vertex_class_names
}
class_counts = {"Species": 1000}
statistics = LocalStatistics(
class_counts,
field_quantiles={("Species", "limbs"): list(range(100))},
)
schema_info = QueryPlanningSchemaInfo(
schema=graphql_schema,
type_equivalence_hints=type_equivalence_hints,
schema_graph=schema_graph,
statistics=statistics,
pagination_keys=pagination_keys,
uuid4_field_info=uuid4_field_info,
)
query = QueryStringWithParameters(
"""{
Species {
out_Entity_Related {
... on Species {
name @output(out_name: "species_name")
}
}
}
}""",
{},
)
analysis = analyze_query_string(schema_info, query)
vertex_partition_plan = VertexPartitionPlan(("Species", "out_Entity_Related"), "limbs", 2)
generated_parameters = generate_parameters_for_vertex_partition(
analysis, vertex_partition_plan
)
sentinel = object()
first_param = next(generated_parameters, sentinel)
self.assertEqual(50, first_param)
page_query, _ = generate_parameterized_queries(analysis, vertex_partition_plan, first_param)
expected_page_query_string = """{
Species {
out_Entity_Related {
... on Species {
limbs @filter(op_name: "<", value: ["$__paged_param_0"])
name @output(out_name: "species_name")
}
}
}
}"""
compare_graphql(self, expected_page_query_string, print_ast(page_query.query_ast))
# --- Source file boundary (dataset sample id: 3381924) ---
import operator
import django.urls
import ipware.ip
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Prefetch, Min
from django.http.response import (HttpResponseNotFound,
JsonResponse,
HttpResponseForbidden)
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.cache import cache_page
from django.views.decorators.http import require_POST
import frontend.icons
import frontend.table
import modules.ejudge.queue
import sistema.helpers
import sistema.uploads
import users.models
from frontend.table.utils import TableDataSource
from . import forms
from . import models
from . import upgrades
def get_entrance_level_and_tasks(school, user):
base_level = upgrades.get_base_entrance_level(school, user)
tasks = upgrades.get_entrance_tasks(school, user, base_level)
return base_level, tasks
class EntrancedUsersTable(frontend.table.Table):
index = frontend.table.IndexColumn(verbose_name='')
name = frontend.table.Column(
accessor='get_full_name',
verbose_name='Имя',
order_by=('profile.last_name',
'profile.first_name'),
search_in=('profile.first_name',
'profile.last_name'))
city = frontend.table.Column(
accessor='profile.city',
orderable=True,
searchable=True,
verbose_name='Город')
school_and_class = frontend.table.Column(
accessor='profile',
search_in='profile.school_name',
verbose_name='Школа и класс')
session = frontend.table.Column(
accessor='entrance_statuses',
verbose_name='Смена')
parallel = frontend.table.Column(
accessor='entrance_statuses',
verbose_name='Параллель')
enrolled_status = frontend.table.Column(
empty_values=(),
verbose_name='Статус')
class Meta:
icon = frontend.icons.FaIcon('check')
# TODO: title depending on school
title = 'Поступившие'
pagination = False
def __init__(self, school, *args, **kwargs):
qs = users.models.User.objects.filter(
entrance_statuses__school=school,
entrance_statuses__status=models.EntranceStatus.Status.ENROLLED,
entrance_statuses__is_status_visible=True,
).annotate(
min_session=Min('entrance_statuses__sessions_and_parallels__session__name'),
min_parallel=Min('entrance_statuses__sessions_and_parallels__parallel__name')
).order_by(
'min_session',
'min_parallel',
'profile__last_name',
'profile__first_name',
).select_related('profile').prefetch_related(
Prefetch(
'entrance_statuses',
models.EntranceStatus.objects.filter(school=school)),
Prefetch(
'absence_reasons',
models.AbstractAbsenceReason.objects.filter(school=school)),
)
super().__init__(
qs,
django.urls.reverse('school:entrance:results_data',
args=[school.short_name]),
*args, **kwargs)
def render_school_and_class(self, value):
parts = []
if value.school_name:
parts.append(value.school_name)
if value.current_class is not None:
parts.append(str(value.current_class) + ' класс')
return ', '.join(parts)
def render_session(self, value):
# TODO: will it be filtered?
status = value.get()
sessions_and_parallels = status.sessions_and_parallels.all()
selected_session = sessions_and_parallels.filter(selected_by_user=True).first()
if selected_session is not None:
return selected_session.session.name if selected_session.session else ''
return ', '.join(set(
sessions_and_parallels
.filter(session__isnull=False)
.order_by('session_id')
.values_list('session__name', flat=True)
))
def render_parallel(self, value):
# TODO: will it be filtered?
status = value.get()
sessions_and_parallels = status.sessions_and_parallels.all()
selected_parallel = sessions_and_parallels.filter(selected_by_user=True).first()
if selected_parallel is not None:
return selected_parallel.parallel.name if selected_parallel.parallel else ''
return ', '.join(set(
sessions_and_parallels
.filter(parallel__isnull=False)
.order_by('parallel_id')
.values_list('parallel__name', flat=True)
))
def render_enrolled_status(self, record):
absence_reasons = record.absence_reasons.all()
absence_reason = absence_reasons[0] if absence_reasons else None
if absence_reason is not None:
return str(absence_reason)
entrance_status = record.entrance_statuses.get()
if not entrance_status.is_approved:
return 'Участие не подтверждено'
return ''
@login_required
def exam(request, selected_task_id=None):
entrance_exam = get_object_or_404(
models.EntranceExam,
school=request.school
)
is_closed = entrance_exam.is_closed()
base_level, tasks = get_entrance_level_and_tasks(request.school, request.user)
# Order tasks by type and then by their order field
tasks = sorted(tasks, key=lambda t: (t.type_title, t.order))
for task in tasks:
task.user_solutions = list(
task.solutions.filter(user=request.user).order_by('-created_at'))
task.is_accepted = task.is_accepted_for_user(request.user)
task.is_solved = task.is_solved_by_user(request.user)
task.form = task.get_form_for_user(request.user)
if selected_task_id is None and len(tasks) > 0:
selected_task_id = tasks[0].id
try:
selected_task_id = int(selected_task_id)
except ValueError:
selected_task_id = None
categories = list(sorted(
{task.category for task in tasks},
key=operator.attrgetter('order'),
))
for category in categories:
category.is_started = category.is_started_for_user(request.user)
category.is_finished = category.is_finished_for_user(request.user)
categories_with_tasks = [
(category, [task for task in tasks if task.category == category])
for category in categories
]
return render(request, 'entrance/exam.html', {
'is_closed': is_closed,
'entrance_level': base_level,
'school': request.school,
'categories_with_tasks': categories_with_tasks,
'is_user_at_maximum_level': upgrades.is_user_at_maximum_level(
request.school,
request.user,
base_level
),
'can_upgrade': not is_closed and upgrades.can_user_upgrade(
request.school,
request.user,
base_level
),
'selected_task_id': selected_task_id
})
@login_required
def task(request, task_id):
return exam(request, task_id)
@login_required
@require_POST
def submit(request, task_id):
entrance_exam = get_object_or_404(models.EntranceExam, school=request.school)
task = get_object_or_404(models.EntranceExamTask, pk=task_id)
if task.exam_id != entrance_exam.id:
return HttpResponseNotFound()
is_closed = (
entrance_exam.is_closed() or
task.category.is_finished_for_user(request.user))
ip = ipware.ip.get_ip(request) or ''
form = task.get_form_for_user(request.user, data=request.POST, files=request.FILES)
# TODO (andgein): extract this logic to models
if type(task) is models.TestEntranceExamTask:
if is_closed:
form.add_error('solution', 'Вступительная работа завершена. Решения больше не принимаются')
elif form.is_valid():
solution_text = form.cleaned_data['solution']
solution = models.TestEntranceExamTaskSolution(
user=request.user,
task=task,
solution=solution_text,
ip=ip
)
solution.save()
return JsonResponse({'status': 'ok', 'solution_id': solution.id})
return JsonResponse({'status': 'error', 'errors': form.errors})
if type(task) is models.FileEntranceExamTask:
if is_closed:
form.add_error('solution', 'Вступительная работа завершена. Решения больше не принимаются')
elif form.is_valid():
form_file = form.cleaned_data['solution']
solution_file = sistema.uploads.save_file(
form_file,
'entrance-exam-files-solutions'
)
solution = models.FileEntranceExamTaskSolution(
user=request.user,
task=task,
solution=solution_file,
original_filename=form_file.name,
ip=ip
)
solution.save()
return JsonResponse({'status': 'ok', 'solution_id': solution.id})
return JsonResponse({'status': 'error', 'errors': form.errors})
if isinstance(task, models.EjudgeEntranceExamTask):
if is_closed:
form.add_error('solution', 'Вступительная работа завершена. Решения больше не принимаются')
elif form.is_valid():
solution_file = sistema.uploads.save_file(
form.cleaned_data['solution'],
'entrance-exam-programs-solutions'
)
with transaction.atomic():
if type(task) is models.ProgramEntranceExamTask:
language = form.cleaned_data['language']
solution_kwargs = {'language': language}
else:
language = None
solution_kwargs = {}
ejudge_queue_element = modules.ejudge.queue.add_from_file(
task.ejudge_contest_id,
task.ejudge_problem_id,
language,
solution_file
)
solution = task.solution_class(
user=request.user,
task=task,
solution=solution_file,
ejudge_queue_element=ejudge_queue_element,
ip=ip,
**solution_kwargs
)
solution.save()
return JsonResponse({'status': 'ok', 'solution_id': solution.id})
return JsonResponse({'status': 'error', 'errors': form.errors})
@login_required
def task_solutions(request, task_id):
task = get_object_or_404(models.EntranceExamTask, id=task_id)
solutions = task.solutions.filter(user=request.user).order_by('-created_at')
if isinstance(task, models.EjudgeEntranceExamTask):
is_checking = any(s.result is None for s in solutions)
is_passed = any(s.is_checked and s.result.is_success for s in solutions)
template_name = task.solutions_template_file
return render(request, 'entrance/exam/' + template_name, {
'task': task,
'solutions': solutions,
'is_checking': is_checking,
'is_passed': is_passed
})
if type(task) is models.FileEntranceExamTask:
return render(request, 'entrance/exam/_file_solutions.html', {
'task': task,
'solution': solutions.first()
})
return HttpResponseNotFound()
@login_required
def upgrade_panel(request):
base_level, _ = get_entrance_level_and_tasks(request.school, request.user)
return render(request, 'entrance/_exam_upgrade.html', {
'is_user_at_maximum_level': upgrades.is_user_at_maximum_level(
request.school,
request.user,
base_level
),
'can_upgrade': upgrades.can_user_upgrade(
request.school,
request.user,
base_level
),
})
@login_required
def solution(request, solution_id):
solution = get_object_or_404(models.FileEntranceExamTaskSolution,
id=solution_id)
if solution.user != request.user and not request.user.is_staff:
return HttpResponseForbidden()
return sistema.helpers.respond_as_attachment(request,
solution.solution,
solution.original_filename)
@require_POST
@login_required
@transaction.atomic
def upgrade(request):
entrance_exam = get_object_or_404(models.EntranceExam, school=request.school)
is_closed = entrance_exam.is_closed()
# Don't allow upgrading if the exam has already finished
if is_closed:
return redirect(entrance_exam.get_absolute_url())
base_level = upgrades.get_base_entrance_level(request.school, request.user)
# We may need to upgrade several times because there are levels with
# the same sets of tasks
while upgrades.can_user_upgrade(request.school, request.user):
maximum_level = upgrades.get_maximum_issued_entrance_level(
request.school,
request.user,
base_level
)
next_level = models.EntranceLevel.objects.filter(
school=request.school,
order__gt=maximum_level.order
).order_by('order').first()
models.EntranceLevelUpgrade(
user=request.user,
upgraded_to=next_level
).save()
return redirect(entrance_exam.get_absolute_url())
def results(request):
table = EntrancedUsersTable(request.school)
frontend.table.RequestConfig(request).configure(table)
return render(request, 'entrance/results.html', {
'table': table,
'school': request.school,
})
@cache_page(5 * 60)
def results_data(request):
table = EntrancedUsersTable(request.school)
return TableDataSource(table).get_response(request)
@require_POST
@login_required
def set_enrollment_type(request, step_id):
step = get_object_or_404(
models.SelectEnrollmentTypeEntranceStep,
id=step_id, school=request.school
)
form = forms.SelectEnrollmentTypeForm(
step.enrollment_types.all(),
data=request.POST
)
if form.is_valid():
enrollment_type = models.EnrollmentType.objects.get(
pk=form.cleaned_data['enrollment_type']
)
models.SelectedEnrollmentType.objects.update_or_create(
user=request.user,
step=step,
defaults={
'enrollment_type': enrollment_type,
'is_moderated': not enrollment_type.needs_moderation,
'is_approved': not enrollment_type.needs_moderation,
'entrance_level': None
}
)
else:
# TODO (andgein): show error if form is not valid
raise ValueError('Errors: ' + ', '.join(map(str, form.errors)))
return redirect('school:user', request.school.short_name)
@require_POST
@login_required
def reset_enrollment_type(request, step_id):
step = get_object_or_404(
models.SelectEnrollmentTypeEntranceStep,
id=step_id, school=request.school
)
models.SelectedEnrollmentType.objects.filter(
user=request.user,
step=step
).delete()
return redirect('school:user', request.school.short_name)
@require_POST
@login_required
def select_session_and_parallel(request, step_id):
get_object_or_404(models.ResultsEntranceStep, id=step_id, school=request.school)
entrance_status = models.EntranceStatus.get_visible_status(request.school, request.user)
if not entrance_status.is_enrolled:
return HttpResponseNotFound()
form = forms.SelectSessionAndParallelForm(
entrance_status.sessions_and_parallels.all(),
data=request.POST
)
if form.is_valid():
selected = models.EnrolledToSessionAndParallel.objects.get(
pk=form.cleaned_data['session_and_parallel']
)
with transaction.atomic():
selected.select_this_option()
entrance_status.approve()
else:
# TODO (andgein): show error if form is not valid
raise ValueError('Errors: ' + ', '.join(map(str, form.errors)))
return redirect('school:user', request.school.short_name)
@require_POST
@login_required
def reset_session_and_parallel(request, step_id):
step = get_object_or_404(models.ResultsEntranceStep, id=step_id, school=request.school)
entrance_status = models.EntranceStatus.get_visible_status(request.school, request.user)
if not entrance_status.is_enrolled:
return HttpResponseNotFound()
if step.available_to_time and step.available_to_time.passed_for_user(request.user):
return redirect('school:user', request.school.short_name)
with transaction.atomic():
entrance_status.sessions_and_parallels.update(selected_by_user=False)
entrance_status.remove_approving()
return redirect('school:user', request.school.short_name)
@require_POST
@login_required
def approve_enrollment(request, step_id):
get_object_or_404(models.ResultsEntranceStep, id=step_id, school=request.school)
entrance_status = models.EntranceStatus.get_visible_status(request.school, request.user)
if not entrance_status.is_enrolled:
return HttpResponseNotFound()
if entrance_status.sessions_and_parallels.count() != 1:
return HttpResponseNotFound()
with transaction.atomic():
entrance_status.sessions_and_parallels.update(selected_by_user=True)
entrance_status.approve()
return redirect('school:user', request.school.short_name)
@require_POST
@login_required
def reject_participation(request, step_id):
get_object_or_404(models.ResultsEntranceStep, id=step_id, school=request.school)
entrance_status = models.EntranceStatus.get_visible_status(request.school, request.user)
if not entrance_status.is_enrolled:
return HttpResponseNotFound()
models.RejectionAbsenceReason.objects.create(
school=request.school,
user=request.user,
created_by=request.user,
)
return redirect('school:user', request.school.short_name)
# --- Source file boundary (dataset sample id: 1755108) ---
import numpy as np
import sys
import optwrapper as ow
A = np.array( [ [1.0979, -.0105, .0167 ], [-.0105, 1.0481, .0825], [.0167, .0825, 1.1540] ] )
def instcost( x, u, grad=True ):
Q = np.zeros( (3,3) )
R = .01
if( not grad ):
return x.dot(Q).dot(x) + u.dot(R).dot(u)
return ( x.dot(Q).dot(x) + u.dot(R).dot(u),
2 * x.dot(Q),
2 * u.dot(R) )
icostdxpattern = np.ones( (3,) )
icostdupattern = np.ones( (1,) )
def fcost( x, grad=True ):
S = np.identity(3)
dist = x - np.ones(3)
if( not grad ):
return dist.dot(S).dot(dist)
return( dist.dot(S).dot(dist),
2 * dist.dot(S) )
fcostdxpattern = np.ones( (3,) )
def dynamics_mode1( x, u, grad=True ):
B1 = np.array( [ [.9801], [-.1987], [0] ] )
if( not grad ):
return A.dot(x) + B1.dot(u)
return( A.dot(x) + B1.dot(u),
A,
B1 )
Apattern = np.ones( (3,3) )
B1pattern = np.array( [ [1], [1], [0] ], dtype=int )  # np.int is deprecated/removed in modern NumPy
def dynamics_mode2( x, u, grad=True ):
B2 = np.array( [ [.1743], [.8601], [-.4794] ] )
if( not grad ):
return A.dot(x) + B2.dot(u)
return( A.dot(x) + B2.dot(u),
A,
B2 )
B2pattern = np.ones( (3,1) )
def dynamics_mode3( x, u, grad=True ):
B3 = np.array( [ [.0952], [.4699], [0.8776] ] )
if( not grad ):
return A.dot(x) + B3.dot(u)
return( A.dot(x) + B3.dot(u),
A,
B3 )
B3pattern = np.ones( (3,1) )
## create an instance of socp:
prob = ow.socp.Problem( Nstates = 3, Ninputs = 1, Nmodes = 3, Ncons = 0 )
prob.initCond( [ 0.0, 0.0, 0.0 ] )
prob.timeHorizon( 0.0, 1.0 )
prob.costInstant( (instcost,) * 3,
dxpattern = (icostdxpattern,) * 3,
dupattern = (icostdupattern,) * 3 )
prob.costFinal( fcost, dxpattern = fcostdxpattern )
prob.vectorField( ( dynamics_mode1, dynamics_mode2, dynamics_mode3 ),
dxpattern = (Apattern,) * 3,
dupattern = ( B1pattern, B2pattern, B3pattern ) )
prob.consBoxState( -1 * np.ones( (prob.Nstates,) ), 2 * np.ones( (prob.Nstates,) ) )
prob.consBoxInput( -20 * np.ones( (prob.Ninputs,) ), 20 * np.ones( (prob.Ninputs,) ) )
( nlpprob, initencode, solndecode ) = prob.discForwardEuler( Nsamples = 30 )
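# discForwardEuler appears to discretize the switched optimal-control problem into a
# standard NLP: `initencode` packs an initial guess (initial state, active mode list,
# relaxed mode weights) into the NLP variable vector, and `solndecode` unpacks an NLP
# solution back into state/input/discrete-input trajectories and the time grid (see below).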
if( not nlpprob.checkGrad( debug=True ) ):
sys.exit( "Gradient check failed." )
if( not nlpprob.checkPattern( debug=True ) ):
sys.exit( "Pattern check failed." )
solver = ow.ipopt.Solver( nlpprob )
solver.initPoint( initencode( prob.init, [0], [1/3, 1/3, 1/3] ) )
solver.debug = True
# solver.options[ "summaryFile" ] = "stdout"
# solver.options[ "printFile" ] = "debugp.txt"
# solver.options[ "printLevel" ] = 111111
# solver.options[ "minorPrintLevel" ] = 10
# solver.options[ "verifyLevel" ] = 3
solver.solve()
print( "Status: " + nlpprob.soln.getStatus() )
print( "Value: " + str( nlpprob.soln.value ) )
# print( "Final point: " + str( nlpprob.soln.final ) )
print( "Retval: " + str( nlpprob.soln.retval ) )
( st, u, d, time ) = solndecode( nlpprob.soln.final )
print( "Optimal state:\n" + str( st ) )
print( "Optimal continuous input:\n" + str( u ) )
print( "Optimal relaxed discrete input:\n" + str( d ) )
Npwm = 5
thaar = np.linspace( time[0], time[-1], 2**Npwm + 1 )
uhaar = ow.socp.haarWaveletApprox( time, u, Npwm )
dhaar = ow.socp.haarWaveletApprox( time, d, Npwm )
( tpwm, upwm, dpwm ) = ow.socp.pwmTransform( thaar, uhaar, dhaar )
print( "PWM time samples:\n" + str( tpwm ) )
print( "Optimal continuous input:\n" + str( upwm ) )
print( "Optimal pure discrete input:\n" + str( dpwm ) )
# --- Source file boundary (dataset sample id: 4828648, GitHub stars: 0) ---
from elegant_finrl.run import *
from elegant_finrl.agent import AgentPPO, AgentDDPG
from elegant_finrl.env import StockTradingEnv
import yfinance as yf
from stockstats import StockDataFrame as Sdf
import torch  # make the torch dependency explicit; it is passed to trade_prediction() below
args = Arguments(if_on_policy=True)
args.agent = AgentPPO()
args.env = StockTradingEnv(cwd='./', if_eval=True)
args.if_remove = False
args.cwd = './AgentPPO/StockTradingEnv-v1_0'
args.init_before_training()
prediction = args.env.trade_prediction(args, torch)
args.env.backtest_plot(prediction, baseline_ticker = '^DJI', baseline_start = '2019-01-01', baseline_end = '2021-01-01')
# --- Source file boundary (dataset sample id: 1798617) ---
# Repo: Aliacf21/BotBuilder-Samples
# File: samples/python/47.inspection/data_models/__init__.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .custom_state import CustomState
__all__ = ["CustomState"]
# --- Source file boundary (dataset sample id: 1627657, GitHub stars: 0) ---
# -*- coding: utf-8 -*-
import logging
from pymongo import MongoClient
class DB_manager(object):
def __init__(self):
self.logger = logging.getLogger(" {0}".format(__name__))
def init_db(self):
self.client = MongoClient('localhost', 24730)
self.db = self.client.mobileparser
self.logger.debug('mongodb connection opened')
def close_db(self):
self.client.close()
self.logger.debug('mongodb connection closed')
def handle_data(self, data):
self.logger.debug(data)
parser_info = {
"parser_version": data["parser_version"],
"parser_name": data["parser_name"].lower(),
"parse_date": data["parse_date"]
}
for restaurant in data['restaurants']:
serialized = self.todict(restaurant)
serialized["parser_info"] = parser_info
self.logger.debug(serialized)
self.update_restaurant(serialized)
def update_restaurant(self, restaurant):
r_id = restaurant["restaurant_info"]["id"]
chain = restaurant["restaurant_info"]["chain"]
week = restaurant["foodlist_date"]["week_number"]
year = restaurant["foodlist_date"]["year"]
# restaurant foodlist
searched_foodlist = {
"foodlist_info.id": r_id,
"foodlist_chain": chain,
"foodlist_info.week_number": week,
"foodlist_info.year": year
}
foodlist = {
"foodlist_info": {
"id": r_id,
"chain": chain,
"week_number": week,
"year": year
},
"weekly_foods": restaurant["weekly_foods"],
"debug": restaurant["parser_info"]
}
# restaurant info
searched_info = {
"restaurant_info.id": r_id
}
info = {
"restaurant_info": restaurant["restaurant_info"],
"debug": restaurant["parser_info"]
}
self.db.foods.update(searched_foodlist, foodlist, upsert=True)
self.db.info.update(searched_info, info, upsert=True)
def update_parser_version(self, version):
self.db.parser.save({"version": version})
def todict(self, obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = self.todict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return self.todict(obj._ast())
elif hasattr(obj, "__iter__"):
return [self.todict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict(
[(key, self.todict(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')]
)
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
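# Minimal usage sketch (kept as comments; assumes a MongoDB instance on localhost:24730
# and parser output shaped like the `data` dict handled above — names are illustrative):
#
#   manager = DB_manager()
#   manager.init_db()
#   manager.handle_data(parsed_data)   # `parsed_data` comes from a parser, not shown here
#   manager.close_db()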
# --- Source file boundary (dataset sample id: 3388588) ---
# Repo: smrmkt/online_learning_algorithms
#!/usr/bin/env python
#-*-coding:utf-8-*-
import enum
import numpy as np
class Evaluator:
CalcType = enum.Enum("CalcType", ["update", "predict"])
def __init__(self, model, y_vec, feats_vec):
self.model = model
self.count = len(y_vec)
self.y_vec = y_vec
self.feats_vec = feats_vec
self.tp = 0
self.fp = 0
self.fn = 0
self.tn = 0
self.accuracy = []
self.precision = []
self.recall = []
def _calc(self, calc_type):
tp, fp, fn, tn = 0, 0, 0, 0
accuracy, precision, recall = [], [], []
for i in range(self.count):
if calc_type == Evaluator.CalcType.update:
ret = self.model.update(self.y_vec[i], self.feats_vec[i])
elif calc_type == Evaluator.CalcType.predict:
ret = 1 if self.y_vec[i]*self.model.predict(self.feats_vec[i]) > 0 else 0
tp += 1 if ret == 1 and self.y_vec[i] == 1 else 0
fp += 1 if ret == 0 and self.y_vec[i] == -1 else 0
fn += 1 if ret == 0 and self.y_vec[i] == 1 else 0
tn += 1 if ret == 1 and self.y_vec[i] == -1 else 0
accuracy.append(float(tp+tn)/(tp+fp+fn+tn))
precision.append(float(tp)/(tp+fp) if tp+fp > 0 else 0.0)
recall.append(float(tp)/(tp+fn) if tp+fn > 0 else 0.0)
self.tp = tp
self.fp = fp
self.fn = fn
self.tn = tn
self.accuracy = accuracy
self.precision = precision
self.recall = recall
def update(self):
self._calc(Evaluator.CalcType.update)
def predict(self):
self._calc(Evaluator.CalcType.predict)
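# Minimal usage sketch (kept as comments; `model` is any object exposing update(y, feats)
# and predict(feats), e.g. an online learner from this repository — names are illustrative):
#
#   evaluator = Evaluator(model, y_vec, feats_vec)
#   evaluator.update()                 # online pass: learn and score each example in turn
#   print(evaluator.accuracy[-1])      # final cumulative accuracy
#   evaluator.predict()                # evaluation-only pass with the trained model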
# --- Source file boundary (dataset sample id: 1759033) ---
# -*- coding: utf-8 -*-
import time
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
from Kinetic import extractPoints
from numpy import *
#import pyttsx
k = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Body)
print "Kinect lance"
#e = pyttsx.init()
#e.say('Bonjour et bienvenu dans la prossaidure de calibration de la machine vivante. Une personne doit se mettre debout au centre de la saine, face public, les bras ecartai comme jaizu cri. et une autre personne est praite a tourner la Kinect selon l''axe Z. Tenez vous prai dans dix, neuf, huit, sept, six, cinq, quatre, trois, deux, un.')
#e.runAndWait()
calib = True
while calib :
time.sleep(0.1)
seeBody = False
if k.has_new_body_frame():
bs = k.get_last_body_frame()
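# The Kinect v2 floor clip plane is a 4-vector (x, y, z, w): (x, y, z) is the floor
# normal in camera coordinates and w is the sensor height above the floor, which is
# why the tilt is computed as atan(z / y) below and w is used directly as the height.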
tiltrad = arctan(bs.floor_clip_plane.z/bs.floor_clip_plane.y)
w = bs.floor_clip_plane.w
#print tiltrad*180.0/pi,w
if bs is not None:
for b in bs.bodies:
if not b.is_tracked:
continue
# get joints positions
js = b.joints
kpos = extractPoints(js,tiltrad,w,0.0,0.0)
if kpos["spine_base"][1]>0.05:
# e.say(u'tourner la kinect un peu a droite!')
# e.runAndWait()
print(u'tourner la kinect un peu a droite!')
elif kpos["spine_base"][1]<-0.05:
# e.say(u'tourner la kinect un peu a gauche!')
# e.runAndWait()
print(u'tourner la kinect un peu a gauche!')
else:
# e.say('c''est bon ne touchez plus la Kinect, tout est calibrai. Merci de votre devoumain')
# e.runAndWait()
print('c''est bon ne touchez plus la Kinect, tout est calibrai. Merci de votre devoumain')
print "rtip"
print kpos["r_tip"]
print "ltip"
print kpos["l_tip"]
print "spine"
print kpos["spine_base"]
print "tilt"
print tiltrad*180.0/pi
print "hkinect"
print w
print "dkinect"
print -kpos["spine_base"][0]
print "pan"
print arctan((kpos["r_tip"][0]-kpos["l_tip"][0])/(kpos["r_tip"][1]-kpos["l_tip"][1]))*180.0/pi
calib = False
# --- Source file boundary (dataset sample id: 58800) ---
import indicoio
from celery import Celery
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_googlemaps import GoogleMaps
from flask_mail import Mail
from flask_mongoengine import MongoEngine
from app.celery.factory import init_celery
from config import config, DEVELOPMENT_CONFIG_NAME
celery = Celery('app')
mail = Mail()
db = MongoEngine()
bootstrap = Bootstrap()
def create_app(config_name=DEVELOPMENT_CONFIG_NAME):
app = Flask(__name__)
app.config.from_object(config[config_name])
register_extensions(app)
register_blueprints(app)
init_celery(app, celery)
GoogleMaps(app)
set_indico_key(config[config_name])
return app
def set_indico_key(cfg):
indicoio.config.api_key = cfg.INDICO_KEY
def register_extensions(app):
mail.init_app(app)
db.init_app(app)
bootstrap.init_app(app)
def register_blueprints(app):
from app.blueprints.main import main
from app.blueprints.auth import auth
from app.blueprints.account import account
from app.blueprints.errors import errors
app.register_blueprint(main)
app.register_blueprint(auth)
app.register_blueprint(account)
app.register_blueprint(errors)
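# Minimal usage sketch (kept as comments; the run parameters are assumptions, not taken
# from this package):
#
#   app = create_app()            # defaults to the development configuration
#   if __name__ == '__main__':
#       app.run(debug=True)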
# --- Source file boundary (dataset sample id: 4803573) ---
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test apps security token API."""
import unittest
from mock import patch
from test_utils import GetExpectedPrintOutput
from test_utils import LoadTestJsonFile
from tokens_api_test_base import TokensApiPrintTokensTestBase
_USER_ID = '0' # Stub user_id.
# PyLint dislikes the method names Python unittest prefers (testXXX).
# pylint: disable=g-bad-name
@patch('admin_sdk_directory_api.tokens_api.TokensApiWrapper.GetToken')
class TokensApiPrintTokensForUserAndClientIdWithTokensTest(
TokensApiPrintTokensTestBase):
"""Wrapper to test token print code with both user and client_id supplied.
NOTE: When you @patch() a class, every testXXX() method will receive an
extra argument. The extra argument is the patched function object
that is then used to mock return values, check invocations, etc.
"""
def setUp(self): # pylint: disable=g-bad-name
"""Setup token return data for these tests."""
super(TokensApiPrintTokensForUserAndClientIdWithTokensTest,
self).setUp()
self._client_id = 'twitter.com'
# Token data captured from:
# ./cmds/ls_tokens_for_user_clientid.py -u <EMAIL> \
# -i twitter.com
self.returned_token_doc = {
u'anonymous': False,
u'clientId': u'twitter.com',
u'displayText': u'twitter.com',
u'kind': u'admin#directory#token',
u'nativeApp': False,
u'scopes': [u'http://www.google.com/m8/feeds/'],
u'userKey': u'112351558298938768732'}
def testBasicPrintSingleTokenForUserAndClientIdWithTokens(
self, mock_get_token_request_fn):
mock_get_token_request_fn.return_value = self.returned_token_doc
self._tokens_api.PrintTokenForUserClientId(_USER_ID, self._client_id,
long_list=False)
self.assertEqual(GetExpectedPrintOutput('PrintTokenForUserClientId.1'),
self._new_stdout.print_messages)
def testLongPrintSingleTokenForUserAndClientIdWithTokens(
self, mock_get_token_request_fn):
mock_get_token_request_fn.return_value = self.returned_token_doc
self._tokens_api.PrintTokenForUserClientId(_USER_ID, self._client_id,
long_list=True)
self.assertEqual(GetExpectedPrintOutput('PrintTokenForUserClientId.2'),
self._new_stdout.print_messages)
@patch('admin_sdk_directory_api.tokens_api.TokensApiWrapper.ListTokens')
class TokensApiPrintTokensForUserWithTokensTest(
TokensApiPrintTokensTestBase):
"""Wrapper to test token print code with just user supplied."""
def setUp(self):
"""Setup token return data for these tests."""
super(TokensApiPrintTokensForUserWithTokensTest, self).setUp()
self.returned_token_doc = LoadTestJsonFile(
'primarydomain.com_valid_tokendata.json')
def testBasicPrintTokensForUserWithTokens(self, mock_list_tokens_request_fn):
mock_list_tokens_request_fn.return_value = self.returned_token_doc
self._tokens_api.PrintTokensForUser(_USER_ID, long_list=False)
self.assertEqual(GetExpectedPrintOutput('PrintTokensForUser.1'),
self._new_stdout.print_messages)
def testLongPrintTokensForUserWithTokens(
self, mock_list_tokens_request_fn):
mock_list_tokens_request_fn.return_value = self.returned_token_doc
self._tokens_api.PrintTokensForUser(_USER_ID, long_list=True)
self.assertEqual(GetExpectedPrintOutput('PrintTokensForUser.2'),
self._new_stdout.print_messages)
if __name__ == '__main__':
unittest.main()
# --- Source file boundary (dataset sample id: 1674004) ---
# Repo: Pseudomanifold/pyper
"""Filtrations and persistent homology calculation for functions."""
import enum
import operator
import numpy as np
from ..utilities import UnionFind
from ..representations import PersistenceDiagram
def calculate_persistence_diagrams_1d(
function,
order='sublevel',
):
"""Calculate persistence diagrams for a 1D function.
Calculates persistence diagrams for a 1D function, following the
usual Morse filtration. This is equivalent to calculating one of
the "merge" or "split" trees.
Parameters
----------
function:
Input function. Should be an array or an array-like data
structure that affords iteration.
order:
Specifies the filtration order that is to be used for calculating
persistence diagrams. Can be either 'sublevel' for a sublevel set
filtration, or 'superlevel' for a superlevel set filtration.
Returns
-------
Persistence diagram of the merge or split tree.
"""
assert order in ['sublevel', 'superlevel']
function = np.asarray(function)
if order == 'sublevel':
indices = np.argsort(function, kind='stable')
predicate = operator.lt
else:
indices = np.argsort(-function, kind='stable')
predicate = operator.gt
n = len(function)
# Union--Find data structure for tracking the indices of the
# persistence diagram.
uf = UnionFind(n)
# Will contain persistence pairs as index tuples where each
# index refers to a point in the input series.
persistence_pairs = []
# In the comments of this function, I will assume that `predicate`
# refers to the 'less than' operator. Hence, I will speak of local
# minima and so on. This is done to make the code more readable.
for index in indices:
x = function[index]
u = x
v = x
# Grab neighbours, if available. Else, we just pretend that we
# discovered x again. Since we do not check for equality below
# this works out just fine.
if index > 0:
u = function[index - 1]
if index < n - 1:
v = function[index + 1]
# Case 1 [local minimum]: both neighbours have higher function
# values
if predicate(x, u) and predicate(x, v):
# Nothing to do here
pass
# Case 2 [local maximum]: both neighbours have lower function
# values
elif predicate(u, x) and predicate(v, x):
# For the persistence pairing, the 'older' branch persists,
# while the 'younger' branch is being merged. To decide the
# age, we use the predicate between both neighbours. Notice
# that this also decides the 'direction' of the merge. It's
# crucial to look up the lowest point in each component for
# merging in the proper direction.
if predicate(function[uf.find(index - 1)],
function[uf.find(index + 1)]):
# u is the 'older' branch and persists; merge everything
# into it.
persistence_pairs.append((uf.find(index + 1), index))
uf.merge(index, index + 1)
uf.merge(index + 1, index - 1)
else:
# v is the 'older' branch and persists; merge everything
# into it.
persistence_pairs.append((uf.find(index - 1), index))
uf.merge(index, index - 1)
uf.merge(index - 1, index + 1)
# Case 3 [regular point]: one neighbour has a higher function
# value, the other one has a lower function value.
else:
# Indicates whether a merge should be done, and if so, which
# direction should be used. `LEFT` refers to the left vertex
# with respect to the current vertex, for example.
class Merge(enum.Enum):
LEFT, RIGHT = range(2)
# Indicates all merges that need to be done. It is possible
# that we want to merge both to the left and to the right.
merges = []
# Only add the edge to the *lower* point because we have
# already encountered the point in filtration order. The
# higher point will then add the second edge.
if predicate(u, x):
merges.append(Merge.LEFT)
elif predicate(v, x):
merges.append(Merge.RIGHT)
# Check whether the point is incomparable with its left and
# right neighbours, respectively. This will decide how they
# can be merged.
incomparable_l_nb = not predicate(u, x) and not predicate(x, u)
incomparable_r_nb = not predicate(v, x) and not predicate(x, v)
# At this point, we have already checked whether there are
# points that are truly *lower* than the current point and
# we have recorded their merges. In addition, we now check
# for vertices for which the predicate yields incomparable
# results, for this indicates that they are *equal* to the
# current point, from the perspective of the predicate.
#
# Left neighbour is incomparable with current point, hence
# we can choose it to create a new edge.
if incomparable_l_nb:
merges.append(Merge.LEFT)
# Right neighbour is incomparable with current point, hence
# we can choose it to create a new edge.
if incomparable_r_nb:
merges.append(Merge.RIGHT)
# Depending on the merge direction, adjust all required
# data structures. If we do not have at least one valid
# merge, we just skip everything.
if not merges:
continue
if Merge.LEFT in merges and index != 0:
# Switch the order in which the merges happens if the
# neighbours are incomparable. This is done because a
# representative of a component should be the highest
# index with respect to the ordering of the series so
# that lower indices will become children of it.
if incomparable_l_nb:
uf.merge(index - 1, index)
else:
uf.merge(index, index - 1)
if Merge.RIGHT in merges and index != n - 1:
if incomparable_r_nb:
uf.merge(index + 1, index)
else:
uf.merge(index, index + 1)
# Merge the minimum with the maximum for the time series. This
# ensures that the diagram always contains at least a *single*
# persistence pair.
if len(indices) >= 2:
if (indices[0], indices[-1]) not in persistence_pairs:
persistence_pairs.append((indices[0], indices[-1]))
pd = PersistenceDiagram(
[(function[c], function[d]) for c, d in persistence_pairs]
)
return pd
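# Minimal usage sketch (kept as comments; the input series is illustrative, and how to
# inspect the returned PersistenceDiagram depends on its API, so the print is an assumption):
#
#   f = [0.0, 2.0, 1.0, 3.0, 0.5]
#   diagram = calculate_persistence_diagrams_1d(f, order='sublevel')
#   print(diagram)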
# --- Source file boundary (dataset sample id: 193605) ---
from django import template
from trax.trax import utils
register = template.Library()
@register.filter(name='humanize_timedelta')
def d(value):
return utils.humanize_timedelta(value)
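# Template usage sketch (kept as comments; assumes this templatetags module is loaded
# under a name such as "trax_tags" — that load name is an assumption):
#
#   {% load trax_tags %}
#   {{ some_timedelta|humanize_timedelta }}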
# --- Source file boundary (dataset sample id: 1721012) ---
# Generated by Django 2.2.9 on 2020-10-25 10:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0005_auto_20201004_1241'),
]
operations = [
migrations.AlterModelOptions(
name='group',
options={'verbose_name': 'Группа', 'verbose_name_plural': 'Группы'},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-pub_date'], 'verbose_name': 'Запись', 'verbose_name_plural': 'Записи'},
),
migrations.AlterField(
model_name='group',
name='slug',
field=models.SlugField(max_length=200, unique=True),
),
]
# --- Source file boundary (dataset sample id: 182623, GitHub stars: 1-10) ---
import logging
import time
try:
from fortrace.core.vmm import Vmm
from fortrace.utility.logger_helper import create_logger
from fortrace.core.vmm import GuestListener
from fortrace.core.reporter import Reporter
import fortrace.utility.scenarioHelper as scenH
except ImportError as ie:
print("Import error! in fileManagement.py " + str(ie))
exit(1)
# This test script verifies the functionality and shows the usage of the different antiForensics functions.
# The effects of some functions may interfere with each other (e.g. clearing the Event Log
# entries and then deleting the log files makes the first step redundant).
# Function status
disableEventLog = True
disableHibernation = True
disablePageFile = True
disablePrefetch = True
disableRecentFiles = True
disableRecycleBin = True
disableThumbcache = True
disableUserAssist = True
clearEventLogFiles = True
clearEventLogEntries = True
clearJumpLists = True
clearPrefetch = True
clearThumbcache = True
clearUserAssist = True
clearRecentDocs = True
setRegistryKey = True
deleteRegistryKey = True
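# Note: every call below follows the same "invoke, then poll antifor.is_busy" pattern.
# A helper along these lines (illustrative sketch, not used in this script) would remove
# the repetition:
#
#   def run_and_wait(action, *args):
#       action(*args)
#       while antifor.is_busy is True:
#           time.sleep(1)
#
#   # e.g. run_and_wait(antifor.disableRecycleBin, "1")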
# Instantiate the VMM and a VM
logger = create_logger('fortraceManager', logging.INFO)
macsInUse = []
guests = []
guestListener = GuestListener(guests, logger)
virtual_machine_monitor1 = Vmm(macsInUse, guests, logger)
imagename = "antiForensics_testscript"
guest = virtual_machine_monitor1.create_guest(guest_name=imagename, platform="windows")
# Wait for the VM to connect to the VMM
guest.waitTillAgentIsConnected()
# Create the antiForensics application object
antifor = guest.application("antiForensics", {})
# Disable the Recycle Bin
logger.info("Disabling the Recycle Bin")
try:
antifor.disableRecycleBin("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableRecycleBin = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disable creation of Thumbcache entries
logger.info("Disabling the Thumbcache")
try:
antifor.disableThumbcache("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableThumbcache = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disble creation of Prefetch files
logger.info("Disabling Prefetch")
try:
antifor.disablePrefetch("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disablePrefetch = False
print("An error occured: ")
print(e)
time.sleep(5)
# clear Event log entries
# Event Log service must be running
logger.info("Clearing Event log entries")
try:
antifor.clearEventLogEntries("security")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearEventLogEntries = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disable the Event Log Service
# CAUTION: Disabling the Service does NOT prevent the creation of events, but is necessary to delete the Log files.
logger.info("Disabling the Event Log service")
try:
antifor.disableEventLog("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableEventLog = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disable the creation of UserAssist entries in the Registry
logger.info("Disabling UserAssist")
try:
antifor.disableUserAssist("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableUserAssist = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disable System Hibernation and therefore the creation of the Hibernation file
logger.info("Disabling Hibernation")
try:
antifor.disableHibernation("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableHibernation = False
print("An error occured: ")
print(e)
time.sleep(5)
# Disable the page file usage
# This can have a negative effect on the system performance, but clearing the pagefile at shutdown
# did not work in every test run
logger.info("Disabling page file usage")
try:
antifor.disablePagefile("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disablePageFile = False
print("An error occured: ")
print(e)
time.sleep(5)
# disable recent files
logger.info("Disabling recent files")
try:
antifor.disableRecentFiles("1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
disableRecentFiles = False
print("An error occured: ")
print(e)
time.sleep(5)
##############
# To ensure all set options are active, the system is rebooted
##############
guest.shutdown("keep")
while guest.isGuestPowered():
time.sleep(5)
guest.start()
guest.waitTillAgentIsConnected()
# clear User Assist Registry key
logger.info("Deleting User Assist")
try:
antifor.clearUserAssist()
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearUserAssist = False
print("An error occured: ")
print(e)
time.sleep(5)
# clear Recent Docs Registry key
logger.info("Deleting Recent Docs")
try:
antifor.clearRecentDocs()
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearRecentDocs = False
print("An error occured: ")
print(e)
time.sleep(5)
# clear Event Log files
logger.info("Deleting Event Log files")
try:
antifor.clearEventLogFiles()
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearEventLogFiles = False
print("An error occured: ")
print(e)
time.sleep(5)
# Clear Prefetch Files
logger.info("Clearing Prefetch files")
try:
antifor.clearPrefetch()
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearPrefetch = False
print("An error occured: ")
print(e)
time.sleep(5)
# Clear Thumbcache files
logger.info("Clearing Thumbcache files")
try:
antifor.clearThumbcache("fortrace")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearThumbcache = False
print("An error occured: ")
print(e)
time.sleep(5)
# Clear Jumplist files
logger.info("Clearing Jumplist files")
try:
antifor.clearJumpLists("fortrace")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
clearJumpLists = False
print("An error occured: ")
print(e)
time.sleep(5)
# Set Registry key
logger.info("Changing user defined Registry key")
regKey_set = r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Policies\Explorer"
regkey_set_value = "DisableThumbnails"
regkey_set_type = "REG_DWORD"
try:
antifor.setRegistryKey(regKey_set, regkey_set_type, regkey_set_value, "1")
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
setRegistryKey = False
print("An error occured: ")
print(e)
time.sleep(5)
# Delete Registry key
regkey_del = r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Power"
regkey_del_val = "HibernateEnabledDefault"
try:
antifor.deleteRegistryKey(regkey_del, regkey_del_val)
while antifor.is_busy is True:
time.sleep(1)
except Exception as e:
deleteRegistryKey = False
print("An error occured: ")
print(e)
time.sleep(5)
# Shut down system and delete guest
guest.shutdown("keep")
while guest.isGuestPowered():
time.sleep(5)
guest.delete()
# Finish and print results
logger.info("Scenario finished!")
logger.info("Results:")
logger.info("disableEventLog: " + str(disableEventLog))
logger.info("disableHibernation: " + str(disableHibernation))
logger.info("disablePageFile: " + str(disablePageFile))
logger.info("disablePrefetch: " + str(disablePrefetch))
logger.info("disableRecentFiles: " + str(disableRecentFiles))
logger.info("disableRecycleBin: " + str(disableRecycleBin))
logger.info("disableThumbcache: " + str(disableThumbcache))
logger.info("disableUserAssist: " + str(disableUserAssist))
logger.info("clearEventLogEntries: " + str(clearEventLogEntries))
logger.info("clearEventLogFiles: " + str(clearEventLogFiles))
logger.info("clearJumpLists: " + str(clearJumpLists))
logger.info("clearPrefetch: " + str(clearPrefetch))
logger.info("clearThumbcache: " + str(clearThumbcache))
logger.info("clearUserAssist: " + str(clearUserAssist))
logger.info("clearRecentDocs: " + str(clearRecentDocs))
logger.info("setRegistryKey: " + str(setRegistryKey))
logger.info("deleteRegistryKey: " + str(deleteRegistryKey))
# --- Source file boundary (dataset sample id: 1745178) ---
# File: FEF_and_LIP_and_mdPul_v3.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 16:23:57 2020
@author: amelie
"""
from brian2 import *
from scipy import signal
from FEF_full import *
from LIP_full import *
from mdPul_two_columns_v3 import *
from itertools import *
def generate_syn(source,target,syntype,connection_pattern,g_i,taur_i,taud_i,V_i):
eq_syn='''_post=s_i*g_i*(V_post-V_i) : amp * meter ** -2 (summed)
ds_i/dt=-s_i/taud_i+(1-s_i)/taur_i*0.5*(1+tanh(V_pre/10/mV)) : 1
g_i : siemens * meter**-2
V_i : volt
taud_i : second
taur_i : second
'''
S=Synapses(source,target,model=syntype+eq_syn,method='exact')
if connection_pattern=='':
S.connect()
else :
S.connect(j=connection_pattern, skip_if_invalid=True)
S.g_i=g_i
S.taur_i=taur_i
S.taud_i=taud_i
S.V_i=V_i
return S
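# Usage sketch for generate_syn (kept as a comment; population names and values are
# illustrative, not taken from the networks built below). The `syntype` argument is the
# name of the summed synaptic current variable defined on the target group (without the
# `_post` suffix), so passing 'IsynRS' makes the generated equation read
# IsynRS_post = s_i*g_i*(V_post-V_i).
#
#   # S = generate_syn(source_group, target_group, 'IsynRS', 'i',
#   #                  0.1*msiemens*cm**-2, 0.125*ms, 1*ms, -80*mV)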
if __name__=='__main__':
prefs.codegen.target = 'numpy'
theta_phase='bad'
target_on=True
start_scope()
close('all')
runtime=1*second
Vrev_inp=0*mV
taurinp=0.1*ms
taudinp=0.5*ms
tauinp=taudinp
Vhigh=0*mV
Vlow=-80*mV
NN=1 #multiplicative factor on the number of neurons
N_RS,N_FS,N_SI,N_IB= NN*80,NN*20,NN*20,NN*20 #Number of neurons of RE, TC, and HTC type
N_SI,N_RS_gran,N_SI_gran=20,20,20
N_RS_vis,N_FS_vis,N_RS_mot,N_SI_mot,N_dSI_vm,N_RS_vm,N_gSI_vm=[20]*7
N_HTC,N_TC,N_RE= 20,80,100 #Number of neurons of RE, TC, and HTC type
all_SIdFSg=[2*msiemens * cm **-2] #1
all_FSgRSg=[1* msiemens * cm **-2]
all_RSgFSg=[1*msiemens * cm **-2]
all_RSgRSg=[0.3*msiemens * cm **-2]
all_FSgFSg=[0.3* msiemens * cm **-2]
all_RSgRSs=[2*msiemens * cm **-2]
all_RSgFSs=[0.1*msiemens * cm **-2]
all_FSgRSs=[0.1* msiemens * cm **-2]
all_J_RSg=['30 * uA * cmeter ** -2']
all_J_FSg=['5 * uA * cmeter ** -2']
all_thal=[10* msiemens * cm **-2]
thal=all_thal[0]
all_syn_cond=list(product(all_SIdFSg,all_FSgRSg,all_RSgFSg,all_RSgRSg,all_FSgFSg,all_RSgRSs,all_RSgFSs,all_FSgRSs))
all_J=list(product(all_J_RSg,all_J_FSg))
syn_cond=all_syn_cond[0]
J=all_J[0]
if theta_phase=='bad':
input_beta2_IB=False
input_beta2_RS=False
input_beta2_FS_SI=True
input_thalamus_gran=True
gFS=0* msiemens * cm **-2
ginp_SI=0* msiemens * cm **-2
ginpSIdeep=0* msiemens * cm **-2
thal_cond=2* msiemens * cm **-2
kainate='low'
if theta_phase=='good':
# input_beta2_IB=True
input_beta2_IB=False
ginp_IB=500* msiemens * cm **-2
ginpSIdeep=500* msiemens * cm **-2
input_beta2_RS=False
input_beta2_FS_SI=False
input_thalamus_gran=True
thal_cond=thal
kainate='low'
if theta_phase=='mixed':
input_mixed=True
ginp_IB=500* msiemens * cm **-2
ginpSIdeep=500* msiemens * cm **-2
input_beta2_IB=False
input_beta2_RS=False
input_beta2_FS_SI=False
input_thalamus_gran=False
kainate='low'
Vrev_inp2=0*mV
taurinp2=0.1*ms
taudinp2=0.5*ms
tauinp2=taudinp2
Vhigh2=0*mV
Vlow2=-80*mV
condition='mAChR'
in_mode='single_spike'
# in_mode='burst'
theta_phase='mixed'
if condition=='mGluR1':
gKL_TC=0.0028e-3 * siemens * cm **-2
gKL_HTC=0.0069e-3 * siemens * cm **-2
gKL_RE=0.05e-3 * siemens * cm **-2
elif condition=='mAChR':
gKL_TC=0.0028e-3 * siemens * cm **-2
gKL_HTC=0.0069e-3 * siemens * cm **-2
gKL_RE=0.08e-3 * siemens * cm **-2
# gapp=0.1*mamp * cmeter ** -2 # in HTC cells
gKL_HTC=0.001e-3 * siemens * cm **-2
gapp=0.1*mamp * cmeter ** -2 # in HTC cells
print('Network setup')
net=Network()
all_neurons_FEF,all_synapses_FEF,all_monitors_FEF=create_network_no_motor2(N_RS_vis,N_FS_vis,N_RS_mot,N_SI_mot,N_dSI_vm,N_RS_vm,N_gSI_vm,theta_phase,target_on,runtime)
R1FEF,R2FEF,R3FEF,V1FEF,V2FEF,V3FEF,R4FEF,R5FEF,V4FEF,V5FEF,mon_FS=all_monitors_FEF
RSvm_FEF,SIvm_FEF=all_neurons_FEF[1],all_neurons_FEF[2]
SI2vm_FEF=all_neurons_FEF[0]
all_neurons_LIP, all_synapses_LIP, all_gap_junctions_LIP, all_monitors_LIP=make_full_network(syn_cond,J,thal,theta_phase)
V1,V2,V3,R1,R2,R3,I1,I2,I3,V4,R4,I4s,I4a,I4ad,I4bd,R5,R6,R7,V5,V6,V7,inpmon,inpIBmon=all_monitors_LIP
RS_sup_LIP,IB_LIP,SI_deep_LIP=all_neurons_LIP[0],all_neurons_LIP[5],all_neurons_LIP[9]
RS_gran_LIP,FS_gran_LIP=all_neurons_LIP[7],all_neurons_LIP[8]
all_neurons_mdPul,all_synapses_mdPul,all_gap_junctions_mdPul,all_monitors_mdPul=create_mdPul(N_HTC,N_TC,N_RE,condition,in_mode,theta_phase)
R1A,R2A,V1A,V2A,I1A,I2A,R1B,R2B,V1B,V2B,I1B,I2B,RA,RB=all_monitors_mdPul
TC_B=all_neurons_mdPul[1]
IB_LIP.ginp_IB=0* msiemens * cm **-2 #the input to RS_sup_LIP is provided with synapses from FEF
SI_deep_LIP.ginp_SI=0* msiemens * cm **-2
SI2vm_FEF.ginp_VIP_good=0* msiemens * cm **-2
SI2vm_FEF.ginp_VIP_bad=0* msiemens * cm **-2
RS_gran_LIP.ginp_RS_good=0* msiemens * cm **-2 #5
FS_gran_LIP.ginp_FS_good=0* msiemens * cm **-2 #5
RS_gran_LIP.ginp_RS_bad=0* msiemens * cm **-2 #5
FS_gran_LIP.ginp_FS_bad=0* msiemens * cm **-2 #5
# if theta_phase=='good' or theta_phase=='mixed':
# RSvm_FEF.ginp_RS=10* msiemens * cm **-2
# SIvm_FEF.ginp_SI=10* msiemens * cm **-2
RSvm_FEF.ginp_RS=0* msiemens * cm **-2 #10
SIvm_FEF.ginp_SI=0* msiemens * cm **-2 #10
HTC_A,HTC_B=all_neurons_mdPul[0],all_neurons_mdPul[2]
if in_mode=='single_spike':
HTC_A.delay_steps = [1] # delay in time steps per neuron
HTC_B.delay_steps = [1] # delay in time steps per neuron
buffer_size = 2 # 1+Maximum delay (in time steps)
else :
HTC_A.delay_steps = [3999] # delay in time steps per neuron
HTC_B.delay_steps = [3999] # delay in time steps per neuron
buffer_size = 4000 # 1+Maximum delay (in time steps)
HTC_A.variables.add_array('voltage_buffer', dimensions=volt.dim, size=(buffer_size, len(HTC_A)))
HTC_B.variables.add_array('voltage_buffer', dimensions=volt.dim, size=(buffer_size, len(HTC_B)))
update_code = '''buffer_pointer = (buffer_pointer + 1) % buffer_size
voltage_delayed = update_voltage_buffer(V, voltage_buffer, buffer_pointer, delay_steps, buffer_size)'''
buffer_updater_A = HTC_A.run_regularly(update_code, codeobj_class=NumpyCodeObject)
buffer_updater_B = HTC_B.run_regularly(update_code, codeobj_class=NumpyCodeObject)
@check_units(V=volt, voltage_buffer=volt, buffer_pointer=1, delay_steps=1, buffer_size=1, result=volt)
def update_voltage_buffer(V, voltage_buffer, buffer_pointer, delay_steps, buffer_size):
# Write current rate into the buffer
voltage_buffer[buffer_pointer, :] = V
# Get delayed rates
rows = (buffer_pointer - delay_steps) % buffer_size
return voltage_buffer[rows, arange(len(rows))]
net.add(all_neurons_FEF)
net.add(all_synapses_FEF)
net.add(all_monitors_FEF)
net.add(all_neurons_LIP)
net.add(all_synapses_LIP)
net.add(all_gap_junctions_LIP)
net.add(all_monitors_LIP)
net.add(all_neurons_mdPul)
net.add(all_synapses_mdPul)
net.add(all_gap_junctions_mdPul)
net.add(all_monitors_mdPul)
S_FEF_IB_LIP=generate_syn(RSvm_FEF,IB_LIP,'Isyn_FEF','',0*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_FEF_SIdeep_LIP=generate_syn(RSvm_FEF,SI_deep_LIP,'Isyn_FEF','',0.03*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_LIP_RS_FEF=generate_syn(RS_sup_LIP,RSvm_FEF,'Isyn_LIP','',0.004*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_LIP_FS_FEF=generate_syn(RS_sup_LIP,SIvm_FEF,'Isyn_LIP','',0.004*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_FEF_mdPul=generate_syn(RSvm_FEF,TC_B,'Isyn_FEF','',0*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_mdPul_FEF_VIP=generate_syn(TC_B,SI2vm_FEF,'Isyn_mdPul','',0.05*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_mdPul_LIP_RSg=generate_syn(TC_B,RS_gran_LIP,'Isyn_mdPul','',0.05*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
S_mdPul_LIP_FSg=generate_syn(TC_B,FS_gran_LIP,'Isyn_mdPul','',0.05*msiemens * cm **-2,0.125*ms,1*ms,0*mV)
net.add(S_FEF_IB_LIP)
net.add(S_FEF_SIdeep_LIP)
net.add(S_LIP_RS_FEF)
net.add(S_LIP_FS_FEF)
net.add([S_FEF_mdPul])
net.add([S_mdPul_FEF_VIP,S_mdPul_LIP_RSg,S_mdPul_LIP_FSg])
print('Compiling with cython')
prefs.codegen.target = 'cython' #cython=faster, numpy = default python
net.run(runtime,report='text',report_period=300*second)
# LIP Plots
figure()
plot(R1.t,R1.i+140,'r.',label='RS cells')
plot(R2.t,R2.i+120,'m.',label='FS cells')
plot(R3.t,R3.i+100,'y.',label='SI cells')
plot(R5.t,R5.i+70,'g.',label='Granular RS')
plot(R6.t,R6.i+50,'c.',label='Granular FS')
plot(R4.t,R4.i+20,'b.',label='IB cells')
plot(R7.t,R7.i,'k.',label='Deep SI')
xlim(0,runtime/second)
legend(loc='upper left')
# min_t=int(50*ms*100000*Hz)
# LFP_V_RS=1/N_RS*sum(V1.V,axis=0)[min_t:]
# LFP_V_FS=1/N_FS*sum(V2.V,axis=0)[min_t:]
# LFP_V_SI=1/N_SI*sum(V3.V,axis=0)[min_t:]
# LFP_V_IB=1/N_IB*sum(V4.V,axis=0)[min_t:]
# LFP_V_RSg=1/N_FS*sum(V5.V,axis=0)[min_t:]
# LFP_V_FSg=1/N_FS*sum(V6.V,axis=0)[min_t:]
# LFP_V_SId=1/N_SI*sum(V7.V,axis=0)[min_t:]
#
# f,Spectrum_LFP_V_RS=signal.periodogram(LFP_V_RS, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_FS=signal.periodogram(LFP_V_FS, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_SI=signal.periodogram(LFP_V_SI, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_IB=signal.periodogram(LFP_V_IB, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_RSg=signal.periodogram(LFP_V_RSg, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_FSg=signal.periodogram(LFP_V_FSg, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V_SId=signal.periodogram(LFP_V_SId, 100000,'flattop', scaling='spectrum')
#
# figure(figsize=(10,8))
# subplot(421)
# plot(f,Spectrum_LFP_V_RS)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('RS cell')
# subplot(422)
# plot(f,Spectrum_LFP_V_FS)
# yticks([],[])
# xlim(0,100)
# title('FS cell')
# subplot(423)
# plot(f,Spectrum_LFP_V_SI)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('SI cell')
# subplot(425)
# plot(f,Spectrum_LFP_V_RSg)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('gran RS cell')
# subplot(426)
# plot(f,Spectrum_LFP_V_FSg)
# yticks([],[])
# xlim(0,100)
# title('gran FS cell')
# subplot(427)
# plot(f,Spectrum_LFP_V_IB)
# xlabel('Frequency (Hz)')
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('IB cell')
# subplot(428)
# plot(f,Spectrum_LFP_V_SId)
# yticks([],[])
# xlim(0,100)
# xlabel('Frequency (Hz)')
# title('deep SI cell')
#
# tight_layout()
#FEF Plots
figure(figsize=(10,4))
subplot(121)
title('Visual Neurons')
plot(R4FEF.t,R4FEF.i+20,'r.',label='RS')
plot(R5FEF.t,R5FEF.i+0,'b.',label='FS')
xlim(0,runtime/second)
legend(loc='upper left')
subplot(122)
title('Visual-Motor Neurons')
plot(R3FEF.t,R3FEF.i+0,'c.',label='VIP')
plot(R1FEF.t,R1FEF.i+60,'r.',label='RS')
plot(R2FEF.t,R2FEF.i+40,'b.',label='SI')
xlim(0,runtime/second)
legend(loc='upper left')
# subplot(133)
# title('Motor Neurons')
# plot(R6.t,R6.i+60,'r.',label='RS')
# plot(R7.t,R7.i+40,'b.',label='SI')
# plot(R8.t,R8.i+0,'c.',label='Fix')
# xlim(0,runtime/second)
# legend(loc='upper left')
# min_t=int(50*ms*100000*Hz)
# LFP_V1=1/20*sum(V1FEF.V,axis=0)[min_t:]
# LFP_V2=1/20*sum(V2FEF.V,axis=0)[min_t:]
# LFP_V3=1/20*sum(V3FEF.V,axis=0)[min_t:]
# LFP_V4=1/20*sum(V4FEF.V,axis=0)[min_t:]
# LFP_V5=1/20*sum(V5FEF.V,axis=0)[min_t:]
## LFP_V6=1/20*sum(V6.V,axis=0)[min_t:]
## LFP_V7=1/20*sum(V7.V,axis=0)[min_t:]
#
# f,Spectrum_LFP_V1=signal.periodogram(LFP_V1, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V2=signal.periodogram(LFP_V2, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V3=signal.periodogram(LFP_V3, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V4=signal.periodogram(LFP_V4, 100000,'flattop', scaling='spectrum')
# f,Spectrum_LFP_V5=signal.periodogram(LFP_V5, 100000,'flattop', scaling='spectrum')
## f,Spectrum_LFP_V6=signal.periodogram(LFP_V6, 100000,'flattop', scaling='spectrum')
## f,Spectrum_LFP_V7=signal.periodogram(LFP_V7, 100000,'flattop', scaling='spectrum')
#
# figure(figsize=(10,4))
# subplot(321)
# plot(f,Spectrum_LFP_V4)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('visual RS')
# subplot(323)
# plot(f,Spectrum_LFP_V5)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('visual FS')
#
# subplot(322)
# plot(f,Spectrum_LFP_V1)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('visual-motor gran RS')
# subplot(324)
# plot(f,Spectrum_LFP_V2)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('visual-motor gran SI')
# subplot(326)
# plot(f,Spectrum_LFP_V3)
# ylabel('Spectrum')
# yticks([],[])
# xlim(0,100)
# title('visual-motor deep SI')
#mdPul plots
figure()
plot(R1A.t,R1A.i+0,'r.',label='HTC')
plot(R2A.t,R2A.i+20,'y.',label='TC')
plot(RA.t,RA.i+100,'b.',label='RE lat')
plot([0,1],[225,225],'k')
plot(R1B.t,R1B.i+250,'r.')
plot(R2B.t,R2B.i+270,'y.')
plot(RB.t,RB.i+350,'b.')
xlim(0,runtime/second)
yticks([150,500],['Object I','Object II'])
legend()
# figure()
# plot(V1A.t,V1A.V[0],label='TC V')
# plot(V2A.t,V2A.V[0],label='RE V')
# legend()
# f,Spectrum_LFP_V1=signal.periodogram(V1A.V[0], 100000,'flattop', scaling='spectrum')
# figure()
# plot(f,Spectrum_LFP_V1)
# xlim(0,100)
clear_cache('cython')
# StarcoderdataPython 1622809
import contextlib
import datetime
import logging
import os
import tempfile
import uuid
from pathlib import Path
from typing import Dict, Iterator, List, Optional
from urllib.parse import urlparse, urlunparse
from pkg_resources import get_distribution
from scaraplate.automation.base import ProjectVCS, TemplateVCS
from scaraplate.config import get_scaraplate_yaml_options
from scaraplate.template import TemplateMeta, _call_git, get_template_meta_from_git
__all__ = ("GitCloneProjectVCS", "GitCloneTemplateVCS")
logger = logging.getLogger("scaraplate")
def scaraplate_version() -> str:
return get_distribution("scaraplate").version
class GitCloneTemplateVCS(TemplateVCS):
"""A ready to use :class:`.TemplateVCS` implementation which:
- Uses git
- Clones a git repo with the template to a temporary directory
(which is cleaned up afterwards)
- Allows to specify an inner dir inside the git repo as the template
root (which is useful for monorepos)
"""
def __init__(self, template_path: Path, template_meta: TemplateMeta) -> None:
self._template_path = template_path
self._template_meta = template_meta
@property
def dest_path(self) -> Path:
return self._template_path
@property
def template_meta(self) -> TemplateMeta:
return self._template_meta
@classmethod
@contextlib.contextmanager
def clone(
cls,
clone_url: str,
*,
clone_ref: Optional[str] = None,
monorepo_inner_path: Optional[Path] = None,
) -> Iterator["GitCloneTemplateVCS"]:
"""Provides an instance of this class by issuing ``git clone``
to a tempdir when entering the context manager. Returns a context
manager object which after ``__enter__`` returns an instance
of this class.
:param clone_url: Any valid ``git clone`` url.
:param clone_ref: Git ref to checkout after clone
(i.e. branch or tag name).
:param monorepo_inner_path: Path to the root dir of template
relative to the root of the repo. If ``None``, the root of
the repo will be used as the root of template.
"""
with tempfile.TemporaryDirectory() as tmpdir_name:
tmpdir_path = Path(tmpdir_name).resolve()
template_path = tmpdir_path / "scaraplate_template"
template_path.mkdir()
git = Git.clone(
clone_url,
target_path=template_path,
ref=clone_ref,
# We need to strip credentials from the clone_url,
# because otherwise urls generated for TemplateMeta
# would contain them, and we don't want that.
strip_credentials_from_remote=True,
)
template_path = git.cwd
if monorepo_inner_path is not None:
template_path = template_path / monorepo_inner_path
scaraplate_yaml_options = get_scaraplate_yaml_options(template_path)
template_meta = get_template_meta_from_git(
template_path, git_remote_type=scaraplate_yaml_options.git_remote_type
)
if clone_ref is not None:
assert clone_ref == template_meta.head_ref
yield cls(template_path, template_meta)
class GitCloneProjectVCS(ProjectVCS):
"""A ready to use :class:`.ProjectVCS` implementation which:
- Uses git
- Clones a git repo with the project to a temporary directory
(which is cleaned up afterwards)
- Allows to specify an inner dir inside the git repo as the project
root (which is useful for monorepos)
- Implements :meth:`.ProjectVCS.commit_changes` as
``git commit`` + ``git push``.
"""
def __init__(
self,
project_path: Path,
git: "Git",
*,
changes_branch: str,
commit_author: str,
commit_message_template: str,
) -> None:
self._project_path = project_path
self._git = git
self.changes_branch = changes_branch
self.commit_author = commit_author
self.commit_message_template = commit_message_template
self.update_time = datetime.datetime.now()
@property
def dest_path(self) -> Path:
return self._project_path
def is_dirty(self) -> bool:
return self._git.is_dirty()
def commit_changes(self, template_meta: TemplateMeta) -> None:
assert self.is_dirty()
remote_branch = self._git.remote_ref(self.changes_branch)
# Create a definitely not existing local branch:
local_branch = f"{self.changes_branch}{uuid.uuid4()}"
self._git.checkout_branch(local_branch)
self._git.commit_all(
self.format_commit_message(template_meta=template_meta),
author=self.commit_author,
)
if not self._git.is_existing_ref(remote_branch):
self._git.push(self.changes_branch)
else:
# A branch with updates already exists in the remote.
if self._git.is_same_commit(remote_branch, f"{local_branch}^1"):
# The `changes_branch` is the same as the clone branch,
# so essentially the created commit forms a linear history.
# No need for any diffs here, we just need to push that.
self._git.push(self.changes_branch)
else:
# The two branches have diverged, we need to compare them:
changes: bool = not self._git.are_one_commit_diffs_equal(
local_branch, remote_branch
)
if changes:
# We could've used force push here, but instead we delete
# the branch first, because in GitLab it would also close
# the existing MR (if any), and we want that instead of
# silently updating the old MR.
self._git.push_branch_delete(self.changes_branch)
self._git.push(self.changes_branch)
else:
logger.info(
"scaraplate did update the project, but there's "
"an already existing branch in remote which diff "
"is equal to the just produced changes"
)
# Now we should ensure that a Pull Request exists for
# the `self.changes_branch`, but this class is designed to be agnostic
# from concrete git remotes, so it should be done in a child class.
def format_commit_message(self, *, template_meta: TemplateMeta) -> str:
return self.commit_message_template.format(
# TODO retrieve path from self.clone_url and pass it here too?
# (but careful: that clone_url might contain credentials).
update_time=self.update_time,
scaraplate_version=scaraplate_version(),
template_meta=template_meta,
)
@classmethod
@contextlib.contextmanager
def clone(
cls,
clone_url: str,
*,
clone_ref: Optional[str] = None,
monorepo_inner_path: Optional[Path] = None,
changes_branch: str,
commit_author: str,
commit_message_template: str = (
"Scheduled template update ({update_time:%Y-%m-%d})\n"
"\n"
"* scaraplate version: {scaraplate_version}\n"
"* template commit: {template_meta.commit_url}\n"
"* template ref: {template_meta.head_ref}\n"
),
) -> Iterator["GitCloneProjectVCS"]:
"""Provides an instance of this class by issuing ``git clone``
to a tempdir when entering the context manager. Returns a context
manager object which after ``__enter__`` returns an instance
of this class.
:param clone_url: Any valid ``git clone`` url.
:param clone_ref: Git ref to checkout after clone
(i.e. branch or tag name).
:param monorepo_inner_path: Path to the root dir of project
relative to the root of the repo. If ``None``, the root of
the repo will be used as the root of project.
:param changes_branch: The branch name where the changes should be
pushed in the remote. Might be the same as ``clone_ref``.
Note that this branch is never force-pushed. If upon push
the branch already exists in remote and its one-commit diff
is different from the one-commit diff of the just created
local branch, then the remote branch will be deleted and
the local branch will be pushed to replace the previous one.
:param commit_author: Author name to use for ``git commit``, e.g.
``<NAME> <<EMAIL>>``.
:param commit_message_template: :meth:`str.format` template
which is used to produce a commit message when committing
the changes. Available format variables are:
- ``update_time`` [:class:`datetime.datetime`] -- the time
of update
- ``scaraplate_version`` [:class:`str`] -- scaraplate package
version
- ``template_meta`` [:class:`.TemplateMeta`] -- template meta
returned by :meth:`.TemplateVCS.template_meta`
"""
with tempfile.TemporaryDirectory() as tmpdir_name:
tmpdir_path = Path(tmpdir_name).resolve()
project_path = tmpdir_path / "scaraplate_project"
project_path.mkdir()
git = Git.clone(clone_url, target_path=project_path, ref=clone_ref)
project_path = git.cwd
if monorepo_inner_path is not None:
project_path = project_path / monorepo_inner_path
yield cls(
project_path,
git,
changes_branch=changes_branch,
commit_author=commit_author,
commit_message_template=commit_message_template,
)
class Git:
def __init__(self, cwd: Path, remote: str = "origin") -> None:
self.cwd = cwd
self.remote = remote
def remote_ref(self, ref: str) -> str:
return f"{self.remote}/{ref}"
def checkout_branch(self, branch: str) -> None:
self._git(["checkout", "-b", branch])
def commit_all(self, commit_message: str, author: Optional[str] = None) -> None:
self._git(["add", "--all"])
extra: List[str] = []
if author is not None:
extra = ["--author", author]
self._git(
["commit", "-m", commit_message, *extra],
env={
# git would fail if there's no `user.email` in the local
# git config, even if `--author` is specified.
"USERNAME": "scaraplate",
"EMAIL": "<EMAIL>",
},
)
def is_dirty(self) -> bool:
return bool(self._git(["status", "--porcelain"]))
def is_existing_ref(self, ref: str) -> bool:
try:
self._git(["rev-parse", "--verify", ref])
except RuntimeError:
return False
else:
return True
def is_same_commit(self, ref1: str, ref2: str) -> bool:
commit1 = self._git(["rev-parse", "--verify", ref1])
commit2 = self._git(["rev-parse", "--verify", ref2])
return commit1 == commit2
def are_one_commit_diffs_equal(self, ref1: str, ref2: str) -> bool:
diff1 = self._git(["diff", f"{ref1}^1..{ref1}"])
diff2 = self._git(["diff", f"{ref2}^1..{ref2}"])
return diff1 == diff2
def push_branch_delete(self, branch: str) -> None:
self._git(["push", "--delete", self.remote, branch])
def push(self, ref: str) -> None:
# https://stackoverflow.com/a/4183856
self._git(["push", self.remote, f"HEAD:{ref}"])
def _git(self, args: List[str], *, env: Optional[Dict[str, str]] = None) -> str:
return _call_git(args, cwd=self.cwd, env=env)
@classmethod
def clone(
cls,
clone_url: str,
*,
target_path: Path,
        ref: Optional[str] = None,
strip_credentials_from_remote: bool = False,
) -> "Git":
remote = "origin"
clone_url_without_creds = strip_credentials_from_git_remote(clone_url)
args = ["clone", clone_url]
if ref is not None:
# git-clone(1) explicitly mentions that both branches and tags
# are allowed in the `--branch`.
args.extend(["--branch", ref])
_call_git(args, cwd=target_path)
actual_items_in_target_path = os.listdir(target_path)
if len(actual_items_in_target_path) != 1:
raise RuntimeError(
f"Expected `git clone` to create exactly one directory. "
f"Directories in the target: {actual_items_in_target_path}"
)
cloned_dir, = actual_items_in_target_path
target_path = target_path / cloned_dir
if strip_credentials_from_remote:
_call_git(
["remote", "set-url", remote, clone_url_without_creds], cwd=target_path
)
return cls(cwd=target_path, remote=remote)
def strip_credentials_from_git_remote(remote: str) -> str:
parsed = urlparse(remote)
if not parsed.scheme:
# Not a URL (probably an SSH remote)
return remote
assert parsed.hostname is not None
clean = parsed._replace(netloc=parsed.hostname)
return urlunparse(clean)
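# Minimal usage sketch (not part of the original module). The remotes below are
# placeholders; the GitCloneProjectVCS call is left commented out because it needs
# a reachable repository and push rights.
if __name__ == "__main__":
    # Credentials embedded in an HTTPS remote are dropped ...
    print(strip_credentials_from_git_remote("https://user:secret@example.com/org/repo.git"))
    # -> https://example.com/org/repo.git
    # ... while scheme-less SSH-style remotes are returned unchanged.
    print(strip_credentials_from_git_remote("git@example.com:org/repo.git"))
    # -> git@example.com:org/repo.git
    #
    # with GitCloneProjectVCS.clone(
    #     "https://example.com/org/project.git",
    #     changes_branch="scaraplate-update",
    #     commit_author="Scaraplate Bot <bot@example.com>",
    # ) as project_vcs:
    #     print(project_vcs.dest_path)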
# StarcoderdataPython 1726698
from django import template
from django.conf import settings
register = template.Library()
@register.simple_tag
def env_value():
    return "localhost"
# StarcoderdataPython 157663
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from thumbnails import get_thumbnail
from thumbnails.conf import settings
from thumbnails.images import Thumbnail
from .utils import override_settings
class GetThumbnailTestCase(unittest.TestCase):
@mock.patch('{}.get'.format(settings.THUMBNAIL_CACHE_BACKEND), lambda o, x: True)
def test_get_thumbnail_cached(self):
self.assertTrue(get_thumbnail('', '200'))
@mock.patch('{}.set'.format(settings.THUMBNAIL_CACHE_BACKEND))
@mock.patch('{}.save'.format(settings.THUMBNAIL_STORAGE_BACKEND))
@mock.patch('thumbnails.engines.base.BaseThumbnailEngine.get_thumbnail')
def test_get_thumbnail(self, mock_get_thumbnail, mock_save, mock_cache_set):
thumbnail = get_thumbnail('http://puppies.lkng.me/400x600/', '200')
self.assertTrue(mock_get_thumbnail.called)
self.assertTrue(mock_cache_set.called)
self.assertTrue(mock_save.called)
mock_save.assert_has_calls([
mock.call(thumbnail.path, b''),
mock.call(thumbnail.alternative_resolution_path(2), b''),
])
self.assertIsInstance(thumbnail, Thumbnail)
@mock.patch('{}.get'.format(settings.THUMBNAIL_CACHE_BACKEND), lambda x, y: True)
@mock.patch('{}.set'.format(settings.THUMBNAIL_CACHE_BACKEND))
@mock.patch('{}.save'.format(settings.THUMBNAIL_STORAGE_BACKEND))
@mock.patch('thumbnails.engines.base.BaseThumbnailEngine.get_thumbnail')
def test_force(self, mock_get_thumbnail, mock_save, mock_cache_set):
get_thumbnail('http://puppies.lkng.me/400x600/', '200', force=True)
get_thumbnail('http://puppies.lkng.me/400x600/', '200', force=True)
get_thumbnail('http://puppies.lkng.me/400x600/', '200')
self.assertEqual(len(mock_get_thumbnail.call_args_list), 4)
self.assertEqual(len(mock_save.call_args_list), 4)
self.assertEqual(len(mock_cache_set.call_args_list), 2)
def test_dummy(self):
with override_settings(THUMBNAIL_DUMMY=True):
self.assertEqual(
get_thumbnail('t.jpg', '200x200').url,
'http://puppies.lkng.me/200x200'
)
# StarcoderdataPython 1616369
#from math import pi
#from math import *
import math as mt
#import math
print(mt.pi)
# StarcoderdataPython 3364471: lib/data_utils/visualization.py
# ---------------------------------------------------------------
# SNIPER: Efficient Multi-scale Training
# Licensed under The Apache-2.0 License [see LICENSE for details]
# by <NAME>
# ---------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
import numpy as np
from scipy import misc  # vis_polys below uses misc.imread / misc.imresize (removed in newer scipy releases)
def transform_im(im, pixel_means):
im = im.copy()
# put channel back
im = im.transpose((1, 2, 0))
im += pixel_means
return im.astype(np.uint8)
def visualize_dets(im, dets, scale, pixel_means, class_names, threshold=0.5, save_path='debug.png', transform=True):
if transform:
im = transform_im(im, np.array(pixel_means)[[2, 1, 0]])
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__': continue
color = (random.random(), random.random(), random.random())
for det in dets[j]:
bbox = det[:4] * scale
score = det[-1]
if score < threshold:
continue
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.savefig(save_path)
plt.cla()
plt.clf()
plt.close()
def vis_polys(polys, im_path, crop, scale):
im = misc.imread(im_path)
im = im[:, ::-1, :]
for obj in range(len(polys)):
plt.imshow(im)
n_seg = len(polys[obj])
for j in range(n_seg):
cur_len = len(polys[obj][j])
for k in range(cur_len/2):
point = plt.Circle((polys[obj][j][2*k], polys[obj][j][2*k+1]), radius=1, color='red')
plt.gca().add_patch(point)
num = np.random.randint(0,100000)
plt.savefig('debug/visualization/debug_{}_{}.png'.format(num, obj))
plt.clf()
plt.cla()
plt.close()
imc = im[int(crop[1]):int(crop[3]), int(crop[0]):int(crop[2])]
imc2 = misc.imresize(imc, scale)
plt.imshow(imc2)
h,w,_ = np.shape(imc2)
n_seg = len(polys[obj])
for j in range(n_seg):
cur_len = len(polys[obj][j])
for k in range(cur_len/2):
x1 = (polys[obj][j][2*k]-crop[0])*scale
y1 = scale*(polys[obj][j][2*k+1]-crop[1])
x1 = min(max(0, x1), w)
y1 = min(max(0, y1), h)
point = plt.Circle((x1, y1), radius=1, color='red')
plt.gca().add_patch(point)
plt.savefig('debug/visualization/debug_{}_{}_c.png'.format(num, obj))
plt.clf()
plt.cla()
plt.close()
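# Small self-contained smoke test for visualize_dets (not in the original file).
# The image, detections and class names are synthetic; the image is channel-first
# (C, H, W) because transform_im transposes it back to HWC before plotting.
if __name__ == '__main__':
    fake_im = np.zeros((3, 120, 160), dtype=np.float32)
    fake_dets = [np.zeros((0, 5)),                            # '__background__': no boxes
                 np.array([[20.0, 30.0, 80.0, 90.0, 0.95]])]  # one box for class 'object'
    visualize_dets(fake_im, fake_dets, 1.0, [0.0, 0.0, 0.0],
                   class_names=['__background__', 'object'],
                   threshold=0.5, save_path='debug_vis.png')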
# StarcoderdataPython 1615667 (0 stars)
import datetime
import logging
import os
import sdm_service
import sys
GRANT_TIMEOUT=60 #minutes
def get_params():
if not sys.argv or len(sys.argv) != 3:
raise Exception("Invalid number of arguments")
return sys.argv[1], sys.argv[2]
class GrantTemporaryAccess:
service = sdm_service.create_sdm_service(os.getenv("SDM_API_ACCESS_KEY"), os.getenv("SDM_API_SECRET_KEY"), logging)
def __init__(self, resource_name, user_email):
self.resource_name = resource_name
self.user_email = user_email
def __get_resource_id(self):
try:
resource = self.service.get_resource_by_name(self.resource_name)
return resource.id
except Exception as e:
raise Exception(f"Invalid resource name {self.resource_name}") from e
def __get_account_id(self):
try:
account = self.service.get_account_by_email(self.user_email)
return account.id
except Exception as e:
raise Exception(f"Invalid user email {self.user_email}") from e
def execute(self):
grant_start_from = datetime.datetime.now(datetime.timezone.utc)
grant_valid_until = grant_start_from + datetime.timedelta(minutes=GRANT_TIMEOUT)
self.service.grant_temporary_access(
self.__get_resource_id(),
self.__get_account_id(),
grant_start_from,
grant_valid_until
)
resource_name, user_email = get_params()
GrantTemporaryAccess(resource_name, user_email).execute()
print(f"Temporary grant successfully created for {user_email} on {resource_name}")
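# Invocation sketch (the resource name and e-mail below are hypothetical):
#   SDM_API_ACCESS_KEY=... SDM_API_SECRET_KEY=... python <this script> "prod-postgres" "alice@example.com"
# which grants the matching account access to the named resource for the next
# GRANT_TIMEOUT (60) minutes starting from now.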
# StarcoderdataPython 129555
class School:
def __init__(self, name, num_pupils, num_classrooms):
self.name = name
self.num_pupils = num_pupils
self.num_classrooms = num_classrooms
def calculate_average_pupils(self):
return self.num_pupils / self.num_classrooms
def show_info(self):
"""
>>> s = School("Eveyln Intermediate", 96, 1500)
>>> s.show_info()
Eveyln Intermediate has 15.62 pupils per room
"""
print(f"{self.name} has {self.calculate_average_pupils():.2f} pupils per room")
def collect_data():
global school_name, school_pupils, school_classrooms
school_name = input("Enter the name of the school: ")
while True:
try:
school_pupils = int(input("Enter the number of pupils the school has: "))
break
except ValueError:
print("Please enter an integer!")
while True:
try:
school_classrooms = int(input("Enter the number of classrooms the school has: "))
break
except ValueError:
print("Please enter an integer!")
if __name__ == "__main__":
collect_data()
school1 = School(school_name, school_pupils, school_classrooms)
school1.show_info()
collect_data()
school2 = School(school_name, school_pupils, school_classrooms)
school2.show_info()
# StarcoderdataPython 75518: tests/clpy_tests/random_tests/test_distributions.py
import unittest
import clpy
from clpy.random import distributions
from clpy import testing
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2), (3, 2)],
'loc_shape': [(), (3, 2)],
'scale_shape': [(), (3, 2)],
})
)
@testing.gpu
class TestDistributions(unittest.TestCase):
_multiprocess_can_split_ = True
def check_distribution(self, dist_func, loc_dtype, scale_dtype, dtype):
loc = clpy.ones(self.loc_shape, dtype=loc_dtype)
scale = clpy.ones(self.scale_shape, dtype=scale_dtype)
out = dist_func(loc, scale, self.shape, dtype)
self.assertEqual(self.shape, out.shape)
self.assertEqual(out.dtype, dtype)
@clpy.testing.for_float_dtypes('dtype', no_float16=True)
@clpy.testing.for_float_dtypes('loc_dtype')
@clpy.testing.for_float_dtypes('scale_dtype')
def test_normal(self, loc_dtype, scale_dtype, dtype):
self.check_distribution(distributions.normal,
loc_dtype, scale_dtype, dtype)
# StarcoderdataPython 2646
from unittest import TestCase
from unittest.mock import Mock, patch
import sys
sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid errors.
from ledshimdemo.canvas import Canvas
from ledshimdemo.effects.cheerlights import CheerLightsEffect
class TestCheerLights(TestCase):
TEST_CANVAS_SIZE = 3 # type: int
def test_cheerlight_call(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
self.assertIsNone(effect.get_colour_from_channel("http://ejiferfneciudwedwojcmeiocnw.com"))
@patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None)
def test_effect_failed_cheerlights(self, patch_function):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
effect.compose()
patch_function.assert_called_once()
for i in range(canvas.get_size()):
self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL)
def test_effect_working_cheerlights(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
# Must check before and after in case it changes during the test.
before = effect.get_colour_from_channel(effect.URL)
effect.compose()
after = effect.get_colour_from_channel(effect.URL)
self.assertRegex(repr(effect), "^CheerLights\\(Colour:({0}|{1})\\)$".format(before, after))
# StarcoderdataPython 120870 (0 stars)
import os
from lisdf.parsing.sdf import SDF, Collision, Link, Mesh, Visual
def _handle_component(component, model_path: str) -> None:
"""
Handle component and inject URI into link component geometry
"""
if isinstance(component, Link):
for link_component in component.aggregate_order:
# TODO: are there other types we need to consider?
if isinstance(link_component, (Collision, Visual)) and isinstance(
link_component.geometry, Mesh
):
link_component.geometry.uri = os.path.join(
model_path, link_component.geometry.uri
)
def inject_absolute_path(sdf: SDF, model_path: str) -> SDF:
"""
This function replace relative paths to object and material
files with absolute paths so the sdf object is self-contained.
"""
for world in sdf.aggregate_order:
for model in world.models:
for component in model.aggregate_order:
_handle_component(component, model_path)
return sdf
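# Effect sketch (paths invented for illustration): a Mesh uri such as
# "meshes/base.obj" referenced from models/mud_test/model.sdf becomes
# "models/mud_test/meshes/base.obj" after inject_absolute_path, i.e. it is resolved
# relative to the caller's models_dir rather than to the .sdf file itself.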
def load_sdf(model_name: str, models_dir: str = "models") -> SDF:
sdf_path = os.path.join(models_dir, model_name)
with open(sdf_path) as f:
xml_str = f.read()
sdf = SDF.from_xml_string(xml_str)
for world in sdf.aggregate_order:
# Load all the includes in the world
for include in world.includes:
include_model = load_sdf(include.model_name)
world.models.append(include_model)
model_path = os.path.join(models_dir, os.path.dirname(model_name))
return inject_absolute_path(sdf, model_path)
if __name__ == "__main__": # pragma: no cover
sdf_test = "mud_test/model.sdf"
sdf_results = load_sdf(sdf_test)
# print(sdf_results)
# StarcoderdataPython 1631816 (1-10 stars): python/labs/shopping-list-app/starter-code/shopping_list_starter.py
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
choice = ""
print("Welcome to the shopping list app!")
shopping_list = []
while choice.lower() != "e":
print("Please choose your action from the following list:")
print("a. Add an item to the list")
print("b. Remove an item from the list")
print("c. Check to see if an item is on the list")
print("d. Show all items on the list")
print("e. exit")
choice = input("Enter your choice [a|b|c|d|e]:")
# Your code below! Handle the cases when the user chooses a, b, c, d, or e
if choice == "a":
adder = input("What would you like to add? Separate values with commas: ").lower()
        adder = [item.strip() for item in adder.split(",")]
for add in adder:
if add in shopping_list:
print(add+" is already on the list!")
else:
shopping_list.append(add)
print(add+" has been added to the list.")
elif choice == "b":
remover = input ("What would you like to remove? ").lower()
if (remover in shopping_list):
a = input("Are you sure you would like to remove "+remover+" y/n ")
if a =="y":
shopping_list.remove(remover)
else:
print("Cancelled")
else:
print("I'm sorry, that was not found in the list.")
elif choice == "c":
checker = input("What would you like to check for? ").lower()
if checker in shopping_list:
print("That item is in your list already!")
else:
a = input("That item is not currently in your list, would you like to add "+checker+" y/n")
if a =="y":
shopping_list.append(checker)
elif choice == "d":
for thing in shopping_list:
print (thing)
# StarcoderdataPython 1747719
from django import forms
from django_measurement.forms import MeasurementField
from tests.custom_measure_base import DegreePerTime, Temperature, Time
from tests.models import MeasurementTestModel
class MeasurementTestForm(forms.ModelForm):
class Meta:
model = MeasurementTestModel
exclude = []
class LabelTestForm(forms.Form):
simple = MeasurementField(Temperature)
class SITestForm(forms.Form):
simple = MeasurementField(Time)
class BiDimensionalLabelTestForm(forms.Form):
simple = MeasurementField(DegreePerTime)
# StarcoderdataPython 1725464 (0 stars)
import pytest
from snek_case.sneks import Snek
class TestSnek(Snek):
snek_type = "test"
snek = "---:>"
def test_cannot_create() -> None:
# Assemble / Act / Assert
with pytest.raises(TypeError):
Snek() # type: ignore
def test_can_subclass() -> None:
# Assemble / Act / Assert
TestSnek()
def test_object_is_snek() -> None:
# Assemble / Act
snek = TestSnek()
# Assert
assert str(snek) == TestSnek.snek
# StarcoderdataPython 1747905 (1000+ stars)
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: inference_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='inference_service.proto',
package='com.webank.ai.fate.api.serving',
syntax='proto3',
serialized_options=_b('B\025InferenceServiceProto'),
serialized_pb=_b('\n\x17inference_service.proto\x12\x1e\x63om.webank.ai.fate.api.serving\"0\n\x10InferenceMessage\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\x32\xf6\x02\n\x10InferenceService\x12o\n\tinference\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessage\x12w\n\x11startInferenceJob\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessage\x12x\n\x12getInferenceResult\x12\x30.com.webank.ai.fate.api.serving.InferenceMessage\x1a\x30.com.webank.ai.fate.api.serving.InferenceMessageB\x17\x42\x15InferenceServiceProtob\x06proto3')
)
_INFERENCEMESSAGE = _descriptor.Descriptor(
name='InferenceMessage',
full_name='com.webank.ai.fate.api.serving.InferenceMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='com.webank.ai.fate.api.serving.InferenceMessage.header', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='body', full_name='com.webank.ai.fate.api.serving.InferenceMessage.body', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=107,
)
DESCRIPTOR.message_types_by_name['InferenceMessage'] = _INFERENCEMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InferenceMessage = _reflection.GeneratedProtocolMessageType('InferenceMessage', (_message.Message,), dict(
DESCRIPTOR = _INFERENCEMESSAGE,
__module__ = 'inference_service_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.api.serving.InferenceMessage)
))
_sym_db.RegisterMessage(InferenceMessage)
DESCRIPTOR._options = None
_INFERENCESERVICE = _descriptor.ServiceDescriptor(
name='InferenceService',
full_name='com.webank.ai.fate.api.serving.InferenceService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=110,
serialized_end=484,
methods=[
_descriptor.MethodDescriptor(
name='inference',
full_name='com.webank.ai.fate.api.serving.InferenceService.inference',
index=0,
containing_service=None,
input_type=_INFERENCEMESSAGE,
output_type=_INFERENCEMESSAGE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='startInferenceJob',
full_name='com.webank.ai.fate.api.serving.InferenceService.startInferenceJob',
index=1,
containing_service=None,
input_type=_INFERENCEMESSAGE,
output_type=_INFERENCEMESSAGE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='getInferenceResult',
full_name='com.webank.ai.fate.api.serving.InferenceService.getInferenceResult',
index=2,
containing_service=None,
input_type=_INFERENCEMESSAGE,
output_type=_INFERENCEMESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_INFERENCESERVICE)
DESCRIPTOR.services_by_name['InferenceService'] = _INFERENCESERVICE
# @@protoc_insertion_point(module_scope)
# StarcoderdataPython 3261607
import numpy as np
from glm import vec2
from ....model.model import Material, RenderCompound
from ....gl.framebuffer import FrameBuffer, FB_NONE
from ....gl.shader import ShaderProgram
from ...base import SecondPassRenderer
from ...util import sample_vertex_shader, gen_screen_mesh
# Separable convolution
vert_shader = sample_vertex_shader
# language=GLSL
frag_shader = '''\
#version 430 core
in vec2 tex_coords;
out vec4 out_color;
uniform sampler2D tex_img;
uniform vec2 direction;
void main() {
out_color = vec4(0);
{000;}
}'''
injectable_code = 'out_color.xyz += texture(tex_img, tex_coords + direction * {dir_offset}).xyz * {distr_value};\n'
class SeparableConvolutionSession(SecondPassRenderer):
def __init__(self, distribution, width, height, color_buffer_type=1):
self.radius = 1
self._make_shader(distribution)
self.first = SeparableConvolutionPass(width, height, self.shader_prog, color_buffer_type, vec2(1, 0) / width)
self.second = SeparableConvolutionPass(width, height, self.shader_prog, color_buffer_type, vec2(0, 1) / height)
self.meshes = self.first.meshes
self.fbo = self.first.fbo
def _make_shader(self, distr_values):
start_value = len(distr_values) // 2
inj_code = ''.join((injectable_code.format(dir_offset=offset, distr_value=value)
for value, offset in zip(distr_values, range(-start_value, start_value + 1))))
print('shader', frag_shader.replace('{000;}', inj_code))
self.shader_prog = ShaderProgram(vert_shader, frag_shader.replace('{000;}', inj_code))
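    # For a distribution like [0.25, 0.5, 0.25] the injected fragment-shader body
    # becomes three taps (values here are an illustrative kernel, not module defaults):
    #   out_color.xyz += texture(tex_img, tex_coords + direction * -1).xyz * 0.25;
    #   out_color.xyz += texture(tex_img, tex_coords + direction * 0).xyz * 0.5;
    #   out_color.xyz += texture(tex_img, tex_coords + direction * 1).xyz * 0.25;
    # i.e. one weighted sample per kernel element, offset along `direction`.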
def draw(self, out_fbo, data):
self.first.draw(self.second.fbo, data)
self.second.draw(out_fbo, self.second.meshes)
def set_radius(self, value):
self.radius = value
self.first.direction = vec2(1, 0) / self.first.fbo.width * value
self.second.direction = vec2(0, 1) / self.second.fbo.height * value
class SeparableConvolutionPass(SecondPassRenderer):
def __init__(self, width, height, program, color_buffer_type, direction):
self.shader_prog = program
self.fbo = FrameBuffer(width, height, color_buffer_type, FB_NONE)
self.meshes = (RenderCompound(gen_screen_mesh(), Material(self.fbo.color_buffers)), )
self.direction_setter = self.shader_prog.get_uniform_setter('direction', '2f')
self.direction = direction
def draw(self, out_fbo, data) -> int:
out_fbo.use()
self.shader_prog.use()
self.direction_setter(*self.direction)
i = 0
for i, elem in enumerate(data):
elem.draw()
return i
class GaussianBlur(SeparableConvolutionSession):
def __init__(self, width, height, values, color_buffer_type=1):
self.values = np.blackman(values)
self.values /= self.values.sum()
super(GaussianBlur, self).__init__(self.values, width, height, color_buffer_type)
class BoxBlur(SeparableConvolutionSession):
def __init__(self, width, height, values, color_buffer_type=1):
self.values = np.full(values, 1.0)
self.values /= self.values.sum()
super(BoxBlur, self).__init__(self.values, width, height, color_buffer_type)
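# Standalone sketch of the kernel normalisation used by GaussianBlur above; it runs
# without an OpenGL context, and the window length 9 is an arbitrary illustration.
if __name__ == '__main__':
    demo_kernel = np.blackman(9)
    demo_kernel /= demo_kernel.sum()   # weights sum to 1, so overall brightness is preserved
    print(demo_kernel, demo_kernel.sum())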
# StarcoderdataPython 139993: mini-apps/tic-tac-toe-full/tic_tac_toe_module.py
import random
# def generate_board():
# """
#     function that generates a random tic-tac-toe board (playing grid)
# vzorci = ['X','O']
# nakljucen_vzorec = random.choice(vzorci)
# """
# return board
# def print_board(board):
#     """ helper for a nicer terminal print of the board """
# pass
# def display_board():
# pass
# def check_win_condition(board, player):
# """ """
# return win
# def check_tie_condition(board):
# """ """
# return tie
# def check_rows(board, player):
# """ """
# return win
# def check_columns(board, player):
# """ """
# return win
# def check_diagonals(board, player):
# """ """
# return win
# test the function
# print_board(generate_random_board())
# StarcoderdataPython 3366807: repo icsi-berkeley/framework-code
"""
.. The SpecializerTools module performs basic operations to gather information from a SemSpec
and output an n-tuple.
.. moduleauthor:: <NAME> <<EMAIL>>
------
See LICENSE.txt for licensing information.
------
"""
from nluas.utils import update, Struct
def updated(d, *maps, **entries):
"""A "functional" version of update...
"""
dd = dict(**d) if isinstance(d, dict) else Struct(d)
return update(dd, *maps, **entries)
# This just defines the interface
class NullSpecializer(object):
def specialize(self, fs):
"""Specialize fs into task-specific structures.
"""
abstract # @UndefinedVariable
class DebuggingSpecializer(NullSpecializer):
def __init__(self):
self.debug_mode = False
# Original input sentence
self._sentence = None
""" Sets debug_mode to ON/OFF """
def set_debug(self):
self.debug_mode = not self.debug_mode
class ReferentResolutionException(Exception):
def __init__(self, message):
self.message = message
class FeatureStructException(Exception):
def __init__(self, message):
self.message = message
class MoodException(FeatureStructException):
def __init__(self, message):
self.message = message
class TemplateException(Exception):
def __init__(self, message):
self.message = message
class UtilitySpecializer(DebuggingSpecializer):
def __init__(self, analyzer):
self._stacked = []
DebuggingSpecializer.__init__(self)
self.analyzer = analyzer
self.mappings = self.analyzer.get_mappings()
self.event = True
self.addressees = list() # For discourse analysis, distinct from _stacked list, which is used for general referent resolution
def is_compatible(self, typesystem, role1, role2):
return self.analyzer.issubtype(typesystem, role1, role2) or self.analyzer.issubtype(typesystem, role2, role1)
""" Input PROCESS, searches SemSpec for Adverb Modifiers. Currently just returns speed,
but could easily be modified to return general manner information. This might be made more complex
if we wanted to describe more complex motor routines with adverbs. """
def get_actionDescriptor(self, process):
tempSpeed = .5
returned=dict(speed=tempSpeed)
if hasattr(process, "speed") and str(process.speed) != "None":
tempSpeed = float(process.speed)
returned['speed'] = float(process.speed)
for i in process.__features__.values():
for role, filler in i.__items__():
if filler.typesystem() == 'SCHEMA' and self.analyzer.issubtype('SCHEMA', filler.type(), 'AdverbModification'):
if process.index() == filler.modifiedThing.index():
if (filler.value) and (filler.property.type() == "speed"):
newSpeed = float(filler.value)
if min(newSpeed, tempSpeed) < .5:
#return min(newSpeed, tempSpeed)
returned['speed'] = min(newSpeed, tempSpeed)
else:
returned['speed'] = max(newSpeed, tempSpeed)
#return max(newSpeed, tempSpeed)
#return float(filler.value)
elif (filler.value) and (filler.property.type() == "process_kind"):
returned['collaborative'] = filler.value.type()
#return filler.value.type()
else:
returned['collaborative'] = False
#return False
return returned
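    # Typical return shapes (illustration): {'speed': 0.5} when nothing modifies the
    # process; a speed adverb overwrites 'speed', and a 'collaborative' entry is added
    # only when an AdverbModification carries a process_kind value (or falls through
    # to the default False branch above).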
""" This returns a string of the specified relation of the landmark to the other RD, based on the values
and mappings encoded in the SemSpec. This needs to be fixed substantially.
"""
def get_locationDescriptor(self, goal):
#location = {}
location = ''
for i in goal.__features__.values():
for role, filler in i.__items__():
if filler.type() == "Support":
if filler.supporter.index() == goal.index():
return "on"
if filler.type() == 'Sidedness':
if filler.back.index() == goal.index():
return 'behind' #location = 'behind'
elif filler.type() == 'BoundedObject':
if filler.interior.index() == goal.index():
if hasattr(i, "m") and i.m.type() == "TrajectorLandmark":
return "in"
elif hasattr(i, "m") and i.m.type() == "SPG":
return 'into'
elif filler.type() == "NEAR_Locative":
if filler.p.proximalArea.index() == goal.index(): #i.m.profiledArea.index():
location = 'near'
#location['relation'] = 'near'
elif filler.type() == "AT_Locative":
if filler.p.proximalArea.index() == goal.index():
location = 'at'
#location['relation'] = 'at'
return location
def invert_pointers(self, goal):
final = {}
for i in goal.__features__.values():
for roles, filler in i.__items__():
                # Checks: the filler is a schema and actually has a filler value
if filler.typesystem() == "SCHEMA" and filler.has_filler():
for k, v in filler.__items__():
if v.index() == goal.index():
if filler.type() not in final:
final[filler.type()] = []
final[filler.type()].append(filler)
return final
def get_processDescriptor(self, process, referent):
""" Retrieves information about a process, according to existing templates. Meant to be implemented
in specific extensions of this interface.
Can be overwritten as needed -- here, it calls the params_for_compound to gather essentially an embedded n-tuple.
"""
return list(self.params_for_compound(process))
""" Meant to match 'one-anaphora' with the antecedent. As in, "move to the big red box, then move to another one". Or,
'He likes the painting by Picasso, and I like the one by Dali.' Not yet entirely clear what information to encode
besides object type. """
def resolve_anaphoricOne(self, item):
popper = list(self._stacked)
while len(popper) > 0:
ref = popper.pop()
while ('location' in ref or 'locationDescriptor' in ref or 'referent' in ref['objectDescriptor']) and len(popper) > 0:
ref = popper.pop()
if item.givenness.type() == 'distinct':
return {'objectDescriptor': {'type': ref['objectDescriptor']['type'], 'givenness': 'distinct'}}
else:
test = self.get_objectDescriptor(item, resolving=True)
merged = self.merge_descriptors(ref['objectDescriptor'], test)
return {'objectDescriptor': merged}
raise ReferentResolutionException("Sorry, I don't know what you mean by 'one'.")
def merge_descriptors(self, old, new):
""" Merges object descriptors from OLD and NEW. Objective: move descriptions / properties from OLD
into NEW unless NEW conflicts. If a property conflicts, then use the property in NEW. """
if 'referent' in new and new['referent'] in ['anaphora', 'antecedent']:
new.pop("referent")
for key, value in old.items():
if key == 'type':
new[key] = old[key]
if not key in new:
new[key] = old[key]
return new
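    # Worked example of the merge rule above (descriptor keys invented):
    #   old = {'type': 'box', 'color': 'red', 'size': 'big'}
    #   new = {'color': 'blue', 'referent': 'anaphora'}
    #   merge_descriptors(old, new) -> {'color': 'blue', 'type': 'box', 'size': 'big'}
    # NEW keeps its values on conflict (except 'type', which always comes from OLD),
    # OLD fills in anything missing, and an 'anaphora'/'antecedent' referent marker on
    # NEW is dropped before merging.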
""" Simple reference resolution gadget, meant to unify object pronouns with potential
antecedents. """
def resolve_referents(self, item, antecedents = None, actionary=None, pred=None):
#self.find_closest_antecedent([7,8])
if antecedents is None:
antecedents = self._stacked
popper = list(antecedents)
while len(popper) > 0:
ref = popper.pop()
if self.resolves(ref, actionary, pred) and self.compatible_referents(item, ref['objectDescriptor']):
if 'partDescriptor' in ref:
return ref['partDescriptor']
ref = self.clean_referent(ref)
return ref
return {'objectDescriptor':item}
#raise ReferentResolutionException("Sorry, I did not find a suitable referent found in past descriptions.")
def clean_referent(self, ref):
ref['objectDescriptor'].pop('property', None)
return ref
def find_closest_antecedent(self, target):
""" Takes in target span/word, ranks previous spans. """
ranks = []
for k in self.np_spans:
#if self.analyzer.issubtype("CONSTRUCTION", k[0], "Pronoun"):
span = k[2]
if span[0] < target[0] and span[1] < target[1]:
ranks.insert(0, k)
#print(ranks)
def ordering(self, fs, ref):
for index, value in fs.rootconstituent.__features__.items():
if hasattr(value, "m") and value.m and value.m.type() == "RD":
print(index)
#print(repr(value))
print(value.m.ontological_category.type())
#temp = self.get_objectDescriptor(value.m)
#print(temp)
def compatible_referents(self, pronoun, ref):
for key, value in pronoun.items():
if key in ref and key != "referent" and (value and ref[key]):
if not self.is_compatible("ONTOLOGY", value, ref[key]):
return False
return True
""" Returns a boolean on whether or not the "popped" value works in the context provided. """
def resolves(self, popped, actionary=None, pred=None):
if actionary == 'be2' or actionary == 'be':
if 'location' in popped or 'locationDescriptor' in popped:
return 'relation' in pred
else:
if 'referent' in popped:
test = popped['referent'].replace('_', '-')
return self.analyzer.issubtype('ONTOLOGY', test, 'physicalEntity')
else:
return self.analyzer.issubtype('ONTOLOGY', popped['objectDescriptor']['type'], 'physicalEntity')
if actionary == 'forceapplication' or actionary == 'move':
if 'location' in popped or 'locationDescriptor' in popped:
return False
if 'partDescriptor' in popped:
pd = popped['partDescriptor']['objectDescriptor']
if 'referent' in pd:
return self.analyzer.issubtype('ONTOLOGY', pd['referent'].replace('_', '-'), 'moveable')
else:
return self.analyzer.issubtype('ONTOLOGY', pd['type'], 'moveable')
else:
if 'objectDescriptor' in popped and 'type' in popped['objectDescriptor']:
return self.analyzer.issubtype('ONTOLOGY', popped['objectDescriptor']['type'], 'moveable')
return False
# If no actionary passed in, no need to check for context
return True
def replace_mappings(self, ntuple):
""" This is supposed to replace all of the mappings in the ntuple with values from the action ontology, if applicable. """
n = ntuple
if type(ntuple) == Struct:
n = ntuple.__dict__
for k,v in n.items():
if type(v) == dict or type(v) == Struct:
n[k]= self.replace_mappings(v)
elif type(v) == list:
for value in v:
value = self.replace_mappings(value)
elif v is None:
continue
elif v in self.mappings:
n[k] = self.mappings[v]
v = self.mappings[v]
return n
def map_ontologies(self, ntuple):
""" This is supposed to replace all of the mappings in the ntuple with values from the action ontology, if applicable. """
n = ntuple
for k, v in ntuple.items():
if isinstance(v, dict):
n[k] = self.map_ontologies(v)
elif isinstance(v, list):
for value in v:
value = self.map_ontologies(value)
elif v is None:
continue
elif v in self.mappings:
n[k] = self.mappings[v]
v = self.mappings[v]
return n
# StarcoderdataPython 3355567 (0 stars)
"""
Interactive CLI menu module
"""
# Imports
import os # Os module for the 'clear' command.
import sys # Sys module for the 'exit' command.
import config # Config module for the setter functions.
import cron # Cron module for the crontab manipulations.
# Menu decorator
def menu_decorator(menu):
def wrapper():
# Header
os.system("clear")
print "Light System Monitor\n"
# Current menu
menu()
# Footer
print
# If the decorated function is a submenu:
if menu.__name__[:3] == "sub":
print "9. Back"
# Else if the decorated function is the main menu:
elif menu.__name__ == "menu_main":
print "0. Exit"
else:
print "0. Main menu"
choice = raw_input(">> ")
exec_menu(choice, menu.__name__)
return wrapper
# Main menu
@menu_decorator
def menu_main():
print "Please choose an option:"
print "1. Crontab configuration"
print "2. Alerts configuration"
print "3. Email configuration"
# Execute menu
def exec_menu(choice, current_menu):
# If the 'choice' action exists in the current menu's options,
    # use it. Otherwise remain in the current menu.
# This can be redone to use dict.get() method actually.
try:
menu_actions[current_menu][choice]()
except KeyError:
menu_actions[current_menu][current_menu]()
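# Dispatch example (hypothetical input): typing "1" while menu_main is shown looks up
# menu_actions["menu_main"]["1"], i.e. menu_cron, and calls it; an unrecognised choice
# falls into the KeyError branch and simply re-renders the current menu.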
# Exit program
def app_exit():
os.system("clear")
sys.exit(0)
# Crontab menu
@menu_decorator
def menu_cron():
print "Crontab configuration\n"
if cron.is_set():
print "Crontab is set\n"
print "1. Add to crontab"
print "2. Remove from crontab"
# Add to crontab
def menu_cron_add():
cron.add()
exec_menu("menu_cron", "menu_cron")
# Remove from crontab
def menu_cron_remove():
cron.remove()
exec_menu("menu_cron", "menu_cron")
# Alerts menu
@menu_decorator
def menu_alerts():
print "Alerts configuration\n"
print "1. Processes"
print "2. Thresholds"
# Alerts - Processes sub-menu
@menu_decorator
def submenu_alerts_processes():
print "Alerts configuration -> Processes\n"
print "Watched processes: {}\n".format(", ".join(config.list_processes()))
print "1. Add process"
print "2. Remove process"
# Alerts - Processes - Add
@menu_decorator
def submenu_alerts_processes_add():
print "Alerts configuration -> Processes -> Add Process\n"
process = raw_input("Please enter a process to watch: ")
if config.add_process(process):
print "{} added to the watch list!".format(process)
else:
print "Failed to add!"
raw_input("Press enter to acknowledge.")
# TODO - Maybe there's no need for this output and it's easier to see the results in the parent menu.
exec_menu("submenu_alerts_processes", "submenu_alerts_processes")
# Alerts - Processes - Remove
@menu_decorator
def submenu_alerts_processes_remove():
print "Alerts configuration -> Processes -> Remove Process\n"
process = raw_input("Please enter a process to remove from watching: ")
if config.remove_process(process):
print "{} removed from the watch list!".format(process)
else:
print "Failed to remove!"
raw_input("Press enter to acknowledge.")
# TODO - Maybe there's no need for this output and it's easier to see the results in the parent menu.
exec_menu("submenu_alerts_processes", "submenu_alerts_processes")
# Alerts - Thresholds sub-menu
@menu_decorator
def submenu_alerts_thresholds():
print "Alerts configuration -> Thresholds\n"
print "1. Set CPU percentage threshold"
print "2. Set Memory percentage threshold"
print "3. Set Swap memory percentage threshold"
print "4. Set Core Temperature threshold"
# Alerts - Thresholds sub-menu - Set CPU
@menu_decorator
def submenu_alerts_thresholds_cpu():
print "Alerts configuration -> Thresholds -> CPU percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_cpu_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Set Memory
@menu_decorator
def submenu_alerts_thresholds_memory():
print "Alerts configuration -> Thresholds -> Memory percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_memory_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Set Swap
@menu_decorator
def submenu_alerts_thresholds_swap():
print "Alerts configuration -> Thresholds -> Swap percentage\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_swap_percent(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Alerts - Thresholds sub-menu - Core temperature
@menu_decorator
def submenu_alerts_thresholds_temp_core():
print "Alerts configuration -> Thresholds -> Core temperature\n"
threshold = raw_input("Please enter the new threshold: ")
config.set_temp_core(int(threshold))
exec_menu("submenu_alerts_thresholds", "submenu_alerts_thresholds")
# Email menu
@menu_decorator
def menu_email():
print "Email configuration\n"
print "1. Recipient address"
print "2. SMTP server"
# Email - Recipient sub-menus
@menu_decorator
def submenu_email_recipient():
print "Email configuration -> Recipient address\n"
print "Currently set address: {}\n".format(config.settings["email"]["address"])
print "1. Change address"
# Email - Change the recipient
@menu_decorator
def submenu_email_recipient_change():
print "Email configuration -> Recipient address -> Address change\n"
address = raw_input("Please enter a new address: ")
config.set_email(address)
exec_menu("submenu_email_recipient", "submenu_email_recipient")
# Email - SMTP sub-menus
@menu_decorator
def submenu_email_smtp():
print "Email configuration -> SMTP configuration\n"
print "Currently set server: {}".format(config.settings["email"]["smtp_server"])
print "Currently set username: {}".format(config.settings["email"]["smtp_user"])
print "Currently set password: {}\n".format(config.settings["email"]["smtp_pass"])
print "1. Change address"
print "2. Change username"
print "3. Change password"
# Email - Change the SMTP server
@menu_decorator
def submenu_email_smtp_change():
print "Email configuration -> SMTP configuration -> Server change\n"
server = raw_input("Please enter a new server domain name or IP: ")
config.set_smtp(server)
exec_menu("submenu_email_smtp", "submenu_email_smtp")
# Email - Change the SMTP username
@menu_decorator
def submenu_email_smtp_user():
print "Email configuration -> SMTP configuration -> Username change\n"
username = raw_input("Please enter a new SMTP username: ")
config.set_smtp_user(username)
exec_menu("submenu_email_smtp_user", "submenu_email_smtp_user")
# Email - Change the SMTP password
@menu_decorator
def submenu_email_smtp_pass():
print "Email configuration -> SMTP configuration -> Password change\n"
server = raw_input("Please enter a new SMTP password: ")
config.set_smtp_pass(server)
exec_menu("submenu_email_smtp_pass", "submenu_email_smtp_pass")
# Menu definition
menu_actions = {
"menu_main": {
"menu_main": menu_main,
"1": menu_cron,
"2": menu_alerts,
"3": menu_email,
"0": app_exit,
},
"menu_cron": {
"menu_cron": menu_cron,
"1": menu_cron_add,
"2": menu_cron_remove,
"0": menu_main,
},
"menu_alerts": {
"menu_alerts": menu_alerts,
"1": submenu_alerts_processes,
"2": submenu_alerts_thresholds,
"0": menu_main,
},
"submenu_alerts_processes": {
"submenu_alerts_processes": submenu_alerts_processes,
"1": submenu_alerts_processes_add,
"2": submenu_alerts_processes_remove,
"9": menu_alerts,
"0": menu_main,
},
"submenu_alerts_thresholds": {
"submenu_alerts_thresholds": submenu_alerts_thresholds,
"1": submenu_alerts_thresholds_cpu,
"2": submenu_alerts_thresholds_memory,
"3": submenu_alerts_thresholds_swap,
"4": submenu_alerts_thresholds_temp_core,
"9": menu_alerts,
"0": menu_main,
},
"menu_email": {
"menu_email": menu_email,
"1": submenu_email_recipient,
"2": submenu_email_smtp,
"0": menu_main,
},
"submenu_email_recipient": {
"submenu_email_recipient": submenu_email_recipient,
"1": submenu_email_recipient_change,
"9": menu_email,
"0": menu_main,
},
"submenu_email_smtp": {
"submenu_email_smtp": submenu_email_smtp,
"1": submenu_email_smtp_change,
"2": submenu_email_smtp_user,
"3": submenu_email_smtp_pass,
"9": menu_email,
"0": menu_main,
}
}
| StarcoderdataPython |
1775257 | #Part 1:
tipo CUENTA estructura
saldo: REAL
descubierto: REAL
invariante
#Overdraft is not authorized
descubierto = 0
#The balance must be greater than the authorized overdraft
saldo >= descubierto
fin CUENTA
Algorithm 1: Definition of abrir CUENTA (open an account)
abrir( c:CUENTA, saldo_inicial: REAL)
precondición:
saldo_inicial > 0
realización:
c.descubierto = 0
c.saldo = saldo_inicial
postcondicion
c.descubierto = 0
#not authorized
antiguo (saldo_inicial) = saldo_inicial
c.saldo = saldo_inicial
fin abrir
#Part 2
Algorithm 2: Credit an account (abonar)
abonar(c:CUENTA, credito: REAL)
precondicion
c.saldo ≠ NULO
credito ≠ NULO
realizacion
c.saldo = c.saldo + credito
postcondicion
#The authorized overdraft and the credit amount are not modified
antiguo(c).descubierto = descubierto
antiguo(c).credito = credito
#The balance increases by the credit
c.saldo = antiguo(c).saldo + credito
fin abonar
#Part 3
Algorithm 3: Debit an account (cargar)
cargar (c: CUENTA, debito: REAL)
precondicion
c.saldo ≠ NULO
debito ≠ NULO
c.saldo + c.descubierto >= debito >= 0
realización
abonar (c, -debito)
postcondicion
#The authorized overdraft and the debit amount are not modified
antiguo(c).descubierto = descubierto
antiguo (debito) = debito
#The debit is subtracted from the balance
c.saldo = antiguo(c).saldo - debito
fin cargar
#Part 4
Algorithm 4: Check an account balance (consultar)
consultar(c:CUENTA): REAL
precondición
c.saldo ≠ NULO
realizacion
resultado = c.saldo
postcondicion
resultado = c.saldo
fin consultar
#Part 5
Algorithm 5: Definition of es_acredora and es_deudora
es_acredora (c:CUENTA): BOOLEANO
precondicion
c.saldo ≠ NULO
realizacion
resultado = (c.saldo >= 0)
postcondicion
resultado = (c.saldo >= 0)
fin es_acredora
es_deudora (c:CUENTA): BOOLEANO
precondicion
c.saldo ≠ NULO
realizacion
resultado = (-c.descubierto <= c.saldo <= 0)
postcondicion
resultado = (-c.descubierto <= c.saldo <= 0)
fin es_deudora
#2 AUTHORIZED OVERDRAFT
#Part 1
tipo CUENTA estructura
saldo: REAL
descubierto: REAL
invariante
#Overdraft is authorized
descubierto >= 0
#The balance must be greater than the authorized overdraft
saldo >= descubierto
fin CUENTA
Algorithm 1: Definition of abrir CUENTA (open an account)
abrir( c:CUENTA, saldo_inicial: REAL)
precondición:
saldo_inicial > 0
descubierto_MAX >= 0
realización:
c.descubierto = descubierto_MAX
c.saldo = saldo_inicial
postcondicion
c.descubierto = descubierto_MAX
c.saldo = saldo_inicial
fin abrir
#3 ACCOUNT AND TIME
#Part 1
tipo CUENTA estructura
saldo: REAL
descubierto: REAL
fecha_descubierto: FECHA #Start date of the last overdraft
duracion_max : FECHA #Maximum duration of the overdraft
invariante
#Overdraft is authorized for a limited time only
descubierto >= 0
fecha_descubierto ≠ 0 => fecha_descubierto + duracion_max <= fecha_actual
#The balance must be greater than the authorized overdraft
saldo >= descubierto
fin CUENTA
#Part 2
Algorithm 2: Definition of opening an account (abrir)
abrir
Entrada
c: CUENTA
saldo_inicial: REAL
descubierto_MAX: REAL
duracion_MAX: FECHA
Precondicion:
saldo_inicial> 0
descubierto_MAX >=0
duracion_MAX >= 0
Realizacion
c.descubierto = descubierto_MAX
c.saldo = saldo_inicial
c.fecha_descubierto = 0
c.duracion_max = duracion_MAX
postcondicion:
c.descubierto = descubierto_MAX
c.saldo = saldo_inicial
c.fecha_descubierto = 0
c.duracion_max = duracion_MAX
fin abrir | StarcoderdataPython |
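# Illustrative sketch: a minimal Python reading of the CUENTA pseudocode above,
# with the preconditions and the invariant expressed as assertions. The class and
# method names (Cuenta, abrir via __init__, abonar, cargar, consultar,
# es_acreedora, es_deudora) and the interpretation of the invariant as "the
# balance never drops below the negative of the authorized overdraft" are
# assumptions, not part of the original exercise text.
class Cuenta:
    def __init__(self, saldo_inicial, descubierto_max=0.0):
        # precondition: positive opening balance, non-negative authorized overdraft
        assert saldo_inicial > 0 and descubierto_max >= 0
        self.saldo = saldo_inicial
        self.descubierto = descubierto_max
        self._check_invariant()

    def _check_invariant(self):
        # invariant: the balance never falls below the authorized overdraft
        assert self.saldo >= -self.descubierto

    def abonar(self, credito):
        # credit the account
        self.saldo += credito
        self._check_invariant()

    def cargar(self, debito):
        # debit the account; precondition: covered by balance plus overdraft
        assert 0 <= debito <= self.saldo + self.descubierto
        self.abonar(-debito)

    def consultar(self):
        return self.saldo

    def es_acreedora(self):
        return self.saldo >= 0

    def es_deudora(self):
        return -self.descubierto <= self.saldo < 0


if __name__ == "__main__":
    c = Cuenta(100.0, descubierto_max=50.0)
    c.cargar(120.0)  # balance becomes -20, still within the authorized overdraft
    assert c.es_deudora() and c.consultar() == -20.0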
1647694 | <filename>prepare/input/prepare_pre_select.py
from __future__ import print_function
from itertools import permutations
import numpy as np
import os
import glob
import cv2
import re
import sys
import matplotlib.pyplot as plt
ROUNDS = 100
DIFF = 5
DIFF_RANGE = 0
MODE = 'all'
MIN_DIR = int(sys.argv[1])
MAX_DIR = int(sys.argv[2])
SIZE = 4
SAME = 4
#array = np.arange(min_nr, max_nr+1)
order = np.arange(0, SIZE)
orders = np.array(list(permutations(order)))
labels_file = open("odo_image_labels.txt", 'w')
keys_file = open("odo_image_keys.txt", 'w')
select_name = "select.txt"
select_file = open(select_name)
select = {}
for s in select_file.readlines():
s = s.split()
idx = int(s[0])
nr = int(s[1])
if idx not in select: select[idx] = []
select[idx].append(nr)
#base_name = "/srv/glusterfs/patilv/Datasets/kitti/raw/extracted"
##local_file_dir = "2011_09_26_drive_0005_sync/image_02/data/"
#local_file_dir = "image_02/data/"
base_name = "/srv/glusterfs/patilv/Datasets/kitti/visual_odometry/dataset/sequences"
local_file_dir = "image_0/"
dirs = glob.glob(os.path.join(base_name, "*"))
dir_size = []
total_size = 0
for d in dirs[:]:
base_dir = os.path.basename(d)
if not (MIN_DIR <= int(base_dir) <= MAX_DIR):
dirs.remove(d)
for d in dirs:
int_dir = int(os.path.basename(d))
if int_dir not in select: select[int_dir] = []
local_dir = os.path.join(base_name, d, local_file_dir)
#len(os.listdir(local_dir))
dir_size.append(len(select[int_dir]))
total_size += dir_size[-1]
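# Map each flat sample index to its directory index, so directories are sampled
# in proportion to how many pre-selected frames they contain.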
size_to_idx = np.zeros(total_size, dtype=np.int32)
cur_idx = 0
for i in range(len(dirs)):
#for j in range(dir_size[i]):
size_to_idx[cur_idx:cur_idx+dir_size[i]] = i
cur_idx += dir_size[i]
for r in range(ROUNDS):
# prepare
dir_idx = size_to_idx[np.random.randint(total_size)]
local_dir = os.path.join(base_name, dirs[dir_idx], local_file_dir)
min_nr = 0
max_nr = dir_size[dir_idx]
#print(int(os.path.basename(dirs[dir_idx])), len(select[int(os.path.basename(dirs[dir_idx]))]), max_nr)
idx = select[int(os.path.basename(dirs[dir_idx]))][np.random.randint(min_nr, max_nr)]
files = np.zeros(SIZE, dtype=np.int32) #np.arange(idx - DIFF, idx + DIFF + 1, DIFF)
files[0] = idx
for i in range(1, SIZE):
files[i] = files[i-1] + DIFF + np.random.randint(-DIFF_RANGE, DIFF_RANGE + 1)
# create pair
indices = np.random.choice(np.arange(0, orders.shape[0]), SAME, False)
for index in indices:
order = orders[index]
out = files[order]
print(r, local_dir, out, min_nr, max_nr)
for idx in out:
keys_file.write("{}{:06}.png".format(local_dir, idx))
keys_file.write(" ")
keys_file.write("\n")
labels_file.write("{}\n".format(index))
print(total_size) | StarcoderdataPython |
3371895 | <filename>datedfolders.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 08:34:18 2020
@author: anthonysafarik
"""
import subprocess
import os
import shutil
def check_output(cmd):
try:
output = subprocess.check_output(cmd)
return output
except:
return ''
def get_exif_date_all(inpath):
'''
alternate, slower way that checks each tag on each acceptable extension
'''
(basename, ext) = os.path.splitext(inpath)
#exts = ['.JPG','.ARW','.MP4','.MOV','.AVI']
	tags = ['-QuickTime:CreateDate','-EXIF:CreateDate','-RIFF:DateTimeOriginal','-XMP:CreateDate','-File:FileModifyDate']
#if ext.upper() in exts:
for tag in tags:
output = ''
output = check_output(['exiftool',tag,'-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
if len(output)==11:
output = str(output)[2:12]
break
if output: print (output,inpath,'T')
def get_exif_date_by_ext(inpath):
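	# Pick the exiftool date tag that matches the file type (EXIF for JPG/ARW,
	# QuickTime for MP4/MOV, XMP/RIFF for AVI) and fall back to the filesystem
	# modification date when no embedded creation date is found.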
(basename, ext) = os.path.splitext(inpath)
photo_exts = ['.JPG','.ARW']
qt_exts = ['.MP4','.MOV']
video_exts = ['.AVI']
#tags = ['-QuickTime:CreateDate','-EXIF:CreateDate','-RIFF:DateTimeOriginal','-XMP:CreateDate','File:FileModifyDate']
output = ''
if ext.upper() in photo_exts:
output = check_output(['exiftool','-EXIF:CreateDate','-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
tag_to_id ='-EXIF:CreateDate'
elif ext.upper() in qt_exts:
output = check_output(['exiftool','-QuickTime:CreateDate','-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
tag_to_id ='-QuickTime:CreateDate'
elif ext == '.avi':
output = check_output(['exiftool','-XMP:CreateDate','-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
tag_to_id ='-XMP:CreateDate'
elif ext == '.AVI':
output = check_output(['exiftool','-RIFF:DateTimeOriginal','-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
tag_to_id ='-RIFF:DateTimeOriginal'
if not len(output)==11:
output = check_output(['exiftool','-File:FileModifyDate','-d', '%Y-%m-%d', '-s', '-s', '-s',inpath])
tag_to_id ='-File:FileModifyDate'
if len(output)==11:
print (tag_to_id)
return str(output)[2:12]
else:
return ''
#(basename, ext) = os.path.splitext(fname)
def make_dated_folders(inpath,outpath):
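	# Walk inpath, read each file's best-available date, and move the file into
	# outpath/<year>/<date>/, skipping files whose destination already exists.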
for path, dirs, files in os.walk(inpath):
for fname in files:
fpath = path+os.path.sep+fname
date = get_exif_date_by_ext(fpath)
if date:
date_folder = outpath+os.path.sep+date[0:4]+os.path.sep+date
dst_fpath = date_folder+os.path.sep+fname
if not os.path.exists(date_folder):
os.makedirs(date_folder)
if os.path.exists(dst_fpath):
print('path exists... ',dst_fpath)
else:
try:
shutil.move(fpath, dst_fpath)
print (dst_fpath)
except:
print ('could not move...',fname)
else:
print('could not extract date...',fname)
inpath = input('enter inpath... ')
outpath = input('enter outpath... ')
make_dated_folders(inpath,outpath)
| StarcoderdataPython |
148199 | <filename>dags/s3topostgres_dag.py<gh_stars>0
"""Airflow DAG S3 to postgres
@author:Shaurya
@date: 2021-01-03
"""
import os,sys,inspect
current_dir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir=os.path.dirname(current_dir)
sys.path.insert(0,parent_dir)
from pprint import pprint
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.operators.dummy import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.utils.dates import days_ago
from datetime import datetime
from Tasks.s3_postgres import S3toPostgres
default_args={
'owner':'shaurya',
'provide_context':True,
'depends_on_past':False,
'wait_for_downstream':False,
'email_on_failure':False,
'email_on_retry':False,
'retries':0,
'params':{'template_dir':'/home/abyssnlp/projects/airflow-test-suite/config/templates/',
'template_file':'connections.yaml',
'config_file':'/home/abyssnlp/projects/airflow-test-suite/config/config.ini',
'log_level':'info',
'date':datetime(2021,1,10),
'bucket_name':'airflow-s3-bucket-test',
'table_name':'marketing_data'}
}
# s3 to postgres dag
lake_to_db=DAG(
dag_id='lake_to_db',
default_args=default_args,
schedule_interval=None, # manual or conditional trigger
start_date=datetime(2021,1,10),
tags=['s3','postgres','marketing']
)
# start
begin=DummyOperator(
task_id='start_dag',
dag=lake_to_db
)
# create table to ingest the data into
make_table=PostgresOperator(
	task_id='create_marketing_data',
	postgres_conn_id='MarketingDataPostgres',
	sql='sql/marketing_attribution_table.sql',
	dag=lake_to_db
)
# Actual python callable
etl_data=PythonOperator(
task_id='s3_to_postgres',
python_callable=S3toPostgres.airflow_runner,
dag=lake_to_db
)
# end
end=DummyOperator(
task_id='end_dag',
dag=lake_to_db
)
begin >> make_table >> etl_data >> end | StarcoderdataPython |
3214076 | <filename>acmicpc/2457/2457.py<gh_stars>1-10
n = int(input()) # number of flowers
def quick_sort_in_list(unsorted:list, start, end)->list:
if end - start <= 0:
return
pivot = unsorted[end]
i = start
for j in range(start, end):
if unsorted[j] <= pivot:
unsorted[i], unsorted[j] = unsorted[j], unsorted[i]
i += 1
unsorted[i], unsorted[end] = unsorted[end], unsorted[i]
quick_sort_in_list(unsorted, start, i - 1)
quick_sort_in_list(unsorted, i + 1, end)
def quick_sort(a):
quick_sort_in_list(a, 0, len(a) -1)
for i in range(n):
input_date = list(map(int, input().split()))
quick_sort(input_date)
print(input_date)
def parse_dates():
	# Build (month, day) tuples for each flower's blooming and wilting dates.
	for i in range(n):
		blooming_date = ()  # (month, day)
		falling_date = ()
		# input_date = tuple(map(int, input().split()))  # (1, 1, 5, 31)
		for j in range(len(input_date)//2):
			blooming_date += (input_date[j],)
		for j in range(2, 4):
			falling_date += (input_date[j],)
	# Sort in ascending order of wilting date (quick sort): wouldn't sorting the
	# flowers with the longest blooming period first minimize the overlap?
| StarcoderdataPython |
67010 | <gh_stars>1000+
import h2o
h2o.init()
weather_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/junit/weather.csv")
# Get a summary of the data
weather_hex.describe()
| StarcoderdataPython |
1753440 | <filename>tess/utils.py
from datetime import datetime
from tess.data.vulnerability import Vulnerability
class Utils:
@staticmethod
def get_available_feature_schema(data, force_base_entries=True):
cwe_entries = []
capec_entries = []
keywords_entries = []
for el in data:
cwe_entries.extend([item.lower() for item in el.details.cwe])
capec_entries.extend([item[0].lower() for item in el.details.capec])
keywords_entries.extend([item.lower() for item in el.details.keywords])
if force_base_entries:
addon = ['__cvss_expl', '__ref_number', '__days_diff']
else:
addon = []
return list(set(keywords_entries)) + list(set(capec_entries)) + list(set(cwe_entries)) + addon
@staticmethod
def get_element_feature(schema, vulnerability, time):
features = [0] * len(schema)
for feature in (vulnerability.keywords + [item[0] for item in vulnerability.capec]):
try:
index = schema.index(feature.lower())
features[index] = 1
except ValueError:
pass
if '__days_diff' in schema:
features[schema.index('__days_diff')] = (time - vulnerability.published_date.replace(tzinfo=None)).days
		if '__ref_number' in schema:
			features[schema.index('__ref_number')] = vulnerability.references_number
		if '__cvss_expl' in schema:
			features[schema.index('__cvss_expl')] = vulnerability.e_score
return features
@staticmethod
def get_target_function_value(data, vuln_event):
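		# Unless an explicit target is set, scale the exploitability score by the
		# fraction of positive outcomes among other events for the same vulnerability
		# within 31 days of this event (falling back to the raw score if none exist).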
valid_events = []
if vuln_event.details.target is not None:
return vuln_event.details.target
for item in data:
if vuln_event.id != item.id:
continue
diff = (vuln_event.date - item.date).days
if 0 <= diff <= 31 and item != vuln_event:
valid_events.append(item)
pos = len([item.outcome for item in valid_events if item.outcome == True])
if len(valid_events) == 0:
return vuln_event.details.e_score
return vuln_event.details.e_score * (pos / (len(valid_events)))
@staticmethod
def get_filtered_schema(schema, filter):
ret = []
for i in range(len(schema)):
if filter[i]:
ret.append(schema[i])
return ret
@staticmethod
def get_vulnerability(cve_id, cve_search, key_parser, skip_capec=False, skip_keywords=False, skip_cwe=False, target = None):
info = cve_search.find_cve_by_id(cve_id)
if skip_keywords:
keywords = []
else:
keywords = key_parser.parse(info['cve']['description']['description_data'][0]['value'])
if skip_capec:
capec = []
else:
try:
capec = [(item['id'], item['name']) for item in info['capec']]
except KeyError:
capec = []
if skip_cwe:
cwe = []
else:
cwe = []
problems = info['cve']['problemtype']['problemtype_data']
for problem in problems:
details = problem['description']
for el in details:
if el['value'].startswith('CWE'):
cwe.append(el['value'])
try:
exploitability_score = info['impact']['baseMetricV3']['exploitabilityScore']
cvss_vector = info['impact']['baseMetricV3']['cvssV3']['vectorString']
except KeyError:
return None
vuln_details = Vulnerability(keywords, capec, cwe, exploitability_score, cvss_vector,
len(info['cve']['references']['reference_data']),
datetime.strptime(info['publishedDate'], '%Y-%m-%dT%H:%MZ'), info['history'], target=target)
return vuln_details
"""
@staticmethod
def batch(iterable, size):
it = iter(iterable)
while item := list(itertools.islice(it, size)):
yield item
"""
| StarcoderdataPython |
44889 | <gh_stars>1-10
#!/usr/bin/env python
import s3p_openstack_tools as s3p
from datetime import datetime
import argparse
import sys
import os
import pdb
from time import sleep
debug_mode=False
verbosity_level=0
# cloud test control: check main() for definition of cloud_info using these
validate_existing = True
attach_to_router=True
pin_instances = True
network_list=[]
subnet_list=[]
hypervisor_list=[]
hypervisor_set=set()
server_set = set()
def isodate():
"""prints the date in a pseudo-ISO format (Y-M-D H:M:S)"""
d = datetime.now()
return d.isoformat()
def debug_print(stringIn='', dbg_level=0):
"""docstring for dbgPrint"""
global verbosity_level
if debug_mode & (dbg_level <= verbosity_level):
print("[{2}] DEBUG({1}): {0}".format( stringIn, dbg_level, isodate()))
def logprint(message):
"""prints a message in 'log format' like '[date] message' """
print("[{0}] {1}".format(isodate(), message))
def get_auth_args_openstackrc():
""" This function should source an openstackrc file for auth_args """
pass
def get_auth_args():
service_host_ip = os.getenv('SERVICE_HOST')
auth_args = {
'auth_url': 'http://' + service_host_ip + ':5000/v2.0',
'project_name': 'demo',
'username': 'admin',
'password': '<PASSWORD>',
}
return auth_args
def create_instance(conn, instance_name, hypervisor_name, network_id,
resource_ids, smoketest=True):
"""
Creates an S3P server (OpenStack tenant instance) on the specified
hypervisor, attached to a specific network
"""
global validate_existing
debug_print("validate_existing = {0}".format(validate_existing), 2)
global server_set
global cloud_info
""" check if instance is already created """
if not(instance_name in server_set):
# create instance
network_name = s3p.get_network_name(conn, network_id)
logmsg = "Creating server {0} on network {1}".format(
instance_name, network_name)
if not(cloud_info['pin_instances']):
# unset the hypervisor hostname if don't need to pin instances to hosts
hypervisor_name=''
else:
logmsg = logmsg + ", on host {0} with CLI library".format(hypervisor_name)
logprint(logmsg)
t1=datetime.now()
os_instance = s3p.create_server(conn,
instance_name,
resource_ids['image_id'],
resource_ids['flavor_id'],
network_id,
cloud_info['secgrp_name'],
hypervisor_name,
resource_ids['project_id']
)
t2=datetime.now()
debug_print( "os_instance = {0}".format(os_instance) , 3)
debug_print( "type(os_instance) = {0}".format(type(os_instance)) , 3)
if type(os_instance) == type(None):
logprint("ERROR: Server creation failed for:\nserver name: {0}".format(
instance_name))
sys.exit(1)
else:
logprint("Server Creation took {0} seconds".format((t2-t1).total_seconds()))
server_set.add(os_instance.name)
else:
os_instance = s3p.get_server_detail(conn, instance_name)
debug_print("Type of os_instance = {0}".format(type(os_instance)), 3)
debug_print("os_instance = {0}".format(os_instance), 3)
debug_print("WARNING: an OpenStack instance with name '{0}' ({1})".format(os_instance.name, instance_name) +
"already exists, skipping creation", 1)
smoketest = validate_existing
if smoketest:
os_network = conn.network.find_network(network_id)
debug_print("os_instance = {0}".format(os_instance), 3)
os_instance = s3p.get_server_detail(conn, instance_name)
smoke_test_server(conn, os_instance, os_network)
def smoke_test_server(conn, os_instance, os_network):
"""
Smoke Test == ping new OpenStack tenant instance until it responds
a.k.a. wait_for_tenant
This smoke test enters the DHCP network namespace (netns) on the service
host that corresponds to the server's network
"""
debug_print("os_instance = {0}".format(os_instance), 3)
logprint("Waiting for instance {0} to respond to ping on network {1}...".format(
os_instance.name, os_network.name))
debug_print("os_instance = {0}".format(os_instance), 3)
ip_addr = os_instance.addresses[os_network.name][0]['addr']
""" timing: get instance IP address and network_id """
NETNS = 'qdhcp-' + os_network.id
logprint("Server '{0}' obtained IPV4 address: {1}".format(os_instance.name, ip_addr))
""" enter netns and ping instance IP """
command = "sudo ip netns exec qdhcp-" + os_network.id + " ping -c 1 " + ip_addr
debug_print("Smoke test: {0}".format(command), 2)
""" TODO: This smoke test is very coarse - could be much better"""
t1 =datetime.now()
response = os.system(command)
while response != 0:
""" timing: enter netns & ping server until it responds """
response = os.system(command)
sleep(0.1)
t2=datetime.now()
""" print/accumulate timing info for server boot & smoke test """
logprint("SmokeTest: {0} seconds for tenant '{1}' to respond to ping".format(
(t2-t1).total_seconds(), os_instance.name))
def delete_instance(conn, instance_name):
""" Deletes an S3P server (OpenStack tenant instance) """
logprint("Deleting instance \"{0}\"".format(instance_name))
s3p.delete_server(conn, instance_name)
""" network management functions """
def determine_net_index(comp_id, num_networks, host_id, numberingType='one_net'):
"""Function determines which network will be used
	modulo_num_networks == component id modulo the number of networks (evenly distributed)
one_per_physhost == one network per physical host,
all instances on that host are only on that network
i.e. comp-11-13 and comp-11-12 share a network
a.k.a. "Vertical networks"
one_per_wave == one network common to each "wave" of compute hosts
i.e. comp-11-13 & comp-39-13 share a network
a.k.a. "Horizontal networks"
"""
if numberingType == 'modulo_num_networks':
networkIdx = int(comp_id) % num_networks
elif numberingType == 'one_per_wave':
networkIdx = int(comp_id)
elif numberingType == 'one_per_physhost':
networkIdx = int(host_id)
else:
""" one network to rule them all """
networkIdx = 0
return networkIdx
def create_security_group_and_rules(conn, secgrp_name, project_id):
"""creates s3p security group and adds rules for SSH and ICMP """
os_security_group = conn.network.find_security_group(secgrp_name)
if os_security_group == None:
logprint("Creating security group {0}".format(secgrp_name))
os_security_group = s3p.create_security_group(conn, secgrp_name, project_id)
s3p.add_security_group_rules_ssh(conn, os_security_group.id)
s3p.add_security_group_rules_icmp(conn, os_security_group.id)
else:
logprint("Using existing security group '{0}'".format(secgrp_name))
return os_security_group
def create_network_and_subnet(conn, network_name, network_ix):
"""creates an openstack network and subnet"""
global network_set
global attach_to_router
router_name = 'router1'
network_set = s3p.get_network_set(conn)
if network_name in network_set:
debug_print("Using existing OpenStack network with name '{0}'".format(
network_name), 1)
os_network = conn.network.find_network(network_name)
else:
logprint("Creating OpenStack network with name: {0}".format(network_name))
t1 = datetime.now()
os_network = s3p.create_network_raw(conn, network_name)
t2 = datetime.now()
network_set.add(os_network.name)
network_list.append(os_network.name)
if os_network != None:
logprint("Network Creation: {0} seconds".format((t2-t1).total_seconds()))
subnet_name = network_name+'-sub'
parent_network_id = os_network.id
cidr = '10.0.'+str(network_ix)+'.0/24'
gateway_ip = '10.0.'+str(network_ix)+'.1'
logprint("Creating OpenStack subnet with name: {0}".format(network_name+"-sub"))
t1 = datetime.now()
os_subnet = s3p.create_subnet_raw(
conn,
subnet_name,
parent_network_id,
cidr,
gateway_ip)
if attach_to_router:
os_router = s3p.get_os_router(conn, router_name)
s3p.router_add_subnet(conn, os_router, os_subnet.id)
t2 = datetime.now()
logprint("Subnet Creation: {0} seconds".format((t2-t1).total_seconds()))
else:
logprint("ERROR: Failed to create openstack network '{0}'".format(
network_name))
sys.exit(1)
return os_network.id
def delete_network_and_subnet(conn, os_network):
"""
Deletes a network and its associated subnets
Each network should have a list of subnets associated with it through
OpenStack or through a global variable herein
"""
name = os_network.name
logprint("Deleting network \"{0}\"".format(name))
s3p.delete_network(conn, os_network)
logprint("Network \"{0}\" Successfully deleted".format(name))
# cleanup
def cleanup(conn, cloud_info):
"""removes all allocated OpenStack resources incl. servers, networks, subnets"""
global server_set
global network_list
global network_set
logprint("Removing instances and networks from OpenStack Cloud...")
# delete servers
server_prefix = cloud_info['server_prefix']
for server in conn.compute.servers():
if server_prefix in server.name:
delete_instance(conn, server.id)
server_set = s3p.get_server_set(conn)
# delete networks
net_prefix = cloud_info['network_prefix']
for network in conn.network.networks():
if net_prefix in network.name:
name = network.name
delete_network_and_subnet(conn, network)
network_list = s3p.list_networks_by_name(conn)
network_set = s3p.get_network_set(conn, cloud_info['network_prefix'])
# COMPLETED:
def unit_tests(conn):
""" tests functions defined here or in s3p_openstack_tools """
"""
The following functions are working:
"""
s3p.print_images_list(conn)
s3p.print_server_list(conn)
node_id="21-11"
compute_host="compute-"+node_id
server_name="tenant-"+node_id+"-1"
logprint("{0}, {1}".format(compute_host, server_name))
s3p.create_server(conn, server_name, compute_host)
# List network resources
s3p.print_network_list(conn)
print("")
s3p.print_subnet_list(conn)
print("")
s3p.print_security_group_list(conn)
print("")
s3p.print_network_agent_list(conn)
print("")
s3p.list_net_availability_zones(conn)
print("")
s3p.list_comp_availability_zones(conn)
print("")
# create a network:
s3p.create_network(conn, network_name)
# delete a network & it's subnets:
s3p.delete_network(conn, network_name)
def get_resource_ids(conn, names):
"""gets resource identifiers (OpenStack resource IDs) for default resources"""
defaults = {}
# get project id
os_project = conn.identity.find_project(names['project_name'])
defaults['project_id'] = os_project.id
# get security group id
os_secgrp = conn.network.find_security_group(names['secgrp_name'])
defaults['secgrp_id'] = os_secgrp.id
# get image id
os_image = conn.compute.find_image(names['image_name'])
defaults['image_id'] = os_image.id
# get flavor id
os_flavor = conn.compute.find_flavor(names['flavor_name'])
defaults['flavor_id'] = os_flavor.id
debug_print("S3P Resource IDs:", 2)
debug_print("{0}: {1}".format(names['project_name'], defaults['project_id']), 2)
debug_print("{0}: {1}".format(names['secgrp_name'], defaults['secgrp_id']), 2)
debug_print("{0}: {1}".format(names['image_name'], defaults['image_id']), 2)
debug_print("{0}: {1}".format(names['flavor_name'], defaults['flavor_id']), 2)
return defaults
def parse_ids_from_hypervisor_name(hypervisor_name):
"""
	Returns component identifiers from a provided hypervisor name
example: for hypervisor 'compute-5-11', this function will return two
strings: host_id=5 and comp_id=11
"""
host_id = hypervisor_name.split('-')[1]
comp_id = hypervisor_name.split('-')[2]
return host_id, comp_id
"""
" main testing loop
"""
def main():
global network_list
global network_set
global subnet_list
global hypervisor_list
global hypervisor_set
global server_set
global debug_mode
global cloud_info
global verbosity_level
global attach_to_router
verbosity_level=0
"""
parse input args with argparse
input args:
--cleanup - deletes all s3p-created instances and networks
--debug - enables debug_mode
NOTE on verbosity level:
0: prints only stats messages
1: prints WARNING: messages
2: prints control messages
3: prints details of resource creation
4: prints ???
TODO:
operation - arguments to describe how many networks, servers, etc are created
operation['num_networks']
operation['num_servers']
operation['servers_per_host']
"""
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cleanup",
help="cleanup cluster by deleting all instances and networks",
action="store_true")
parser.add_argument("-d", "--debug",
help="enable debug mode which increases output",
action="store_true")
parser.add_argument("-v", "--verbosity", type=int, choices=[0,1,2,3,4],
help="verbosity level for prints in debug mode (must enable debug mode)")
args = parser.parse_args()
debug_mode = True
debug_mode = args.debug
debug_print("args.debug = {0}".format(debug_mode))
verbosity_level = args.verbosity
debug_print("args.verbosity = {0}".format(verbosity_level), 1)
cleanup_mode = args.cleanup
debug_print("args.cleanup = {0}".format(cleanup_mode), 1)
logprint("Obtaining OpenStack credentials")
conn = s3p.get_openstack_connection()
cloud_info = {
'project_name': 'demo',
'secgrp_name': 's3p_secgrp',
'image_name': 'cirros-0.3.4-x86_64-uec',
'flavor_name': 'cirros256',
'network_prefix': 's3p-net-',
'server_prefix': 'tenant-',
'attach_to_router': True,
'pin_instances': True,
'validate_existing': True
}
attach_to_router = cloud_info['attach_to_router']
validate_existing = cloud_info['validate_existing']
pin_instances = cloud_info['pin_instances']
os_project = conn.identity.find_project(cloud_info['project_name'])
# create security group if it doesn't yet exist
os_secgrp = create_security_group_and_rules(conn,
cloud_info['secgrp_name'], os_project.id)
s3p_resource_ids = get_resource_ids(conn, cloud_info)
if cleanup_mode:
# cleanup resources
# TODO: ask user if they REALLY want to delete all the servers and networks in the cluster
# server_set = s3p.get_server_set(conn)
# network_set = s3p.get_network_set(conn)
print("WARNING:: You are about to remove\n" +
"\tall server instances matching '{0}' \n".format(cloud_info['server_prefix']) +
"\tall networks matching '{0}'\n".format(cloud_info['network_prefix']))
answer = raw_input('Are you sure you want to proceed? ')
if (answer == 'y') | (answer == 'Y' ):
cleanup(conn, cloud_info)
else:
logprint("Cleanup operation aborted - no changes to cloud.")
else:
""" Assumptions:
quotas and s3p_secgrp are alreay created
"""
# get list of networks by name
network_list = s3p.list_networks_by_name(conn)
debug_print("Network list: {0}".format(network_list), 2)
network_set = s3p.get_network_set(conn, cloud_info['network_prefix'])
debug_print("Network set: {0}".format(network_set), 2)
# get list of hypervisors by name
hypervisor_list = s3p.list_hypervisors_by_name(conn)
hypervisor_set = s3p.get_hypervisor_set(conn, prefix='compute-')
debug_print("Hypervisor List: {0}".format(hypervisor_list), 2)
debug_print("Hypervisor Set: {0}".format(hypervisor_set ), 2)
# get list of servers by name
server_set = s3p.get_server_set(conn)
debug_print("Server Set: {0}".format(server_set), 2)
servers_per_host = 1
max_networks = 6
net_numbering_type = 'modulo_num_networks'
# loop through hypervisors, creating tenants on each
for hypervisor_name in hypervisor_list:
phys_host_id, comp_id = parse_ids_from_hypervisor_name(hypervisor_name)
network_ix = determine_net_index(
comp_id,
max_networks,
phys_host_id,
net_numbering_type)
network_name = 's3p-net-' + str(network_ix)
network_id = create_network_and_subnet(conn,
network_name,
network_ix)
hypervisor_ID = phys_host_id + "-" + comp_id
# only create one tenant per host for now
instance_name = 'tenant-' + hypervisor_ID + "-1"
t1 = datetime.now()
create_instance(conn,
instance_name,
hypervisor_name,
network_id,
s3p_resource_ids,
smoketest=True)
t2 = datetime.now()
debug_print("Server and network creation took {0} s".format((t2-t1).total_seconds()), 1)
# subnet_list = s3p.print_subnet_list(conn)
# hypervisor_list = s3p.print_hypervisor_list(conn)
logprint("Done")
if __name__ == '__main__':
main()
# vim: set et sw=4 ts=4 ai :
| StarcoderdataPython |
1788989 | <gh_stars>1-10
from numpy.core.numeric import Infinity
__author__ = '<NAME> <<EMAIL>>'
# TODO: this class will be removed in the future
class TimeRangeVO:
startDate = -Infinity
endDate = Infinity
def __init__(self, start_date, end_date):
self.startDate = start_date
self.endDate = end_date
def __repr__(self):
return "TimeRangeVO. start date is: " + str(self.startDate) + ', end date is : ' + str(self.endDate)
def __str__(self):
return "TimeRangeVO. start date is: " + str(self.startDate) + ', end date is : ' + str(self.endDate)
| StarcoderdataPython |
27425 | # Import the Evernote client
from evernote.api.client import EvernoteClient
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# GUID of the note to attach the application data to
note_guid = "insert note GUID to attach key-value storage to here"
# Value of the key for the storage
# 3rd party apps are only allowed 1
key = "your-consumer-key"
# the value of the application data entry
# containing a string of arbitrary length
value = "this is the value of the application data"
# Each note is given this 4kb map of arbitrary data, shared by all third-party applications.
# Adding new data may cause the field's value to exceed the 4kb limit.
# In this case, an instance of EDAMUserException is thrown with the BAD_DATA_FORMAT error code.
# Setting this value will overwrite any existing data
usn = note_store.setNoteApplicationDataEntry(note_guid, key, value)
print "Application data set for note with GUID, '%s' with the key '%s' and value '%s' (USN %s)" % (note_guid, key, value, usn)
| StarcoderdataPython |
3391834 | from .conn import LogicalConnection
| StarcoderdataPython |
1601003 | <reponame>saschajullmann/sedotra
from app.crud.base import CRUDBase
from app.models.team import Team
from app.schemas.team import TeamCreate, TeamUpdate
class CRUDTeam(CRUDBase[Team, TeamCreate, TeamUpdate]):
pass
team = CRUDTeam(Team)
| StarcoderdataPython |
191836 | import os
from blockstack.client import BlockstackClient
token = os.environ.get('BK_TOKEN')
client = BlockstackClient(base_uri=('%s/api' % os.environ['BK_INSTANCE']), token=token)
alice = client.wallets.get('Blue')
bob = client.wallets.get('Red')
alice_oracle = client.oracles.get('Blue')
bob_oracle = client.oracles.get('Red')
alice_txs = alice.transactions
bob_txs = bob.transactions
send = alice_txs.create(asset='TRY', address=bob.assetAddress, amount=1)
print(send)
| StarcoderdataPython |
120759 | # -*- coding: utf-8 -*-
#
# File: cooper.py
# Author: <NAME> <<EMAIL>>
# Date: Fri Jan 20 16:12:23 2012
#
#
# Copyright (c) 2012, 2015 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Tests for cooper.
"""
import cooper
from itertools import repeat
import unittest
def make_test_hierarchy(trace, decorator=lambda x:x, metacls=type):
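	# Build a diamond hierarchy (_A <- _B, _C; _D(_B, _C); _F(_D, _A)) whose
	# cooperative methods append themselves to `trace`, so the tests can assert
	# the MRO call order.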
@decorator
class _A(object):
__metaclass__ = metacls
@cooper.cooperate
def __init__(self):
trace.append(_A.__init__)
@cooper.cooperative
def method(self, mparam):
self._a_mparam = mparam
trace.append(_A.method)
@cooper.cooperative
def post_method(self, pmparam):
self._a_pmparam = pmparam
trace.append(_A.post_method)
@decorator
class _B(_A):
__metaclass__ = metacls
@cooper.cooperate
def __init__(self, b_param = 'b_param'):
self._b_param = b_param
trace.append(_B.__init__)
@cooper.cooperate
def method(self, mparam, b_mparam='b_mparam'):
self._b_mparam = b_mparam
trace.append(_B.method)
@cooper.post_cooperate
def post_method(self, pmparam, b_pmparam='b_mparam'):
self._b_pmparam = b_pmparam
trace.append(_B.post_method)
@decorator
class _C(_A):
__metaclass__ = metacls
@cooper.cooperate
def __init__(self):
trace.append(_C.__init__)
@cooper.cooperate
def method(self, mparam):
self._c_mparam = mparam
trace.append(_C.method)
@cooper.post_cooperate
def post_method(self, pmparam):
self._c_pmparam = pmparam
trace.append(_C.post_method)
@decorator
class _D(_B, _C):
__metaclass__ = metacls
@cooper.cooperate
def __init__(self, d_param = 'd_param'):
self._d_param = d_param
trace.append(_D.__init__)
@cooper.cooperate
def method(self, mparam, d_mparam='d_mparam'):
self._d_mparam = d_mparam
trace.append(_D.method)
@cooper.post_cooperate
def post_method(self, pmparam, d_pmparam='d_mparam'):
self._d_pmparam = d_pmparam
trace.append(_D.post_method)
@decorator
class _F(_D, _A):
__metaclass__ = metacls
return _A, _B, _C, _D, _F
class TestCoop(unittest.TestCase):
cls_decorator = cooper.cooperative_class
cls_meta = type
def setUp(self):
self._trace = []
self._A, self._B, self._C, self._D, self._F = make_test_hierarchy(
self._trace,
decorator = self.cls_decorator.im_func,
metacls = self.cls_meta)
def test_init_parameter_passing(self):
obj = self._D()
self.assertEqual(obj._b_param, 'b_param')
self.assertEqual(obj._d_param, 'd_param')
obj = self._D(b_param = 'new_b_param')
self.assertEqual(obj._b_param, 'new_b_param')
self.assertEqual(obj._d_param, 'd_param')
obj = self._D(d_param = 'new_d_param')
self.assertEqual(obj._b_param, 'b_param')
self.assertEqual(obj._d_param, 'new_d_param')
obj = self._D(d_param = 'new_d_param',
b_param = 'new_b_param')
self.assertEqual(obj._b_param, 'new_b_param')
self.assertEqual(obj._d_param, 'new_d_param')
def test_init_check_no_positional(self):
def make_cls():
@self.cls_decorator.im_func
class _Bad(object):
__metaclass__ = self.cls_meta
@cooper.cooperate
def __init__(self, positional):
pass
self.assertRaises (cooper.CooperativeError, make_cls)
def test_init_check_no_variadic(self):
def make_cls():
@self.cls_decorator.im_func
class _Bad(object):
__metaclass__ = self.cls_meta
@cooper.cooperate
def __init__(self, *a):
pass
self.assertRaises (cooper.CooperativeError, make_cls)
def test_init_check_no_variadic_keywords(self):
def make_cls():
@self.cls_decorator.im_func
class _Bad(object):
__metaclass__ = self.cls_meta
@cooper.cooperate
def __init__(self, **k):
pass
self.assertRaises (cooper.CooperativeError, make_cls)
def test_init_must_cooperate(self):
def make_cls():
@self.cls_decorator.im_func
class _Bad(object):
__metaclass__ = self.cls_meta
def __init__(self):
pass
self.assertRaises (cooper.CooperativeError, make_cls)
def test_init_must_override(self):
def make_cls():
@self.cls_decorator.im_func
class _Bad(object):
__metaclass__ = self.cls_meta
@cooper.cooperative
def __init__(self):
pass
self.assertRaises (cooper.CooperativeError, make_cls)
def test_super_params_sends_params(self):
@self.cls_decorator.im_func
class _Fixed(self._F):
__metaclass__ = self.cls_meta
@cooper.cooperate_with_params(b_param='fixed_b_param')
def __init__(self):
pass
obj = _Fixed()
self.assertEqual(obj._b_param, 'fixed_b_param')
def test_manual_init(self):
outer_self = self
@self.cls_decorator.im_func
class _Manual(self._D):
__metaclass__ = self.cls_meta
@cooper.manual_cooperate
def __init__(self, *a, **k):
super(_Manual, self).__init__(*a, **k)
outer_self._trace.append(_Manual.__init__)
self._clear_trace()
_Manual()
self._check_trace_calls_with_mro(_Manual.__init__)
def test_can_mix_non_cooperative_superclass_single_inherit(self):
class NonCooperativeSuperClass(object):
pass
@self.cls_decorator.im_func
class _Good(NonCooperativeSuperClass):
__metaclass__ = self.cls_meta
self.assertTrue(isinstance(_Good(), _Good))
def test_can_not_mix_non_cooperative_superclass_multi_inherit(self):
class NonCooperativeSuperClass1(object):
pass
class NonCooperativeSuperClass2(object):
pass
def make_class():
@self.cls_decorator.im_func
class _Bad(NonCooperativeSuperClass1,
NonCooperativeSuperClass2):
__metaclass__ = self.cls_meta
self.assertRaises(cooper.CooperativeError, make_class)
def test_can_mix_non_cooperative_subclass(self):
class _Good(self._D):
pass
self._clear_trace()
_Good()
self._check_trace_calls_with_mro(self._D.__init__)
def test_abstract_method_forbids_instantiation(self):
@self.cls_decorator.im_func
class _ABC(self._D):
__metaclass__ = self.cls_meta
@cooper.abstract
def abstract_method(self):
return 0
self.assertRaises(TypeError, _ABC)
def test_override_abstract_method_enables_instantiation(self):
@self.cls_decorator.im_func
class _ABC(self._D):
__metaclass__ = self.cls_meta
@cooper.abstract
def abstract_method(self):
self._result = 1
@self.cls_decorator.im_func
class _Concrete(_ABC):
__metaclass__ = self.cls_meta
@cooper.cooperate
def abstract_method(self):
return self._result
self.assertEqual(_Concrete().abstract_method(), 1)
def test_compatible_abstract_method_forbids_instantiation(self):
import abc
@self.cls_decorator.im_func
class _ABC(self._D):
__metaclass__ = self.cls_meta
@abc.abstractmethod
def abstract_method(self):
return 0
self.assertRaises(TypeError, _ABC)
def test_compatible_override_abstract_method_enables_instantiation(self):
import abc
@self.cls_decorator.im_func
class _ABC(self._D):
__metaclass__ = self.cls_meta
@abc.abstractmethod
def abstract_method(self):
return 0
class _Concrete(_ABC):
def abstract_method(self):
return super(_Concrete, self).abstract_method()
self.assertEqual(_Concrete().abstract_method(), 0)
def test_conflict_raises_error(self):
@self.cls_decorator.im_func
class _A1(object):
__metaclass__ = self.cls_meta
@cooper.cooperative
def method(self):
pass
@self.cls_decorator.im_func
class _A2(object):
__metaclass__ = self.cls_meta
@cooper.cooperative
def method(self):
pass
def make_class():
@self.cls_decorator.im_func
class _A12(_A1, _A2):
__metaclass__ = self.cls_meta
@cooper.cooperate
def method(self):
pass
self.assertRaises(cooper.CooperativeError, make_class)
def test_mro_call_order(self):
for cls in (self._D, self._C, self._B, self._A):
obj = cls()
self._clear_trace()
obj.method(1)
self._check_trace_calls_with_mro(cls.method)
def test_post_mro_call_order(self):
for cls in (self._D, self._C, self._B, self._A):
obj = cls()
self._clear_trace()
obj.post_method(1)
self._check_trace_calls_with_mro(cls.post_method, reverse=True)
def test_mro_init_call_order(self):
for cls in (self._D, self._C, self._B, self._A):
self._clear_trace()
cls()
self._check_trace_calls_with_mro(cls.__init__)
def test_mro_does_not_decorate_undefined_init(self):
self._clear_trace()
self._F()
self._check_trace_calls_with_mro(self._D.__init__)
def test_inner_cooperate(self):
outer_self = self
@self.cls_decorator.im_func
class _Cls(self._D):
__metaclass__ = self.cls_meta
@cooper.inner_cooperate
def method(self, next_method, param):
next_method(b_mparam='new_b_mparam')
outer_self._trace.append(_Cls.method)
obj = _Cls()
self._clear_trace()
obj.method(1)
self._check_trace_calls_with_mro(_Cls.method)
self.assertEqual(obj._b_mparam, 'new_b_mparam')
def test_inner_error_call_too_much(self):
@self.cls_decorator.im_func
class _Cls(self._D):
__metaclass__ = self.cls_meta
@cooper.inner_cooperate
def method(self, next_method, param):
next_method()
next_method()
obj = _Cls()
self.assertRaises(cooper.CooperativeError, obj.method, 1)
def test_inner_error_not_call(self):
@self.cls_decorator.im_func
class _Cls(self._D):
__metaclass__ = self.cls_meta
@cooper.inner_cooperate
def method(self, next_method, param):
pass
obj = _Cls()
self.assertRaises(cooper.CooperativeError, obj.method, 1)
def _clear_trace(self):
self._trace[:] = []
def _check_trace_calls_with_mro(self, method, reverse=False):
cls = method.im_class
name = method.__name__
mro = cls.__mro__[:-1] # discard object
self.assertEqual(zip(mro if reverse else mro[::-1], repeat(name)),
[(m.im_class, m.__name__) for m in self._trace])
class TestCoopMeta(TestCoop):
cls_decorator = lambda x:x
cls_meta = cooper.CooperativeMeta
def test_meta_works_on_subclasses(self):
outer_self = self
class _NewClass(self._D):
@cooper.cooperate
def __init__(self):
outer_self._trace.append(_NewClass.__init__)
self._clear_trace()
_NewClass()
self._check_trace_calls_with_mro(_NewClass.__init__)
class _TestBase(object):
def __init__(self, param=None,*a, **k):
super(_TestBase, self).__init__(*a, **k)
assert param == 'param'
class _TestDeriv(_TestBase):
def __init__(self, *a, **k):
super(_TestDeriv, self).__init__(param='param',*a, **k)
class _CoopTestBase(cooper.Cooperative):
@cooper.cooperate
def __init__(self, param=None):
assert param == 'param'
class _CoopTestDeriv(_CoopTestBase):
@cooper.cooperate_with_params(param='param')
def __init__(self):
pass
class _SimpleTestBase(object):
def __init__(self, *a, **k):
super(_SimpleTestBase, self).__init__(*a, **k)
class _SimpleTestDeriv(_SimpleTestBase):
def __init__(self, *a, **k):
super(_SimpleTestDeriv, self).__init__(*a, **k)
class _CoopSimpleTestBase(cooper.Cooperative):
@cooper.cooperate
def __init__(self):
pass
class _CoopSimpleTestDeriv(_CoopSimpleTestBase):
@cooper.cooperate
def __init__(self):
pass
class _SuperSimpleTestBase(object): pass
class _SuperSimpleTestDeriv(_SuperSimpleTestBase): pass
class _SuperCoopSimpleTestBase(cooper.Cooperative): pass
class _SuperCoopSimpleTestDeriv(_SuperCoopSimpleTestBase): pass
class TestCoopPerformance(unittest.TestCase):
test_number = 1<<8
def test_performance_overhead_override(self):
import timeit
t1 = min(timeit.repeat(_SimpleTestDeriv, number=self.test_number))
t2 = min(timeit.repeat(_CoopSimpleTestDeriv, number=self.test_number))
print
print "Simple override -- "
print " Manual: ", t1
print " Coop: ", t2
print " Ratio: ", t2/t1
def test_performance_overhead_no_override(self):
import timeit
t1 = min(timeit.repeat(_SuperSimpleTestDeriv, number=self.test_number))
t2 = min(timeit.repeat(_SuperCoopSimpleTestDeriv, number=self.test_number))
print
print "No override -- "
print " Manual: ", t1
print " Coop: ", t2
print " Ratio: ", t2/t1
def test_performance_overhead_with_params(self):
import timeit
t1 = min(timeit.repeat(_TestDeriv, number=self.test_number))
t2 = min(timeit.repeat(_CoopTestDeriv, number=self.test_number))
print
print "Params -- "
print " Manual: ", t1
print " Coop: ", t2
print " Ratio: ", t2/t1
| StarcoderdataPython |
3265056 | <gh_stars>0
#Find Perfect Numbers
##TOOLS
def Divisors(num):
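	# Return the set of proper divisors of num (1 included, num itself excluded),
	# found by trial division up to sqrt(num).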
from math import sqrt as mmsq
s=set([1])
i=1
a=int(mmsq(num)+1)
while i<=a:
if(num//i==num):
i+=1
continue
if (num%i==0):
if (num//i!=i):
s.add(num//i)
s.add(i)
i+=1
return s
############################
##THE PROGRAM
def PerfectNumber(num):
return sum(Divisors(num))==num
def AbundantNumber(num):
return sum(Divisors(num))>num
def DeficientNumber(num):
return sum(Divisors(num))<num
def QuasiPerfectNumber(num):
return sum(Divisors(num))-num==1
def AlmostPerfectNumber(num):
return sum(Divisors(num))-num==-1
def SemiPerfectNumber(n):
factors = Divisors(n)
if(sum(factors)==n):
return True
factors = list(factors)[:-1]
num_factors = len(factors)
subset = [[0 for i in range(n + 1)]for j in range(num_factors + 1)]
for i in range(num_factors + 1):
subset[i][0]= True
for i in range(1, n + 1):
subset[0][i] = False
for i in range(1, num_factors + 1):
for j in range(1, n + 1):
if j < factors[i - 1]:
subset[i][j] = subset[i - 1][j]
else:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - factors[i - 1]]
if subset[num_factors][n] == 0:
return False
return True
def HemiPerfectNumber(num):
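	# Hemiperfect: the abundancy index sigma(n)/n equals k/2 for an odd k, i.e.
	# 2*sigma(n)/n is an odd integer; dv below is sigma(num).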
dv=sum(Divisors(num))+num
	return (dv * 2 / num) % 1 == 0 and (dv * 2 / num) % 2 != 0
def SuperPerfect(n,m=2,k=2):
dv=sum(Divisors(n))+n
for i in range(m-1):
dv=sum(Divisors(dv))+dv
return dv==k*n
def doTest(toPrint=False,toProgress=False,start=0,toEnd=1000,algo="perfect"):
s=set()
KK=10000
from IPython.display import clear_output
for i in range(start,toEnd+1):
if(toProgress and (i<KK or (i>=KK and i%(KK/100)==0))):
clear_output(wait=True)
print(i,end="\t")
if(algo=="perfect"):
pf=PerfectNumber(i)
elif(algo=="abundant"):
pf=AbundantNumber(i)
elif(algo=="deficient"):
pf=DeficientNumber(i)
elif(algo=="quasi"):
			pf=QuasiPerfectNumber(i)
elif(algo=="almost"):
pf=AlmostPerfectNumber(i)
elif(algo=="semi"):
pf=SemiPerfectNumber(i)
elif(algo=="hemi"):
pf=HemiPerfectNumber(i)
elif(algo=="super"):
pf=SuperPerfect(i)
if(pf):
s.add(i)
if(toPrint and not toProgress):
print(i,end=", ")
if(toProgress and (i<KK or (i>=KK and i%(KK/100)==0))):
print(s)
if(not toPrint):
return s
#PerfectNumber(28)
#AbundantNumber(18)
#DeficientNumber(23)
#Quasiperfect(20)
#AlmostPerfectNumber(32)
#SemiPerfectNumber(12)
#HemiPerfectNumber(24)
#SuperPerfect(64)
#doTest(True,False,0,20000) #348 ms
#doTest(True,False,0,20000,"abundant")
#doTest(True,False,0,20000,"deficient")
#doTest(True,False,0,20000,"quasi")
#doTest(True,False,0,20000,"almost")
#doTest(True,False,0,5000,"semi")
#doTest(True,False,1,5000,"hemi")
#doTest(True,False,0,20000,"super") | StarcoderdataPython |
8461 | <reponame>dumbPy/beancount_bot
import traceback
import telebot
from telebot import apihelper
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, MessageEntity, Message, CallbackQuery
from beancount_bot import transaction
from beancount_bot.config import get_config, load_config
from beancount_bot.dispatcher import Dispatcher
from beancount_bot.i18n import _
from beancount_bot.session import get_session, SESS_AUTH, get_session_for, set_session
from beancount_bot.task import load_task, get_task
from beancount_bot.transaction import get_manager
from beancount_bot.util import logger
apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(token=None, parse_mode=None)
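# The real token and proxy are filled in later by serving() from the configuration.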
@bot.middleware_handler(update_types=['message'])
def session_middleware(bot_instance, message):
"""
Session middleware
:param bot_instance:
:param message:
:return:
"""
bot_instance.session = get_session_for(message.from_user.id)
#######
# Authentication #
#######
def check_auth() -> bool:
"""
Check if you log in
:return:
"""
return SESS_AUTH in bot.session and bot.session[SESS_AUTH]
@bot.message_handler(commands=['start'])
def start_handler(message: Message):
"""
First chat time authentication
:param message:
:return:
"""
auth = get_session(message.from_user.id, SESS_AUTH, False)
if auth:
bot.reply_to(message, _("Have been authenticated!"))
return
# 要求鉴权
bot.reply_to(message, _("Welcome to the accounting robot!Please enter the authentication token:"))
def auth_token_handler(message: Message):
"""
Login token callback
:param message:
:return:
"""
if check_auth():
return
	# When not yet authenticated, the incoming message is treated as the authentication token
auth_token = get_config('bot.auth_token')
if auth_token == message.text:
set_session(message.from_user.id, SESS_AUTH, True)
bot.reply_to(message, _("Authentic success!"))
else:
bot.reply_to(message, _("Authentication token error!"))
#######
# instruction #
#######
@bot.message_handler(commands=['reload'])
def reload_handler(message):
"""
	Reload configuration command
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
load_config()
load_task()
bot.reply_to(message, _("Successful overload configuration!"))
@bot.message_handler(commands=['help'])
def help_handler(message):
"""
Help instruction
:param message:
:return:
"""
cmd = message.text
dispatchers = get_manager().dispatchers
if cmd == '/help':
# Create a message button
markup = InlineKeyboardMarkup()
for ind, d in zip(range(len(dispatchers)), dispatchers):
help_btn = _("help:{name}").format(name=d.get_name())
markup.add(InlineKeyboardButton(help_btn, callback_data=f'help:{ind}'))
# 帮助信息
command_usage = [
_("/start - Authentication"),
_("/help - Using help"),
_("/reload - Reload the configuration file"),
_("/task - View, run the task"),
]
help_text = \
_("Account bill Bot\n\nAvailable instruction list:\n{command}\n\nTrade statement syntax help, select the corresponding module,Use /help [Module name] Check.").format(
command='\n'.join(command_usage))
bot.reply_to(message, help_text, reply_markup=markup)
else:
# Display detailed help
name: str = cmd[6:]
flag_found = False
for d in dispatchers:
if name.lower() == d.get_name().lower():
show_usage_for(message, d)
flag_found = True
if not flag_found:
bot.reply_to(message, _("The corresponding name of the transaction statement processor does not exist!"))
def show_usage_for(message: Message, d: Dispatcher):
"""
Show the method of use of a specific processor
:param message:
:param d:
:return:
"""
usage = _("help:{name}\n\n{usage}").format(name=d.get_name(), usage=d.get_usage())
bot.reply_to(message, usage)
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'help')
def callback_help(call: CallbackQuery):
"""
Help statement detailed help
:param call:
:return:
"""
try:
d_id = int(call.data[5:])
dispatchers = get_manager().dispatchers
show_usage_for(call.message, dispatchers[d_id])
except Exception as e:
logger.error(f'{call.id}:Unknown error!', e)
logger.error(traceback.format_exc())
bot.answer_callback_query(call.id, _("Unknown error!\n"+traceback.format_exc()))
@bot.message_handler(commands=['task'])
def task_handler(message):
"""
Task instruction
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
cmd = message.text
tasks = get_task()
if cmd == '/task':
# Show all tasks
all_tasks = ', '.join(tasks.keys())
bot.reply_to(message,
_("Current registration task:{all_tasks}\n"
"able to pass /task [Task Name] Active trigger").format(all_tasks=all_tasks))
else:
# Run task
dest = cmd[6:]
if dest not in tasks:
bot.reply_to(message, _("Task does not exist!"))
return
task = tasks[dest]
task.trigger(bot)
#######
# Transactions #
#######
@bot.message_handler(func=lambda m: True)
def transaction_query_handler(message: Message):
"""
    Transaction statement processing
:param message:
:return:
"""
if not check_auth():
auth_token_handler(message)
return
    # Process the statement
manager = get_manager()
try:
tx_uuid, tx = manager.create_from_str(message.text)
# Create a message button
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton(_("Revoke trading"), callback_data=f'withdraw:{tx_uuid}'))
        # Reply
bot.reply_to(message, transaction.stringfy(tx), reply_markup=markup)
except ValueError as e:
logger.info(f'{message.from_user.id}:Unable to add transactions', e)
bot.reply_to(message, e.args[0])
except Exception as e:
        logger.error(f'{message.from_user.id}:Unknown error! Failed to add the transaction.', e)
        bot.reply_to(message, _("Unknown error! Failed to add the transaction.") + "\n" + traceback.format_exc())
@bot.callback_query_handler(func=lambda call: call.data[:8] == 'withdraw')
def callback_withdraw(call: CallbackQuery):
"""
Transaction withdrawal callback
:param call:
:return:
"""
auth = get_session(call.from_user.id, SESS_AUTH, False)
if not auth:
bot.answer_callback_query(call.id, _("Please conduct authentication first!"))
return
tx_uuid = call.data[9:]
manager = get_manager()
try:
manager.remove(tx_uuid)
# Modify the original message reply
message = _("Transaction has been withdrawn")
code_format = MessageEntity('code', 0, len(message))
bot.edit_message_text(message,
chat_id=call.message.chat.id,
message_id=call.message.message_id,
entities=[code_format])
except ValueError as e:
        logger.info(f'{call.id}:Unable to withdraw transaction', e)
bot.answer_callback_query(call.id, e.args[0])
except Exception as e:
        logger.error(f'{call.id}:Unknown error! Failed to withdraw the transaction.', e)
        bot.answer_callback_query(call.id, _("Unknown error! Failed to withdraw the transaction."))
def serving():
"""
    Start the bot
:return:
"""
    # Set the token
token = get_config('bot.token')
bot.token = token
# Set a proxy
proxy = get_config('bot.proxy')
if proxy is not None:
apihelper.proxy = {'https': proxy}
    # Start polling
bot.infinity_polling()
| StarcoderdataPython |
101204 | """
Parameters and syntactic sugar.
"""
def dec(func):
def wrapper(*args, **kwargs):
print('Top decoration')
rv = func(*args, **kwargs)
print('Bottom decoration')
return rv
return wrapper
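# The @dec line below is syntactic sugar for: sum_it = dec(sum_it)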
@dec
def sum_it(a, b):
    return a + b
x = sum_it(10, 5)
print(x)
| StarcoderdataPython |
1764392 | <gh_stars>1-10
"""TEST MODULE TEMPLATE"""
from advent_of_code.utils.parse import parse_guard_records
from advent_of_code.y2018.d4 import solution_1
from advent_of_code.y2018.d4 import solution_2
def test_solution_1():
example_input = """[1518-11-01 00:00] Guard #10 begins shift
[1518-11-01 00:05] falls asleep
[1518-11-01 00:25] wakes up
[1518-11-01 00:30] falls asleep
[1518-11-01 00:55] wakes up
[1518-11-01 23:58] Guard #99 begins shift
[1518-11-02 00:40] falls asleep
[1518-11-02 00:50] wakes up
[1518-11-03 00:05] Guard #10 begins shift
[1518-11-03 00:24] falls asleep
[1518-11-03 00:29] wakes up
[1518-11-04 00:02] Guard #99 begins shift
[1518-11-04 00:36] falls asleep
[1518-11-04 00:46] wakes up
[1518-11-05 00:03] Guard #99 begins shift
[1518-11-05 00:45] falls asleep
[1518-11-05 00:55] wakes up"""
example_result = 240
assert solution_1(parse_guard_records(example_input)) == example_result
def test_solution_2():
example_input = """[1518-11-01 00:00] Guard #10 begins shift
[1518-11-01 00:05] falls asleep
[1518-11-01 00:25] wakes up
[1518-11-01 00:30] falls asleep
[1518-11-01 00:55] wakes up
[1518-11-01 23:58] Guard #99 begins shift
[1518-11-02 00:40] falls asleep
[1518-11-02 00:50] wakes up
[1518-11-03 00:05] Guard #10 begins shift
[1518-11-03 00:24] falls asleep
[1518-11-03 00:29] wakes up
[1518-11-04 00:02] Guard #99 begins shift
[1518-11-04 00:36] falls asleep
[1518-11-04 00:46] wakes up
[1518-11-05 00:03] Guard #99 begins shift
[1518-11-05 00:45] falls asleep
[1518-11-05 00:55] wakes up"""
example_result = 4455
assert solution_2(parse_guard_records(example_input)) == example_result
| StarcoderdataPython |
14213 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 2 13:09:55 2018
@author: mali
"""
#import time
import pickle
import pyNN.utility.plotting as plot
import matplotlib.pyplot as plt
import comn_conversion as cnvrt
import prnt_plt_anmy as ppanmy
# file and folder names =======================================================
fldr_name = 'rslts/icub64x64/'
pickle_filename = 'TDXY.pickle'
file_pth = cnvrt.read_flenfldr_ncrntpth(fldr_name, pickle_filename )
with open(file_pth , 'rb') as tdxy:
TDXY = pickle.load( tdxy )
print '### length of TDXY : {}'.format( len(TDXY) ) # 2+ 2*n_orn )
pop = TDXY[0]
t_ist = 1040
print 'check pop: L_rtna_TDXY'
print '### T : {}'.format(pop[0][t_ist]) # dimension 4 x t_stp x depend
print '### 1D : {}'.format(pop[1][t_ist]) # dimension 4 x t_stp x depend
print '### X : {}'.format(pop[2][t_ist]) # dimension 4 x t_stp x depend
print '### Y : {}'.format(pop[3][t_ist]) # dimension 4 x t_stp x depend
print pop[0]
print pop[1]
#required variables============================================================
n_rtna = 2 # till now should be two
n_orn = 4
rtna_w = 64
rtna_h = 64
krnl_sz = 5
rf_w = rtna_w - krnl_sz +1
rf_h = rtna_h - krnl_sz +1
subplt_rws = n_rtna
subplt_cls = n_orn+1
########### to make animation fast as scale now in micro second ###############
# first, reduce the time resolution by taking every 100th entry ======================================
T=TDXY[0][0]
t10u=T [0:T[-1]:100]
#print '### t_10u : {}'.format(t10u)
# second, find all times at which any of the rtna or rf populations has spikes ==================
t_spks=[]
for pop in range ( len(TDXY) ):
for inst in range( len(TDXY[pop][0]) ):
if TDXY[pop][2][inst]!=[] :
t_spks.append( TDXY[pop][0][inst] )
print pop, TDXY[pop][0][inst]
# Remove duplicate spike times while keeping the list sorted
# (removing items from the list while iterating over it can skip elements)
t_spks = sorted(set(t_spks))
print 't_spks : {}'.format( t_spks )
#animate the rtna_rf =========================================================
#print 'abplt_rw, sbplt_cl, rtna_w, rtna_h, rf_w, rf_h: {}, {}, {}, {}, {}, {} '.format(subplt_rws, subplt_cls, rtna_w, rtna_h, rf_w, rf_h)
fig, axs = plt.subplots(subplt_rws, subplt_cls, sharex=False, sharey=False) #, figsize=(12,5))
axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
plt.grid(True)
plt.show(block=False)
plt.pause(.01)
#for i in t_spks: #t10u:
# axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
# plt.suptitle('rtna_rf_orn_3: t= {} usec'.format( i ) )
# if subplt_rws==1:
# axs[0].scatter( TDXY[0][2][i], TDXY[0][3][i] )
# for col in range (subplt_cls):
# axs[col].scatter( TDXY[col+1][2][i], TDXY[col+1][3][i] )
## plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
# plt.show(block=False)
# plt.pause(2)
# for col in range(subplt_cls):
# axs[col].cla()
#
# elif subplt_rws==2:
# for col in range (subplt_cls):
# axs[0][0].scatter( TDXY[0][2][i], TDXY[0][3][i] )
# axs[1][0].scatter( TDXY[1][2][i], TDXY[1][3][i] )
# for col in range(1,n_orn+1):
# row=0
# axs[row][col].scatter( TDXY[col+1][2][i], TDXY[col+1][3][i] )
# for col in range(1,n_orn):
# row=1
# axs[row][col].scatter( TDXY[n_orn+1+col][2][i], TDXY[n_orn+1+col][3][i] )
## plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
# plt.show(block=False)
# plt.pause(2)
# for row in range(subplt_rws):
# for col in range (subplt_cls):
# axs[row][col].cla()
#
print '##### required variables: \n n_rtna={}, TDXY_len={}, rtna_w={}, rtna_h={}, krnl_sz={}, rf_w={} , rf_h={}'.format( n_rtna , len(TDXY), rtna_w, rtna_h, krnl_sz, rf_w , rf_h )
plt.show(block=False)
last_t_spks=-310
for i in range( len(t_spks) ): #t10u:
# plt.pause(2)
if t_spks[i]-last_t_spks > 300:
#clear
if subplt_rws==2:
for row in range(subplt_rws):
for col in range (subplt_cls):
axs[row][col].cla()
elif subplt_rws==1:
for col in range(subplt_cls):
axs[col].cla()
axs = ppanmy.init_fig_mxn_sbplt_wxh_res (fig, axs, rtna_h, rtna_w, rf_w, rf_h, subplt_rws, subplt_cls)
plt.suptitle('rtna_rf_orn: t= {} usec'.format( t_spks[i] ) )
plt.pause(1.5)
#--------------------------------------------------------------------------
if subplt_rws==1:
axs[0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
for col in range (subplt_cls):
axs[col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
elif subplt_rws==2:
for col in range (subplt_cls):
axs[0][0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
axs[1][0].scatter( TDXY[1][2][t_spks[i]], TDXY[1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=0
axs[row][col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=1
axs[row][col].scatter( TDXY[n_orn+1+col][2][t_spks[i]], TDXY[n_orn+1+col][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
#--------------------------------------------------------------------------
plt.pause(.5)
else: #====================================================================
#--------------------------------------------------------------------------
if subplt_rws==1:
axs[0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
for col in range (subplt_cls):
axs[col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
elif subplt_rws==2:
for col in range (subplt_cls):
axs[0][0].scatter( TDXY[0][2][t_spks[i]], TDXY[0][3][t_spks[i]] )
axs[1][0].scatter( TDXY[1][2][t_spks[i]], TDXY[1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=0
axs[row][col].scatter( TDXY[col+1][2][t_spks[i]], TDXY[col+1][3][t_spks[i]] )
for col in range(1,n_orn+1):
row=1
axs[row][col].scatter( TDXY[n_orn+1+col][2][t_spks[i]], TDXY[n_orn+1+col][3][t_spks[i]] )
# plt.savefig( 'fgrs/anmy_1/{}_t{}.png'.format(vrjn, i) )
#--------------------------------------------------------------------------
plt.pause(.5)
last_t_spks = t_spks[i]
# using builtin animation function ===========================================
#strt_tm = TDXY[0][0][0]
#stop_tm = TDXY[0][0][-1]
#print '\n### n_orn x n_rtna : {}x{}'.format(n_orn, n_rtna)
#print '\n### strt_tm - stop_tm : {} - {}'.format(strt_tm, stop_tm)
#ppanmy.anmy_rtna_rf_orn( TDXY, rtna_h, rtna_w, n_rtna, krnl_sz, strt_tm , stop_tm)
| StarcoderdataPython |
1799678 | <filename>gyp/ios.gyp
{
'includes': [
'../ios/app/mapboxgl-app.gypi',
'../ios/benchmark/benchmark-ios.gypi',
],
}
| StarcoderdataPython |
3230091 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
import numpy as np
NUM_CLASSES = 21
class SimpleClassifier(nn.Module):
def __init__(self):
super(SimpleClassifier, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.conv2 = nn.Conv2d(64, 32, 3)
self.conv3 = nn.Conv2d(32, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 26 * 26, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, NUM_CLASSES)
self.relu = nn.ReLU()
def forward(self, x):
x = self.pool(self.relu(self.conv1(x)))
x = self.pool(self.relu(self.conv2(x)))
x = self.pool(self.relu(self.conv3(x)))
x = x.view(x.size()[0], 16 * 26 * 26)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3)
self.bn1 = nn.BatchNorm2d(32)
self.conv1_1 = nn.Conv2d(32, 64, 3)
self.bn1_1 = nn.BatchNorm2d(64)
self.dropout1 = nn.Dropout(0.2)
self.conv2 = nn.Conv2d(64, 32, 3)
self.bn2 = nn.BatchNorm2d(32)
self.dropout2 = nn.Dropout(0.3)
self.conv3 = nn.Conv2d(32, 32, 3)
self.bn3 = nn.BatchNorm2d(32)
self.dropout3 = nn.Dropout(0.4)
self.conv4 = nn.Conv2d(32, 16, 3)
self.bn4 = nn.BatchNorm2d(16)
self.dropout4 = nn.Dropout(0.5)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 12 * 12, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, NUM_CLASSES)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn1_1(self.conv1_1(x)))
# x = self.dropout1(x)
x = self.pool(x)
x = self.relu(self.bn2(self.conv2(x)))
# x = self.dropout2(x)
x = self.pool(x)
x = self.relu(self.bn3(self.conv3(x)))
# x = self.dropout3(x)
x = self.pool(x)
x = self.relu(self.bn4(self.conv4(x)))
# x = self.dropout4(x)
x = self.pool(x)
x = x.view(x.size()[0], 16 * 12 * 12)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
# MultiScale VGG Network
class MSVGG16(nn.Module):
# image size: n, 3, 227, 227
def __init__(self):
super(MSVGG16, self).__init__()
d = 16
self.conv1 = nn.Conv2d(3, d, 3, padding=1)
self.conv1_1 = nn.Conv2d(d, d, 3, padding=1)
self.bn1 = nn.BatchNorm2d(d)
self.drop1 = nn.Dropout(0.1)
self.conv2 = nn.Conv2d(d, 2*d, 3, padding=1)
self.conv2_1 = nn.Conv2d(2*d, 2*d, 3, padding=1)
self.bn2 = nn.BatchNorm2d(2*d)
self.drop2 = nn.Dropout(0.2)
self.conv3 = nn.Conv2d(2*d, 4*d, 3, padding=1)
self.conv3_1 = nn.Conv2d(4*d, 4*d, 3, padding=1)
self.conv3_2 = nn.Conv2d(4*d, 4*d, 3, padding=1)
self.bn3 = nn.BatchNorm2d(4*d)
self.drop3 = nn.Dropout(0.3)
self.conv4 = nn.Conv2d(4*d, 2*d, 3, padding=1)
self.conv4_1 = nn.Conv2d(2*d, 2*d, 3, padding=1)
self.conv4_2 = nn.Conv2d(2*d, 2*d, 3, padding=1)
self.bn4 = nn.BatchNorm2d(2*d)
self.drop4 = nn.Dropout(0.5)
self.conv5= nn.Conv2d(2*d, d, 3, padding=1)
self.conv5_1 = nn.Conv2d(d, d, 3, padding=1)
self.conv5_2 = nn.Conv2d(d, d, 3, padding=1)
self.bn5 = nn.BatchNorm2d(d)
self.drop5 = nn.Dropout(0.5)
self.pool = nn.MaxPool2d(2, 2)
self.pool_4 = nn.MaxPool2d(4, 4)
self.pool_8 = nn.MaxPool2d(8, 8)
# self.fc1 = nn.Linear(14*14*(16+32+64+128), 10000)
        # fc1 input must match the concatenated multi-scale feature built in forward():
        # (16 + 32 + 64 + 32) channels at 14 x 14
        self.fc1 = nn.Linear(14*14*(16+32+64+32), 4096)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(4096, 4096)
self.fc3 = nn.Linear(4096, NUM_CLASSES)
self.relu = nn.ReLU()
def forward(self, x):
# x: 3 x 227 x 227
conv1 = self.relu(self.bn1(self.conv1(x)))
conv1 = self.relu(self.bn1(self.conv1_1(conv1)))
conv1 = self.drop1(conv1)
conv1 = self.pool(conv1)
# conv1: d x 113 x 113
conv2 = self.relu(self.bn2(self.conv2(conv1)))
conv2 = self.relu(self.bn2(self.conv2_1(conv2)))
conv2 = self.drop2(conv2)
conv2 = self.pool(conv2)
        # conv2: 2*d (32) x 56 x 56
conv3 = self.relu(self.bn3(self.conv3(conv2)))
conv3 = self.relu(self.bn3(self.conv3_1(conv3)))
conv3 = self.relu(self.bn3(self.conv3_2(conv3)))
conv3 = self.drop3(conv3)
conv3 = self.pool(conv3)
        # conv3: 4*d (64) x 28 x 28
conv4 = self.relu(self.bn4(self.conv4(conv3)))
conv4 = self.relu(self.bn4(self.conv4_1(conv4)))
conv4 = self.relu(self.bn4(self.conv4_2(conv4)))
conv4 = self.drop4(conv4)
conv4 = self.pool(conv4)
        # conv4: 2*d (32) x 14 x 14
conv5 = self.relu(self.bn5(self.conv5(conv4)))
conv5 = self.relu(self.bn5(self.conv5_1(conv5)))
conv5 = self.relu(self.bn5(self.conv5_2(conv5)))
conv5 = self.drop5(conv5)
conv5 = self.pool(conv5)
        # conv5: d (16) x 7 x 7
# MultiScale Feature from conv1, conv2, and conv3
multi_scale1 = self.pool_8(conv1) # 16 x 14 x 14
multi_scale2 = self.pool_4(conv2) # 32 x 14 x 14
multi_scale3 = self.pool(conv3) # 64 x 14 x 14
#
flat1 = multi_scale1.view(multi_scale1.size()[0], 16 * 14 * 14)
flat2 = multi_scale2.view(multi_scale2.size()[0], 32 * 14 * 14)
flat3 = multi_scale3.view(multi_scale3.size()[0], 64 * 14 * 14)
flat4 = conv4.view(conv4.size()[0], 32 * 14 * 14)
flat5 = conv5.view(conv5.size()[0], 16 * 7 * 7)
multi_scale_all = torch.cat((flat1, flat2, flat3, flat4), dim = 1)
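        # Concatenated multi-scale feature: (16 + 32 + 64 + 32) * 14 * 14 = 28224 values per sample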
fc1 = self.relu(self.fc1(multi_scale_all))
# fc1 = self.relu(self.fc1(flat5))
fc1 = self.dropout(fc1)
fc2 = self.relu(self.fc2(fc1))
fc2 = self.dropout(fc2)
fc3 = self.fc3(fc2)
return fc3
# Network based on ResNet
| StarcoderdataPython |
1655414 | <reponame>SDomarecki/WSEOptimizer<gh_stars>1-10
from datetime import date
from app.config import Config
def test_init_validData_createsValidConfig():
test__fetch_database_config_validData_fetchesValidVariables()
test__fetch_simulation_config_validData_fetchesValidVariables()
test__fetch_selection_config_validData_fetchesValidVariables()
test__fetch_crossover_config_validData_fetchesValidVariables()
test__fetch_mutation_config_validData_fetchesValidVariables()
test__fetch_wallet_config_validData_fetchesValidVariables()
test__fetch_genes_config_validData_fetchesValidVariables()
def test_init_emptyData_createsDefaultConfig():
test__fetch_database_config_emptyDict_returnsDefaultSettings()
test__fetch_simulation_config_emptyDict_returnsDefaultSettings()
test__fetch_selection_config_emptyDict_returnsDefaultSettings()
test__fetch_crossover_config_emptyDict_returnsDefaultSettings()
test__fetch_mutation_config_emptyDict_returnsDefaultSettings()
test__fetch_wallet_config_emptyDict_returnsDefaultSettings()
test__fetch_genes_config_emptyDict_returnsDefaultSettings()
def test__fetch_database_config_validData_fetchesValidVariables():
db_config = {
"min_circulation": 1,
"max_circulation": 1,
"sectors": ["sector"],
"companies": ["company"],
"chunks": 1,
}
config = Config()
config._fetch_database_config(db_config)
assert (
config.min_circulation == 1
and config.max_circulation == 1
and config.sectors == ["sector"]
and config.companies == ["company"]
and config.chunks == 1
)
def test__fetch_database_config_emptyDict_returnsDefaultSettings():
db_config = {}
config = Config()
config._fetch_database_config(db_config)
assert (
config.min_circulation == 0
and config.max_circulation == 0
and config.sectors == []
and config.companies == []
and config.chunks == 0
)
def test__fetch_simulation_config_validData_fetchesValidVariables():
sim_config = {
"timedelta": 1,
"iterations": 1,
"initial_population": 1,
"learning": {"start_date": "2010-01-01", "end_date": "2020-01-01"},
"testing": [{"start_date": "2010-01-01", "end_date": "2020-01-01"}],
}
config = Config()
config._fetch_simulation_config(sim_config)
assert (
config.timedelta == 1
and config.iterations == 1
and config.initial_population == 1
and config.start_date == date(2010, 1, 1)
and config.end_date == date(2020, 1, 1)
and config.validations[0][0] == date(2010, 1, 1)
and config.validations[0][1] == date(2020, 1, 1)
)
def test__fetch_simulation_config_emptyDict_returnsDefaultSettings():
sim_config = {}
config = Config()
config._fetch_simulation_config(sim_config)
assert (
config.timedelta == 0
and config.iterations == 0
and config.initial_population == 0
and config.start_date == date(2000, 1, 1)
and config.end_date == date(2000, 1, 1)
and config.validations == []
)
def test__fetch_selection_config_validData_fetchesValidVariables():
sel_config = {"method": "roulette", "agents_to_save": 1.0}
config = Config()
config._fetch_selection_config(sel_config)
assert config.selection_method == "roulette" and config.agents_to_save == 1.0
def test__fetch_selection_config_emptyDict_returnsDefaultSettings():
sel_config = {}
config = Config()
config._fetch_selection_config(sel_config)
assert config.selection_method == "roulette" and config.agents_to_save == 0.0
def test__fetch_crossover_config_validData_fetchesValidVariables():
cross_config = {"constant_length": False, "initial_genes": 1, "max_genes": 1}
config = Config()
config._fetch_crossover_config(cross_config)
assert (
config.constant_length is False
and config.initial_length == 1
and config.max_genes == 1
)
def test__fetch_crossover_config_emptyDict_returnsDefaultSettings():
cross_config = {}
config = Config()
config._fetch_crossover_config(cross_config)
assert (
config.constant_length is True
and config.initial_length == 0
and config.max_genes == 0
)
def test__fetch_mutation_config_validData_fetchesValidVariables():
mutation_config = {"method": "gene_creation", "rate": 0.1}
config = Config()
config._fetch_mutation_config(mutation_config)
assert config.mutation_method == "gene_creation" and config.mutation_rate == 0.1
def test__fetch_mutation_config_emptyDict_returnsDefaultSettings():
mutation_config = {}
config = Config()
config._fetch_mutation_config(mutation_config)
assert config.mutation_method == "normalization" and config.mutation_rate == 0.0
def test__fetch_wallet_config_validData_fetchesValidVariables():
wallet_config = {
"start_cash": 1,
"return_method": "sharpe",
"benchmark": "wig",
"risk_free_return": 1.0,
"fees": {"min": 1.0, "rate": 1.0, "added": 1.0, "max": 1.0},
}
config = Config()
config._fetch_wallet_config(wallet_config)
assert (
config.start_cash == 1
and config.return_method == "sharpe"
and config.benchmark == "wig"
and config.risk_free_return == 1.0
and config.fee_min == 1.0
and config.fee_rate == 1.0
and config.fee_added == 1.0
and config.fee_max == 1.0
)
def test__fetch_wallet_config_emptyDict_returnsDefaultSettings():
wallet_config = {}
config = Config()
config._fetch_wallet_config(wallet_config)
assert (
config.start_cash == 0
and config.return_method == "total_value"
and config.benchmark == ""
and config.risk_free_return == 0.0
and config.fee_min == 0.0
and config.fee_rate == 0.0
and config.fee_added == 0.0
and config.fee_max == 0.0
)
def test__fetch_genes_config_validData_fetchesValidVariables():
genes_config = {
"fin_statement_lag": 123,
"logic_to_all": 1.0,
"fundamental_to_all": 1.0,
}
config = Config()
config._fetch_genes_config(genes_config)
assert (
config.fin_statement_lag == 123
and config.logic_to_all == 1.0
and config.fundamental_to_all == 1.0
)
def test__fetch_genes_config_emptyDict_returnsDefaultSettings():
genes_config = {}
config = Config()
config._fetch_genes_config(genes_config)
assert (
config.fin_statement_lag == 135
and config.logic_to_all == 0.0
and config.fundamental_to_all == 0.0
)
| StarcoderdataPython |
4821547 | <filename>src/helpers/split_email.py
def split_email(email):
"""
Input: string
Returns: "username"
If the "email = x" argument is provided to the main function, split_email
is called. Splits string containing an email address on the '@',
returns 0th element.
"""
    username = email.split('@')[0]
return username
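
# Example (hypothetical address, shown for illustration):
# split_email('jane.doe@example.com') returns 'jane.doe'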
| StarcoderdataPython |
3386291 | <filename>3_data_cleaning.py<gh_stars>1-10
import pymongo
import pandas as pd
def convert_to_excel(pdict):
columns = ['职位ID', '公司ID', '国家', '经度', '纬度', '行业领域', '教育水平', '工作经验', '城市', '区域', '职位诱惑',
'最低工资', '最高工资', '平均工资', '职位名称', '公司规模', '公司缩写名', '财务阶段', '工作性质', '公司标签',
'职位标签', '行业标签', '公司全名', '第一类别', '第二类别', '第三类别', '技术标签']
cc_dict = {}
for c in columns:
cc_dict[c] = []
for k, v in pdict.items():
print(k)
for c in columns:
cc_dict[c].append(v.get(c))
df = pd.DataFrame(data=cc_dict, columns=columns)
df.to_excel('拉勾网——数据分析岗位.xlsx', na_rep="NULL")
def data_clean():
position_dict = {}
for w in collection.find():
positionId = w.get("positionId")
print(positionId)
salary = w.get("salary")
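        # e.g. a salary string like "10k-20k" becomes low=10000, high=20000, average=15000.0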
low_salary = salary.lower().split("-")[0].replace("k", "000")
high_salary = salary.lower().split("-")[1].replace("k", "000")
average_salary = (int(low_salary)+int(high_salary))/2
position_dict[positionId] = {
"职位ID": positionId,
"经度": w.get("longitude"),
"纬度": w.get("latitude"),
"国家": "中国",
"公司ID": w.get("companyId"),
"行业领域": w.get("industryField"),
"教育水平": w.get("education"),
"工作经验": w.get("workYear"),
"城市": w.get("city"),
"区域": w.get("district"),
"职位诱惑": w.get("positionAdvantage"),
"最低工资": low_salary,
"最高工资": high_salary,
"平均工资": average_salary,
"职位名称": w.get("positionName"),
"公司规模": w.get("companySize"),
"公司缩写名": w.get("companyShortName"),
"财务阶段": w.get("financeStage"),
"工作性质": w.get("jobNature"),
"公司标签": w.get("companyLabelList"),
"职位标签": w.get("positionLables"),
"行业标签": w.get("industryLables"),
"公司全名": w.get("companyFullName"),
"第一类别": w.get("firstType"),
"第二类别": w.get("secondType"),
"第三类别": w.get("thirdType"),
"技术标签": w.get("skillLables")
}
return position_dict
def main():
position_dict = data_clean()
convert_to_excel(position_dict)
if __name__ == '__main__':
client = pymongo.MongoClient(host='localhost', port=27017)
db = client['lagou']
collection = db["position"]
main()
| StarcoderdataPython |
1707783 | import copy
from spec_classes.types.missing import _MissingType, MISSING
def test_missing():
assert MISSING is _MissingType()
assert bool(MISSING) is False
assert repr(MISSING) == "MISSING"
assert copy.copy(MISSING) is MISSING
assert copy.deepcopy(MISSING) is MISSING
| StarcoderdataPython |
1720600 | ######################################################################
# @author : bidaya0 (<EMAIL>@$HOSTNAME)
# @file : api_route
# @created : Tuesday Aug 17, 2021 17:43:27 CST
#
# @description :
######################################################################
from django.db import models
from dongtai.utils.settings import get_managed
from dongtai.models.agent import IastAgent
class HttpMethod(models.Model):
method = models.CharField(max_length=100, blank=True)
class Meta:
managed = get_managed()
db_table = 'iast_http_method'
class IastApiMethod(models.Model):
method = models.CharField(max_length=100, blank=True)
http_method = models.ManyToManyField(
HttpMethod, blank=True, through='IastApiMethodHttpMethodRelation')
class Meta:
managed = get_managed()
db_table = 'iast_api_methods'
class IastApiMethodHttpMethodRelation(models.Model):
api_method = models.ForeignKey(IastApiMethod,
on_delete=models.CASCADE,
db_constraint=False,
db_column='api_method_id')
http_method = models.ForeignKey(HttpMethod,
on_delete=models.CASCADE,
db_constraint=False,
db_column='http_method_id')
class Meta:
managed = get_managed()
db_table = 'iast_http_method_relation'
unique_together = ['api_method_id', 'http_method_id']
class IastApiRoute(models.Model):
path = models.CharField(max_length=255, blank=True)
code_class = models.CharField(max_length=255,
blank=True,
db_column='code_class')
description = models.CharField(max_length=500, blank=True)
method = models.ForeignKey(IastApiMethod,
on_delete=models.DO_NOTHING,
db_constraint=False,
db_index=True,
db_column='method_id')
code_file = models.CharField(max_length=500,
blank=True,
db_column='code_file')
controller = models.CharField(max_length=100, blank=True)
agent = models.ForeignKey(IastAgent,
on_delete=models.CASCADE,
db_constraint=False,
db_index=True,
db_column='agent_id')
class Meta:
managed = get_managed()
db_table = 'iast_api_route'
unique_together = ['path', 'method']
class IastApiParameter(models.Model):
name = models.CharField(max_length=100, blank=True)
parameter_type = models.CharField(max_length=100,
blank=True,
db_column='type')
annotation = models.CharField(max_length=500, blank=True)
route = models.ForeignKey(IastApiRoute,
on_delete=models.CASCADE,
db_constraint=False,
db_index=True,
db_column='route_id')
class Meta:
managed = get_managed()
db_table = 'iast_api_parameter'
unique_together = ['name', 'route_id']
class IastApiResponse(models.Model):
return_type = models.CharField(max_length=100, blank=True)
route = models.ForeignKey(IastApiRoute,
on_delete=models.CASCADE,
db_constraint=False,
db_index=True,
db_column='route_id')
class Meta:
managed = get_managed()
db_table = 'iast_api_response'
unique_together = ['return_type', 'route_id']
| StarcoderdataPython |
1647972 | import random
import time
from airtest.core.api import *
from airtest.core.error import TargetNotFoundError
from airtest.core.helper import (G, delay_after_operation)
def connect_windows(name):
""" 连接win设备
"""
try:
connect_device("windows:///?title_re=%s" % name)
except Exception as e:
print("connect failed! Please check or report it: ", e)
return 1
return 0
def connect_android(seq):
""" 连接安卓设备
"""
try:
connect_device("Android:///%s" % seq)
except Exception as e:
print("connect android failed! Please check or report it: ", e)
return 1
print(G.DEVICE.get_display_info())
return 0
def random_pos(pos, xmin, xmax, zmin=-1, zmax=-1):
if zmin == -1 or zmax == -1:
zmin, zmax = xmin, xmax
return (pos[0] + random.randrange(xmin, xmax),
pos[1] + random.randrange(zmin, zmax))
def touch_pos(pos, times=1, **kwargs):
for _ in range(times):
G.DEVICE.touch(pos, **kwargs)
time.sleep(0.05)
delay_after_operation()
def image(path, rpos, rs):
return Template(r"../assets/{}".format(path), record_pos=rpos, resolution=rs)
def get_current_resolution():
return G.DEVICE.get_current_resolution()
def wati_util(v, condition, timeout=60, interval=0.5):
"""等待符合条件的对象
"""
start_time = time.time()
while True:
ret = find_all(v)
if condition(ret):
return ret
if (time.time() - start_time) > timeout:
raise TargetNotFoundError('Continue %s not found in screen' % v)
time.sleep(interval)
def select(vs, timeout=60, interval=0.5):
"""等待多个对象,返回第一个匹配到的对象
"""
start_time = time.time()
while True:
for idx, v in enumerate(vs):
ret = find_all(v)
if ret:
return idx, ret
if (time.time() - start_time) > timeout:
raise TargetNotFoundError('Continue %s not found in screen' % v)
time.sleep(interval)
| StarcoderdataPython |
57113 | <reponame>diatomsRcool/checklists<gh_stars>1-10
#this code changes the file names from geonames id to country name
#it creates a directory for each country and places the tsv file in that directory
#the country name is all lower case with underscores for spaces
#be sure to change the file paths for your local machine
import os
import pickle
import re
import shutil
f = open('country_dict.p', 'rb')
country_ids = pickle.load(f)
for filename in os.listdir('/Volumes/PCCOMP/effechecka_country_results/checklist/'): #this path needs to point to the unzipped effechecka output
if not filename.startswith('.'): #this ignores hidden files. I'm not sure why they are there
name = re.sub('.tsv', '', filename)
country = country_ids[name]
if not os.path.exists('/Volumes/PCCOMP/effechecka_country_results/' + country + '/'):
os.makedirs('/Volumes/PCCOMP/effechecka_country_results/' + country + '/')
shutil.copy('/Volumes/PCCOMP/effechecka_country_results/checklist/' + filename, '/Volumes/PCCOMP/effechecka_country_results/' + country + '/' + country)
else:
        os.remove('/Volumes/PCCOMP/effechecka_country_results/checklist/' + filename) #this removes the hidden files | StarcoderdataPython |
58923 | <filename>hcc_october_inservice_2021/simple_nn.py<gh_stars>0
# simple_nn.py
# A simple neural network with one node that has two inputs and one output.
train_X = [1, 2, 4, 5, 6, 7]
train_Y = [3, 5, 4, 6, 7, 2]
def initialize_parameters():
    # Minimal completion (assumed): start the node's weight and bias at zero;
    # a training loop would adjust them to fit train_X / train_Y.
    w = 0.0
    b = 0.0
    return w, b | StarcoderdataPython |
58350 | <reponame>furious-luke/polecat<gh_stars>1-10
from polecat.db.schema import IntColumn, RelatedColumn, Schema, Table
def create_table(name=None, related_table=None, schema=None):
columns = [
IntColumn('id', primary_key=True),
IntColumn('col1'),
IntColumn('col2')
]
if related_table:
columns.append(
RelatedColumn('col3', related_table, related_column='a_tables')
)
table = Table(name or 'a_table', columns)
if schema is None:
schema = Schema()
schema.add_table(table)
schema.bind()
return table
| StarcoderdataPython |
184613 | '''
This is a simple service class which accepts http requests and returns data based on the request in JSON format.
Services -
1./emp - returns all rows of the csv file as a JSON array.
2./emp/column/{csv column name} - returns all values of a csv column.
E.g. /emp/column/City will return all cities like "Seattle, Fairfax, ALDIE, Chantilly, Herndon"
3./emp/search/{search string} - returns all rows of the csv file in a JSON array, where each row contains the given string.
4./emp/search/{csv column name}/{searchString} - works like the service above, but the search is performed only in the given column instead of in all columns.
Sample Request URL
http://localhost:50012/emp/search/Position/Analyst
The following are required to run these services:
1. Python 2.7.10 or later
2. The port on which the services run should be accessible. Here I am using port "50012"; you can update your port here.
'''
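# Example requests against the endpoints described above (server must be running on port 50012):
#   http://localhost:50012/emp                          -> all rows as a JSON array
#   http://localhost:50012/emp/column/City              -> all values of the "City" column
#   http://localhost:50012/emp/search/Analyst           -> rows containing "Analyst" in any column
#   http://localhost:50012/emp/search/Position/Analyst  -> rows matching "Analyst" in the "Position" column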
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import simplejson as json
import csv
searchDict = {}
allDicts = []
SocketServer.TCPServer.allow_reuse_address = True  # assign True so the port can be reused after a restart
class MockServices(BaseHTTPRequestHandler):
print 'Service started'
reader = csv.DictReader(open('EmployeeData.csv'))
key = 0
for row in reader:
arrSearchValues = []
col1 = row["EmpId"]
col2 = row["FirstName"]
col3 = row["LastName"]
col4 = row["Position"]
col5 = row["Manager"]
col6 = row["Street"]
col7 = row["City"]
col8 = row["State"]
col9 = row["Zip"]
col10 = row["Country"]
arrSearchValues.append(col1)
arrSearchValues.append(col2)
arrSearchValues.append(col3)
arrSearchValues.append(col4)
arrSearchValues.append(col5)
arrSearchValues.append(col6)
arrSearchValues.append(col7)
arrSearchValues.append(col8)
arrSearchValues.append(col9)
arrSearchValues.append(col10)
allValues = ''.join(arrSearchValues)
        # allValues is the combined string in which the search is performed.
        # If you want the search operation to be performed on only a few columns, update the string with only those columns.
searchDict[allValues] = row
allDicts.append(row);
# This function return all the values for a column name in the csv
def getDetailsOfRecord(self, cellName):
responsesTring = (list({d[cellName] for d in searchDict.values()}))
return self.wfile.write(json.dumps(responsesTring))
    # This function searches within a column and returns the full rows that match the filter
def searchStringInColumn(self,searchColumn,searchStr,inputDicts):
resultlist = [d for d in inputDicts if d[searchColumn] == searchStr]
responsesTring = (json.dumps(resultlist))
return self.wfile.write(responsesTring)
    # This function searches for the given string in each row of the csv and returns the filtered rows
def searchString(self,searchStr,inputDicts):
matches = {x for x in inputDicts.keys() if searchStr in x}
returnDicts = []
for tempStr in matches:
returnDicts.append(searchDict[tempStr])
responsesTring = (json.dumps(returnDicts))
return self.wfile.write(responsesTring)
    # Request dispatcher: parses the URL path and routes to the helpers above
def do_GET(self):
print(self.path)
requestPath = self.path.split('/')
print(requestPath)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if len(requestPath) > 0:
serviceName = requestPath[1]
if serviceName=="emp" :
if len(requestPath) > 2:
operationName = requestPath[2]
print operationName
if operationName=="search":
if len(requestPath) > 4:
# search data in the column name
searchColumnName = requestPath[4]
return self.searchStringInColumn(requestPath[3],requestPath[4],allDicts)
else:
return self.searchString(requestPath[3],searchDict)
elif operationName=="column":
return self.getDetailsOfRecord(requestPath[3])
else:
responsesTring = (json.dumps(searchDict))
return self.wfile.write(responsesTring)
else:
responsesTring = (json.dumps(allDicts))
return self.wfile.write(responsesTring)
Handler = MockServices
httpd = SocketServer.TCPServer(("", 50012), Handler)
httpd.serve_forever() | StarcoderdataPython |
146189 | <gh_stars>1-10
from typing import Any
from dagster.core.errors import DagsterInvalidConfigError
from ...config import Shape
from ..execution.context.logger import InitLoggerContext, UnboundInitLoggerContext
from .logger_definition import LoggerDefinition
def logger_invocation_result(logger_def: LoggerDefinition, init_context: UnboundInitLoggerContext):
"""Using the provided context, call the underlying `logger_fn` and return created logger."""
logger_config = _resolve_bound_config(init_context.logger_config, logger_def)
bound_context = InitLoggerContext(
logger_config, logger_def, init_context.pipeline_def, init_context.run_id
)
return logger_def.logger_fn(bound_context)
def _resolve_bound_config(logger_config: Any, logger_def: "LoggerDefinition") -> Any:
from dagster.config.validate import process_config
validated_config = None
outer_config_shape = Shape({"config": logger_def.get_config_field()})
config_evr = process_config(
outer_config_shape, {"config": logger_config} if logger_config else {}
)
if not config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for logger ",
config_evr.errors,
logger_config,
)
validated_config = config_evr.value.get("config")
mapped_config_evr = logger_def.apply_config_mapping({"config": validated_config})
if not mapped_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config mapping for logger ", mapped_config_evr.errors, validated_config
)
validated_config = mapped_config_evr.value.get("config")
return validated_config
def _get_default_if_exists(logger_def: LoggerDefinition):
return (
logger_def.config_field.default_value
if logger_def.config_field and logger_def.config_field.default_provided
else None
)
| StarcoderdataPython |
1797096 | <reponame>rlan/LeetCode
#
# LeetCode
# Algorithm 136 Single Number
#
# <NAME>, May 6, 2017.
# See LICENSE
#
# Test case(s):
# [1]
# [0,2,0]
#
# Your runtime beats 83.04 % of python submissions.
#
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
        A number XORed with itself is zero, so XORing every element of the
        array together leaves exactly the number that appears only once.
"""
secret = nums[0]
for val in nums[1:]:
secret = secret ^ val
return secret
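
# Minimal self-check using the test cases listed above (assumed driver, not part of the LeetCode submission):
if __name__ == '__main__':
    assert Solution().singleNumber([1]) == 1
    assert Solution().singleNumber([0, 2, 0]) == 2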
| StarcoderdataPython |
3359225 | <reponame>saai-sudarsanan-d/Alpha-v1<filename>intent_keys.py
import re
intent_keywords = {
'self':['your','you','yourself'],
'greet': ['hi','morning','hello','welcome','Hey','Nice to meet you'],
'time': ['time', 'clock'],
'date':['date','day','calendar','today'],
"search":['search',"when","what","why","how","who","where"],
'relax':["relax"],
'location':['find','track'],
'close': ['close'],
'exit':['close','shut up','stop','exit']
}
patterns = {intent: re.compile('|'.join(keys)) for intent, keys in intent_keywords.items()}
def get_intent(message):
intents=[]
for intent, pattern in patterns.items():
# Check if the pattern occurs in the message
if pattern.search(message):
intents.append(intent)
    if intents:
        return intents
    else:
        return ['default']
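
# Example (assumed input; relies on Python 3.7+ dict ordering of intent_keywords):
# get_intent("hello, what time is it") -> ['greet', 'time', 'search']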
def ques_check(message):
que = False
for i in ["when","what","why","how","who","where"]:
if i in message:
que = True
return que | StarcoderdataPython |