id (string, 1–265 chars) | text (string, 6–5.19M chars) | dataset_id (string, 7 classes) |
---|---|---|
3269487 | <reponame>dewoolkaridhish4/C104
import csv
with open("height-weight.csv",newline="") as f:
reader=csv.reader(f)
filedata=list(reader)
filedata.pop(0)
newdata = []
for i in range(len(filedata)):
n_num=filedata[i][1]
newdata.append(float(n_num))
n=len(newdata)
total=0
for x in newdata:
total+=x
mean=total/n
print("Mean = " +str(mean))
import csv
with open("Internet Users.csv",newline="") as f:
reader=csv.reader(f)
filedata=list(reader)
filedata.pop(0)
newdata = []
for i in range(len(filedata)):
n_num=filedata[i][1]
newdata.append(float(n_num))
n=len(newdata)
total=0
for x in newdata:
total+=x
mean=total/n
print("Mean = " +str(mean))
| StarcoderdataPython |
11368 | #!/usr/bin/env python3
import logging
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
LSTMSequenceEncoder,
RNNDecoder,
RNNEncoder,
RNNModel,
base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
logger = logging.getLogger(__name__)
@register_model("dual_learning")
class DualLearningModel(BaseFairseqModel):
"""
    An architecture to jointly train a primal model and a dual model by leveraging
    distribution duality, which exists for both parallel data and monolingual
data.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__()
self.args = args
self.task_keys = ["primal", "dual"]
self.models = nn.ModuleDict(
{"primal": primal_model, "dual": dual_model, "lm": lm_model}
)
def forward(self, src_tokens, src_lengths, prev_output_tokens=None):
"""
If batch is monolingual, need to run beam decoding to generate
fake prev_output_tokens.
"""
# TODO: pass to dual model too
primal_encoder_out = self.models["primal"].encoder(src_tokens, src_lengths)
primal_decoder_out = self.models["primal"].decoder(
prev_output_tokens, primal_encoder_out
)
return primal_decoder_out
def max_positions(self):
return {
"primal_source": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_source": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
"primal_parallel": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_parallel": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
}
@register_model("dual_learning_rnn")
class RNNDualLearningModel(DualLearningModel):
"""Train two models for a task and its duality jointly.
    This class uses RNN arch, but can be extended to take arch as an argument.
This class takes translation as a task, but the framework is intended
to be general enough to be applied to other tasks as well.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__(args, task, primal_model, dual_model, lm_model)
@staticmethod
def add_args(parser):
rnn.RNNModel.add_args(parser)
parser.add_argument(
"--unsupervised-dual",
default=False,
action="store_true",
help="Train with dual loss from monolingual data.",
)
parser.add_argument(
"--supervised-dual",
default=False,
action="store_true",
help="Train with dual loss from parallel data.",
)
@classmethod
def build_model(cls, args, task):
""" Build both the primal and dual models.
For simplicity, both models share the same arch, i.e. the same model
params would be used to initialize both models.
Support for different models/archs would be added in further iterations.
"""
base_architecture(args)
if args.sequence_lstm:
encoder_class = LSTMSequenceEncoder
else:
encoder_class = RNNEncoder
decoder_class = RNNDecoder
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_encoder = encoder_class(
task.primal_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
primal_decoder = decoder_class(
src_dict=task.primal_src_dict,
dst_dict=task.primal_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
primal_task = PytorchTranslateTask(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_model = rnn.RNNModel(primal_task, primal_encoder, primal_decoder)
if args.pretrained_forward_checkpoint:
pretrained_forward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_forward_checkpoint
)
primal_model.load_state_dict(pretrained_forward_state["model"], strict=True)
print(
f"Loaded pretrained primal model from {args.pretrained_forward_checkpoint}"
)
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.dual_src_dict, task.dual_tgt_dict
)
dual_encoder = encoder_class(
task.dual_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
dual_decoder = decoder_class(
src_dict=task.dual_src_dict,
dst_dict=task.dual_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
dual_task = PytorchTranslateTask(args, task.dual_src_dict, task.dual_tgt_dict)
dual_model = rnn.RNNModel(dual_task, dual_encoder, dual_decoder)
if args.pretrained_backward_checkpoint:
pretrained_backward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_backward_checkpoint
)
dual_model.load_state_dict(pretrained_backward_state["model"], strict=True)
print(
f"Loaded pretrained dual model from {args.pretrained_backward_checkpoint}"
)
        # TODO (T36875783): instantiate a language model
lm_model = None
return RNNDualLearningModel(args, task, primal_model, dual_model, lm_model)
| StarcoderdataPython |
4814897 | #!/usr/bin/env python
# coding: utf-8
from multiprocessing import Pool
from tqdm import tqdm
import numpy as np
test_num = 500000000
output_path = "feature_output"
base_dir = "dataset"
prob_dir = output_path
val_t_correct_index = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_t_correct_index.npy",
mmap_mode="r")
train_hrt = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/train_hrt.npy", mmap_mode="r")
val_hr = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_hr.npy", mmap_mode="r")
val_t_candidate = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/val_t_candidate.npy",
mmap_mode="r")
test_hr = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/test_hr.npy",
mmap_mode="r")[:test_num]
test_t_candidate = np.load(
base_dir + "/wikikg90m_kddcup2021/processed/test_t_candidate.npy",
mmap_mode="r")[:test_num]
# HT
def f(x):
res = np.zeros_like(x)
unique, counts = np.unique(x, return_counts=True)
mapper_dict = {}
for idx, count in zip(unique, counts):
mapper_dict[idx] = count
def mp(entry):
return mapper_dict[entry]
mp = np.vectorize(mp)
return mp(x)
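# Illustrative example of f(): counts are taken over the whole candidate block,
# so every entry is replaced by its frequency within that block, e.g.
#   f(np.array([[3, 3, 7], [7, 7, 7]])) -> array([[2, 2, 4], [4, 4, 4]])
# (hypothetical values; the real inputs are per-head slices of the t_candidate arrays below).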
# valid
val_h_sorted_index = np.argsort(val_hr[:, 0], axis=0)
val_h_sorted = val_hr[val_h_sorted_index]
val_h_sorted_index_part = []
last_start = -1
tmp = []
for i in tqdm(range(len(val_h_sorted) + 1)):
if i == len(val_h_sorted):
val_h_sorted_index_part.append(tmp)
break
if val_h_sorted[i][0] > last_start:
if last_start != -1:
val_h_sorted_index_part.append(tmp)
tmp = []
last_start = val_h_sorted[i][0]
tmp.append(i)
val_h_sorted_index_arr = [
np.array(
idx, dtype="int32") for idx in val_h_sorted_index_part
]
inputs = [
val_t_candidate[val_h_sorted_index[arr]] for arr in val_h_sorted_index_arr
]
mapped_array = None
with Pool(20) as p:
mapped_array = list(tqdm(p.imap(f, inputs), total=len(inputs)))
ht_feat = np.zeros_like(val_t_candidate)
for (arr, mapped) in zip(val_h_sorted_index_arr, mapped_array):
ht_feat[val_h_sorted_index[arr]] = mapped
np.save("%s/valid_feats/ht_feat.npy" % output_path, ht_feat.astype(np.float32))
# test
test_h_sorted_index = np.argsort(test_hr[:, 0], axis=0)
test_h_sorted = test_hr[test_h_sorted_index]
test_h_sorted_index_part = []
last_start = -1
tmp = []
for i in tqdm(range(len(test_h_sorted) + 1)):
if i == len(test_h_sorted):
test_h_sorted_index_part.append(tmp)
break
if test_h_sorted[i][0] > last_start:
if last_start != -1:
test_h_sorted_index_part.append(tmp)
tmp = []
last_start = test_h_sorted[i][0]
tmp.append(i)
test_h_sorted_index_arr = [
np.array(
idx, dtype="int32") for idx in test_h_sorted_index_part
]
inputs = [
test_t_candidate[test_h_sorted_index[arr]]
for arr in test_h_sorted_index_arr
]
mapped_array = None
with Pool(20) as p:
mapped_array = list(tqdm(p.imap(f, inputs), total=len(inputs)))
ht_feat = np.zeros_like(test_t_candidate)
for (arr, mapped) in zip(test_h_sorted_index_arr, mapped_array):
ht_feat[test_h_sorted_index[arr]] = mapped
np.save("%s/test_feats/ht_feat.npy" % output_path, ht_feat.astype(np.float32))
| StarcoderdataPython |
1670405 | <reponame>codebyravi/otter
"""Code related to gathering data to inform convergence."""
import re
from functools import partial
from effect import catch, parallel
from effect.do import do, do_return
from pyrsistent import pmap
from toolz.curried import filter, groupby, keyfilter, map
from toolz.dicttoolz import assoc, get_in, merge
from toolz.functoolz import compose, curry, identity
from toolz.itertoolz import concat
from otter.auth import NoSuchEndpoint
from otter.cloud_client import (
list_servers_details_all,
list_stacks_all,
service_request
)
from otter.cloud_client.clb import (
CLBNotFoundError,
get_clb_health_monitor,
get_clb_node_feed,
get_clb_nodes,
get_clbs
)
from otter.constants import ServiceType
from otter.convergence.model import (
CLB,
CLBNode,
CLBNodeCondition,
HeatStack,
NovaServer,
RCv3Description,
RCv3Node,
get_stack_tag_for_group,
group_id_from_metadata
)
from otter.indexer import atom
from otter.models.cass import CassScalingGroupServersCache
from otter.util.http import append_segments
from otter.util.retry import (
exponential_backoff_interval, retry_effect, retry_times)
from otter.util.timestamp import timestamp_to_epoch
def _retry(eff):
"""Retry an effect with a common policy."""
return retry_effect(
eff, retry_times(5), exponential_backoff_interval(2))
def get_all_server_details(changes_since=None, batch_size=100):
"""
Return all servers of a tenant.
:param datetime changes_since: Get changes since this time. Must be UTC
:param int batch_size: number of servers to fetch *per batch*.
:return: list of server objects as returned by Nova.
    NOTE: This really screams to be an independent fxcloud-type API
"""
query = {'limit': [str(batch_size)]}
if changes_since is not None:
query['changes-since'] = ['{0}Z'.format(changes_since.isoformat())]
return list_servers_details_all(query)
def get_all_scaling_group_servers(changes_since=None,
server_predicate=identity):
"""
Return tenant's servers that belong to any scaling group as
    {group_id: [server1, server2]} ``dict``. No specific ordering is guaranteed.
    :param datetime changes_since: Get servers since this time. Must be UTC.
:param server_predicate: function of server -> bool that determines whether
the server should be included in the result.
:return: dict mapping group IDs to lists of Nova servers.
"""
def has_group_id(s):
return 'metadata' in s and isinstance(s['metadata'], dict)
def group_id(s):
return group_id_from_metadata(s['metadata'])
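    # compose() applies right-to-left: keep servers that carry scaling-group
    # metadata, apply the caller's predicate, group them by group ID, and drop
    # the None key for servers whose metadata has no group ID.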
servers_apply = compose(keyfilter(lambda k: k is not None),
groupby(group_id),
filter(server_predicate),
filter(has_group_id))
return get_all_server_details(changes_since).on(servers_apply)
def mark_deleted_servers(old, new):
"""
Given dictionaries containing old and new servers, return a list of all
servers, with the deleted ones annotated with a status of DELETED.
:param list old: List of old servers
:param list new: List of latest servers
:return: List of updated servers
"""
def sdict(servers):
return {s['id']: s for s in servers}
old = sdict(old)
new = sdict(new)
deleted_ids = set(old.keys()) - set(new.keys())
for sid in deleted_ids:
old[sid] = assoc(old[sid], "status", "DELETED")
return merge(old, new).values()
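# For example (hypothetical ids): with old servers {a, b} and new servers {a},
# mark_deleted_servers returns both entries, with b's status set to "DELETED".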
@curry
def server_of_group(group_id, server):
"""
Return True if server belongs to group_id. False otherwise
"""
return group_id_from_metadata(server.get('metadata', {})) == group_id
@do
def get_scaling_group_servers(tenant_id, group_id, now,
all_as_servers=get_all_scaling_group_servers,
all_servers=get_all_server_details,
cache_class=CassScalingGroupServersCache):
"""
Get a group's servers taken from cache if it exists. Updates cache
if it is empty from newly fetched servers
# NOTE: This function takes tenant_id even though the whole effect is
# scoped on the tenant because cache calls require tenant_id. Should
# they also not take tenant_id and work on the scope?
:return: Servers as list of dicts
:rtype: Effect
"""
cache = cache_class(tenant_id, group_id)
cached_servers, last_update = yield cache.get_servers(False)
if last_update is None:
servers = (yield all_as_servers()).get(group_id, [])
else:
current = yield all_servers()
servers = mark_deleted_servers(cached_servers, current)
servers = list(filter(server_of_group(group_id), servers))
yield do_return(servers)
def get_all_stacks(stack_tag=None):
query = {}
if stack_tag is not None:
query['tags'] = stack_tag
return list_stacks_all(query)
def get_scaling_group_stacks(group_id, get_all_stacks=get_all_stacks):
return get_all_stacks(stack_tag=get_stack_tag_for_group(group_id))
@do
def get_clb_contents():
"""
Get Rackspace Cloud Load Balancer contents as list of `CLBNode`. CLB
health monitor information is also returned as a pmap of :obj:`CLB` objects
mapped on LB ID.
:return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`)
:rtype: :obj:`Effect`
"""
# If we get a CLBNotFoundError while fetching feeds, we should throw away
# all nodes related to that load balancer, because we don't want to act on
# data that we know is invalid/outdated (for example, if we can't fetch a
# feed because CLB was deleted, we don't want to say that we have a node in
# DRAINING with draining time of 0; we should just say that the node is
# gone).
def gone(r):
return catch(CLBNotFoundError, lambda exc: r)
lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))]
node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([])))
for lb_id in lb_ids]
healthmon_reqs = [
_retry(get_clb_health_monitor(lb_id).on(error=gone(None)))
for lb_id in lb_ids]
all_nodes_hms = yield parallel(node_reqs + healthmon_reqs)
all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):]
lb_nodes = {
lb_id: [CLBNode.from_node_json(lb_id, node)
for node in nodes]
for lb_id, nodes in zip(lb_ids, all_nodes)}
clbs = {
str(lb_id): CLB(bool(health_mon))
for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None}
draining = [n for n in concat(lb_nodes.values())
if n.description.condition == CLBNodeCondition.DRAINING]
feeds = yield parallel(
[_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on(
error=gone(None)))
for n in draining]
)
nodes_to_feeds = dict(zip(draining, feeds))
deleted_lbs = set([
node.description.lb_id
for (node, feed) in nodes_to_feeds.items() if feed is None])
def update_drained_at(node):
feed = nodes_to_feeds.get(node)
if node.description.lb_id in deleted_lbs:
return None
if feed is not None:
node.drained_at = extract_clb_drained_at(feed)
return node
nodes = map(update_drained_at, concat(lb_nodes.values()))
yield do_return((
list(filter(bool, nodes)),
pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))
_DRAINING_CREATED_RE = (
"^Node successfully created with address: '.+', port: '\d+', "
"condition: 'DRAINING', weight: '\d+'$")
_DRAINING_UPDATED_RE = (
"^Node successfully updated with address: '.+', port: '\d+', "
"weight: '\d+', condition: 'DRAINING'$")
_DRAINING_RE = re.compile(
"({})|({})".format(_DRAINING_UPDATED_RE, _DRAINING_CREATED_RE))
def extract_clb_drained_at(feed):
"""
Extract time when node was changed to DRAINING from a CLB atom feed. Will
return node's creation time if node was created with DRAINING. Return None
    if it couldn't be found for any reason.
:param list feed: ``list`` of atom entry :class:`Elements`
:returns: drained_at EPOCH in seconds
:rtype: float
"""
for entry in feed:
if _DRAINING_RE.match(atom.summary(entry)):
return timestamp_to_epoch(atom.updated(entry))
return None
def get_rcv3_contents():
"""
Get Rackspace Cloud Load Balancer contents as list of `RCv3Node`.
"""
eff = service_request(ServiceType.RACKCONNECT_V3, 'GET',
'load_balancer_pools')
def on_listing_pools(lblist_result):
_, body = lblist_result
return parallel([
service_request(ServiceType.RACKCONNECT_V3, 'GET',
append_segments('load_balancer_pools',
lb_pool['id'], 'nodes')).on(
partial(on_listing_nodes,
RCv3Description(lb_id=lb_pool['id'])))
for lb_pool in body
])
def on_listing_nodes(rcv3_description, lbnodes_result):
_, body = lbnodes_result
return [
RCv3Node(node_id=node['id'], description=rcv3_description,
cloud_server_id=get_in(('cloud_server', 'id'), node))
for node in body
]
return eff.on(on_listing_pools).on(
success=compose(list, concat),
error=catch(NoSuchEndpoint, lambda _: []))
def get_all_launch_server_data(
tenant_id,
group_id,
now,
get_scaling_group_servers=get_scaling_group_servers,
get_clb_contents=get_clb_contents,
get_rcv3_contents=get_rcv3_contents):
"""
Gather all launch_server data relevant for convergence w.r.t given time,
in parallel where possible.
Returns an Effect of {'servers': [NovaServer], 'lb_nodes': [LBNode],
'lbs': pmap(LB_ID -> CLB)}.
"""
return parallel(
[get_scaling_group_servers(tenant_id, group_id, now)
.on(map(NovaServer.from_server_details_json)).on(list),
get_clb_contents(),
get_rcv3_contents()]
).on(lambda (servers, clb_nodes_and_clbs, rcv3_nodes): {
'servers': servers,
'lb_nodes': clb_nodes_and_clbs[0] + rcv3_nodes,
'lbs': clb_nodes_and_clbs[1]
})
def get_all_launch_stack_data(
tenant_id,
group_id,
now,
get_scaling_group_stacks=get_scaling_group_stacks):
"""
Gather all launch_stack data relevant for convergence w.r.t given time
Returns an Effect of {'stacks': [HeatStack]}.
"""
eff = (get_scaling_group_stacks(group_id)
.on(map(HeatStack.from_stack_details_json)).on(list)
.on(lambda stacks: {'stacks': stacks}))
return eff
| StarcoderdataPython |
3329307 | <reponame>koshiishide/calendar
from django import template
import datetime
register = template.Library()
#0,1,2
@register.simple_tag
@register.filter
def test2(day):
tmp=day.weekday()
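    # weekday(): Monday == 0 ... Saturday == 5, Sunday == 6, so the check below flags weekends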
if ((tmp==5)or(tmp==6)):
return 1
else:
return 0
@register.simple_tag
@register.filter
def test(a):
dict = {1:0,2:0}
list = a
for k in list:
try:
if (int(k.type_name) >1):
dict[1]+=1
dict[2]+=1
elif(int(k.type_name) == 1):
dict[1]+=1
else:
continue
except:
continue
if (((dict[1])>=2) and (dict[2]>=2)):
return 0
else:
        return 1
| StarcoderdataPython |
1600854 | from copy import copy
from random import Random
import numpy as np
class SimulationPolicy(object):
def __init__(self, random_state, **kwargs):
self.local_random = Random()
self.local_random.setstate(random_state)
def get_random_state(self):
return self.local_random.getstate()
class RandomSimulationPolicy(SimulationPolicy):
def __init__(self, random_state, **kwargs):
super().__init__(random_state)
def choose_action(self, state, possible_actions):
available_acts = tuple(possible_actions)
chosen_action = self.local_random.choice(available_acts)
return chosen_action
| StarcoderdataPython |
1605477 | <gh_stars>0
from argparse import ArgumentParser
import pytorch_lightning as pl
from {{cookiecutter.project_name}} import models
parser = ArgumentParser(description="{{cookiecutter.project_name}} model training script")
parser.add_argument(
"--epochs",
type=int,
default=50,
metavar="N",
help="number of epochs to train (default: 50)",
)
parser.add_argument(
"--model",
type=str,
default="",
help="Model specification, refer to base.py",
)
parser.add_argument(
"--cpu", type=bool, default=False, help="Force CPU model (default: False"
)
parser.add_argument(
"--sweep",
type=bool,
default=False,
help="If using wandb for a sweep (default: False",
)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--dataset", type=str, default="devset.h5")
parser.add_argument("--clip", type=float, default=0.)
# this grabs the model choice without running parse_args
temp_args, _ = parser.parse_known_args()
# pick out the model we want to train
model_choice = models.get(temp_args.model)
logger = pl.loggers.TensorBoardLogger()
| StarcoderdataPython |
135701 | <filename>examples/02_decoding/plot_haxby_space_net.py
"""
Decoding with SpaceNet: face vs house object recognition
=========================================================
Here is a simple example of decoding with a SpaceNet prior (i.e. Graph-Net,
TV-l1, etc.), reproducing the Haxby 2001 study on a face vs house
discrimination task.
See also the SpaceNet documentation: :ref:`space_net`.
"""
##############################################################################
# Load the Haxby dataset
# ------------------------
from nilearn.datasets import fetch_haxby
data_files = fetch_haxby()
# Load behavioral data
import numpy as np
behavioral = np.recfromcsv(data_files.session_target[0], delimiter=" ")
# Restrict to face and house conditions
conditions = behavioral['labels']
condition_mask = np.logical_or(conditions == b"face",
conditions == b"house")
# Split data into train and test samples, using the chunks
condition_mask_train = np.logical_and(condition_mask, behavioral['chunks'] <= 6)
condition_mask_test = np.logical_and(condition_mask, behavioral['chunks'] > 6)
# Apply this sample mask to X (fMRI data) and y (behavioral labels)
# Because the data is in one single large 4D image, we need to use
# index_img to do the split easily
from nilearn.image import index_img
func_filenames = data_files.func[0]
X_train = index_img(func_filenames, condition_mask_train)
X_test = index_img(func_filenames, condition_mask_test)
y_train = conditions[condition_mask_train]
y_test = conditions[condition_mask_test]
# Compute the mean epi to be used for the background of the plotting
from nilearn.image import mean_img
background_img = mean_img(func_filenames)
##############################################################################
# Fit SpaceNet with a Graph-Net penalty
# --------------------------------------
from nilearn.decoding import SpaceNetClassifier
# Fit model on train data and predict on test data
decoder = SpaceNetClassifier(memory="nilearn_cache", penalty='graph-net')
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("Graph-net classification accuracy : %g%%" % accuracy)
#############################################################################
# Visualization of Graph-net weights
# ------------------------------------
from nilearn.plotting import plot_stat_map, show
coef_img = decoder.coef_img_
plot_stat_map(coef_img, background_img,
title="graph-net: accuracy %g%%" % accuracy,
cut_coords=(-52, -5), display_mode="yz")
# Save the coefficients to a nifti file
coef_img.to_filename('haxby_graph-net_weights.nii')
##############################################################################
# Now Fit SpaceNet with a TV-l1 penalty
# --------------------------------------
decoder = SpaceNetClassifier(memory="nilearn_cache", penalty='tv-l1')
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("TV-l1 classification accuracy : %g%%" % accuracy)
#############################################################################
# Visualization of TV-L1 weights
# -------------------------------
coef_img = decoder.coef_img_
plot_stat_map(coef_img, background_img,
title="tv-l1: accuracy %g%%" % accuracy,
cut_coords=(-52, -5), display_mode="yz")
# Save the coefficients to a nifti file
coef_img.to_filename('haxby_tv-l1_weights.nii')
show()
###################################
# We can see that the TV-l1 penalty is 3 times slower to converge and
# gives the same prediction accuracy. However, it yields much
# cleaner coefficient maps
| StarcoderdataPython |
1720210 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Run with:
# PYTHONPATH=.. python3 blocking_s3_put_get.py
#
"""
Boilerplate for some s3 utility code.
create_configured_session groks environment variables and the profile to
create a Session configured with the required AWS access key ID/password/token.
The most interesting is probably purge_and_delete_bucket(), which first deletes all
the objects in the bucket by listing them with a Paginator and then iterating over
the pages to retrieve the maximum result set to enable batched object deletes.
The get_object/put_object wrappers parse s3 URI of the form s3://<bucket>/<key>
but the main use is to make it easy to launch those calls from Executors
"""
import sys
assert sys.version_info >= (3, 8) # Bomb out if not running Python3.8
import os
from utils.logger import init_logging
logger = init_logging(log_name=__name__)
def create_configured_session(module):
"""
boto3.client() doesn't have a (direct) way to set profile_name
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session.client
so we first create a Session instance and use that to create the client.
The precedence below is taken from from
https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#id1
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_PROFILE
"""
name = module.__name__
aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
aws_session_token = os.environ.get("AWS_SESSION_TOKEN")
aws_profile = os.environ.get("AWS_PROFILE")
if aws_access_key_id and aws_secret_access_key:
logger.info(f"Creating {name} Session from AWS_ACCESS_KEY_ID env var")
session = module.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token
)
else:
available_profiles = module.Session().available_profiles
profile = aws_profile if aws_profile else os.getlogin()
profile = profile if profile in available_profiles else "minio"
if profile in available_profiles:
logger.info(f"Creating {name} Session from profile: {profile}")
session = module.Session(profile_name=profile)
else:
logger.info(f"Creating default {name} Session")
session = module.Session()
return session
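# Typical usage (sketch, assuming boto3 is importable in the caller's environment):
#   import boto3
#   session = create_configured_session(boto3)
#   s3 = session.client("s3")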
def parse_s3_uri(s3_uri):
"""
Given a URI of the form s3://<bucket>/<key> parse into bucket, key tuple
For a trivial parse this is faster than using urlparse(s3_uri)
"""
return s3_uri.replace("s3://", "").split("/", 1)
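# e.g. parse_s3_uri("s3://my-bucket/path/to/key") -> ["my-bucket", "path/to/key"]
# (illustrative bucket/key names)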
def get_object(s3, s3_uri):
bucket, key = parse_s3_uri(s3_uri)
try:
# First get get_object response from s3
obj = s3.get_object(Bucket=bucket, Key=key)
# Then read actual object from StreamingBody response.
# https://botocore.amazonaws.com/v1/documentation/api/latest/reference/response.html#botocore.response.StreamingBody
# TODO make exception handling more useful
#value = obj["Body"].read().decode("utf-8")
value = obj["Body"].read()
return value
except s3.exceptions.NoSuchBucket as e:
logger.info(e)
except s3.exceptions.NoSuchKey as e:
logger.info(e)
except Exception as e:
code = type(e).__name__
messsage = f"get_object caused unhandled exception: {code}: {str(e)}"
logger.error(messsage)
return b""
def put_object(s3, s3_uri, body):
bucket, key = parse_s3_uri(s3_uri)
# TODO make exception handling more useful
try:
s3.put_object(Body=body, Bucket=bucket, Key=key)
except s3.exceptions.NoSuchBucket as e:
logger.info(e)
except Exception as e:
code = type(e).__name__
messsage = f"put_object caused unhandled exception: {code}: {str(e)}"
logger.error(messsage)
def create_bucket(s3, bucket_name):
try:
logger.info("Creating bucket {} for running test".format(bucket_name))
response = s3.create_bucket(
Bucket=bucket_name
)
except s3.exceptions.BucketAlreadyExists as e:
logger.info(e)
except s3.exceptions.BucketAlreadyOwnedByYou as e:
logger.info(e)
except Exception as e:
code = type(e).__name__
messsage = f"create_bucket caused unhandled exception: {code}: {str(e)}"
logger.error(messsage)
def purge_and_delete_bucket(s3, bucket_name):
# Delete the objects we created then the bucket to tidy things up up
try:
"""
To deal with lots of response values from list_objects_v2 use a
paginator to make it easy to iterate through batches of results.
"""
paginator = s3.get_paginator("list_objects_v2")
pages = paginator.paginate(Bucket=bucket_name)
for page in pages:
contents = page.get("Contents", [])
"""
Can't just use "Contents" list as "Objects" value in delete_objects
request so use list comprehension to create valid "Objects" list.
"""
delete_list = [{"Key": obj["Key"]} for obj in contents]
if delete_list:
s3.delete_objects(
Bucket=bucket_name,
Delete={"Objects": delete_list, "Quiet": True}
)
logger.info("Deleting bucket {}".format(bucket_name))
response = s3.delete_bucket(Bucket=bucket_name)
except s3.exceptions.NoSuchBucket as e:
logger.info(e)
except Exception as e:
code = type(e).__name__
messsage = f"purge_and_delete_bucket caused unhandled exception: {code}: {str(e)}"
logger.error(messsage)
| StarcoderdataPython |
3321106 | import logging
from typing import List
from bson.objectid import ObjectId
from pymongo import ReturnDocument
from core.config import (
DOCTYPE_CONTRACT,
ERROR_MONGODB_DELETE,
ERROR_MONGODB_UPDATE,
)
from db.mongo import get_collection
from models.contract import Contract, ContractCreate, ContractInDB, ContractUpdate
from crud.utils import (
delete_empty_keys,
fields_in_create,
fields_in_update,
raise_bad_request,
raise_not_found,
raise_server_error,
)
async def find_many(limit: int, skip: int):
collection = get_collection(DOCTYPE_CONTRACT)
contracts: List[Contract] = []
cursor = collection.find({}, limit=limit, skip=skip)
async for row in cursor:
contracts.append(row)
return contracts
async def find_many_by_license(slug: str, limit: int, skip: int):
collection = get_collection(DOCTYPE_CONTRACT)
contracts: List[Contract] = []
cursor = collection.find({"license": slug}, limit=limit, skip=skip)
async for row in cursor:
contracts.append(row)
return contracts
async def find_one(slug: str, id: str):
collection = get_collection(DOCTYPE_CONTRACT)
return await collection.find_one({"license": slug, "_id": ObjectId(id)})
async def insert(slug: str, client: str, data: ContractCreate):
try:
contract = ContractInDB(**data.dict(), license=slug, clientId=client)
props = fields_in_create(contract)
collection = get_collection(DOCTYPE_CONTRACT)
rs = await collection.insert_one(props)
if rs.inserted_id:
return await collection.find_one({"_id": rs.inserted_id})
except Exception as e:
raise_server_error(str(e))
async def update(slug: str, id: str, data: ContractUpdate):
try:
props = delete_empty_keys(data)
collection = get_collection(DOCTYPE_CONTRACT)
contract = await collection.find_one_and_update(
{"_id": ObjectId(id), "license": slug},
{"$set": fields_in_update(props)},
return_document=ReturnDocument.AFTER
)
if contract:
return contract
except Exception as e:
raise_server_error(str(e))
async def delete(slug: str, id: str):
try:
collection = get_collection(DOCTYPE_CONTRACT)
contract = await collection.find_one_and_delete(
{"_id": ObjectId(id), "license": slug},
{"_id": True}
)
if contract:
return {"message": "Contract has been deleted."}
except Exception as e:
raise_server_error(str(e))
| StarcoderdataPython |
55786 | from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class StackSAModuleMSG(nn.Module):
def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
radii: list of float, list of radii to group with
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for k in range(len(self.groupers)):
new_features, ball_idxs = self.groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M1 + M2, C, nsample)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[k](new_features) # (1, C, M1 + M2 ..., nsample)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C)
return new_xyz, new_features
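# Illustrative construction (hypothetical sizes, assuming 16 input feature channels):
#   sa = StackSAModuleMSG(radii=[0.4, 0.8], nsamples=[16, 32],
#                         mlps=[[16, 32], [16, 32]], use_xyz=True)
# Each scale groups points within its radius, runs its shared MLP, and max-pools
# over the nsample dimension before the per-scale outputs are concatenated along
# the channel dimension.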
class StackSAModulePyramid(nn.Module):
def __init__(self, *, mlps: List[List[int]], nsamples, use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
self.num_pyramid_levels = len(nsamples)
assert len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(self.num_pyramid_levels):
nsample = nsamples[i]
self.groupers.append(pointnet2_utils.QueryAndGroupPyramid(nsample, use_xyz=use_xyz))
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
shared_mlps = []
for k in range(len(mlp_spec) - 1):
shared_mlps.extend([
nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[k + 1]),
nn.ReLU()
])
self.mlps.append(nn.Sequential(*shared_mlps))
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz_list, new_xyz_r_list, new_xyz_batch_cnt_list, features=None, batch_size=None, num_rois=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz_list: [(B, N x grid_size^3, 3)]
:param new_xyz_r_list: [(B, N x grid_size^3, 1)]
:param new_xyz_batch_cnt_list: (batch_size)
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(self.num_pyramid_levels):
new_xyz = new_xyz_list[i]
new_xyz_r = new_xyz_r_list[i]
new_xyz_batch_cnt = new_xyz_batch_cnt_list[i]
new_xyz = new_xyz.view(-1, 3).contiguous()
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
new_features, _ = self.groupers[i](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample)
new_features = self.mlps[i](new_features) # (1, C, M1 + M2 ..., nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
).squeeze(dim=-1) # (1, C, M1 + M2 ...)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
num_features = new_features.shape[1]
new_features = new_features.view(batch_size * num_rois, -1, num_features)
new_features_list.append(new_features)
new_features = torch.cat(new_features_list, dim=1) # (B x N, \sum(grid_size^3), C)
return new_features
class StackSAModuleMSGDeform(nn.Module):
"""
Set abstraction with single radius prediction for each roi
"""
def __init__(self, *, temperatures: List[float], div_coefs: List[float], radii: List[float],
nsamples: List[int], predict_nsamples: List[int],
mlps: List[List[int]], pmlps: List[List[int]], pfcs: List[List[int]],
grid_size: int, use_xyz: bool = True):
"""
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param use_xyz:
:param pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.grid_size = grid_size
self.MIN_R = 0.01
self.radii_list = radii
self.div_coef_list = div_coefs
self.norm_groupers = nn.ModuleList()
self.deform_groupers = nn.ModuleList()
self.feat_mlps = nn.ModuleList()
self.predict_mlps = nn.ModuleList()
self.predict_fcs = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
predict_nsample = predict_nsamples[i]
temperature = temperatures[i]
self.norm_groupers.append(
pointnet2_utils.QueryAndGroup(radius, predict_nsample, use_xyz=use_xyz)
)
self.deform_groupers.append(
pointnet2_utils.QueryAndGroupDeform(temperature, nsample, use_xyz=use_xyz)
)
mlp_spec = mlps[i]
predict_mlp_spec = pmlps[i]
if use_xyz:
mlp_spec[0] += 3
predict_mlp_spec[0] += 3
self.feat_mlps.append(self._make_mlp_layer(mlp_spec))
self.predict_mlps.append(self._make_mlp_layer(predict_mlp_spec))
fc_spec = pfcs[i]
self.predict_fcs.append(self._make_fc_layer(fc_spec))
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_mlp_layer(self, mlp_spec):
mlps = []
for i in range(len(mlp_spec) - 1):
mlps.extend([
nn.Conv2d(mlp_spec[i], mlp_spec[i + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[i + 1]),
nn.ReLU()
])
return nn.Sequential(*mlps)
def _make_fc_layer(self, fc_spec):
assert len(fc_spec) == 2
return nn.Linear(fc_spec[0], fc_spec[1], bias = True)
def forward(self, xyz, xyz_batch_cnt, rois, roi_features, features=None, temperature_decay=None):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param rois: (B, num_rois, grid_size^3, 3) roi grid points
:param roi_features: (B, num_rois, C) roi features
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
batch_size = rois.shape[0]
num_rois = rois.shape[1]
new_xyz = rois.view(batch_size, -1, 3).contiguous()
        new_xyz_batch_cnt = new_xyz.new_full((batch_size,), new_xyz.shape[1]).int()
new_xyz = new_xyz.view(-1, 3).contiguous()
new_features_list = []
for k in range(len(self.norm_groupers)):
# radius prediction
predicted_features, ball_idxs = self.norm_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
) # (M, C, nsample)
predicted_features = predicted_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
predicted_features = self.predict_mlps[k](predicted_features) # (1, C, M, nsample)
predicted_features = F.max_pool2d(
predicted_features, kernel_size=[1, predicted_features.size(3)]
).squeeze(dim=-1) # (1, C, M)
# M = batch_size * num_rois * grid_size^3
            predicted_features = predicted_features.squeeze(0).permute(1, 0).contiguous()  # (M, C)
num_predicted_features = predicted_features.shape[1]
predicted_features = predicted_features.view(batch_size, num_rois, self.grid_size ** 3, num_predicted_features)
predicted_features = predicted_features.view(batch_size, num_rois, -1).contiguous()
predicted_residual_r = self.predict_fcs[k](torch.cat([predicted_features, roi_features], dim = 2)) # (batch_size, num_rois, C -> 1)
new_xyz_r = predicted_residual_r / self.div_coef_list[k] + self.radii_list[k]
# constrain predicted radius above MIN_R
new_xyz_r = torch.clamp(new_xyz_r, min = self.MIN_R)
new_xyz_r = new_xyz_r.unsqueeze(2).repeat(1, 1, self.grid_size ** 3, 1) # (batch_size, num_rois, grid_size^3, 1)
new_xyz_r = new_xyz_r.view(-1, 1).contiguous()
# feature extraction
# new_features (M, C, nsample) weights (M, nsample)
new_features, new_weights, ball_idxs = self.deform_groupers[k](
xyz, xyz_batch_cnt, new_xyz, new_xyz_r, new_xyz_batch_cnt, features, temperature_decay
)
new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M, nsample)
new_features = self.feat_mlps[k](new_features) # (1, C, M, nsample)
# multiply after mlps
new_weights = new_weights.unsqueeze(0).unsqueeze(0) # (1, 1, M, nsample)
new_features = new_weights * new_features
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
            ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)
        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
def __init__(self, *, mlp: List[int]):
"""
Args:
mlp: list of int
"""
super().__init__()
shared_mlps = []
for k in range(len(mlp) - 1):
shared_mlps.extend([
nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp[k + 1]),
nn.ReLU()
])
self.mlp = nn.Sequential(*shared_mlps)
def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
"""
Args:
unknown: (N1 + N2 ..., 3)
known: (M1 + M2 ..., 3)
unknow_feats: (N1 + N2 ..., C1)
known_feats: (M1 + M2 ..., C2)
Returns:
new_features: (N1 + N2 ..., C_out)
"""
dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=-1, keepdim=True)
weight = dist_recip / norm
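        # Inverse-distance weighting: w_i = (1 / d_i) / sum_j(1 / d_j), so the three
        # nearest known points contribute in proportion to how close they are.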
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
if unknown_feats is not None:
new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1)
else:
new_features = interpolated_feats
new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1)
new_features = self.mlp(new_features)
new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C)
return new_features
| StarcoderdataPython |
3290530 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
test_karnickel
~~~~~~~~~~~~~~
Test for karnickel, AST macros for Python.
:copyright: Copyright 2010, 2011 by <NAME>.
:license: BSD, see LICENSE for details.
"""
import ast
from textwrap import dedent
from karnickel import *
def raises(exc, func, *args, **kwds):
"""Utility: Make sure the given exception is raised."""
try:
func(*args, **kwds)
except exc:
return True
else:
raise AssertionError('%s did not raise %s' % (func, exc))
@macro
def foo(x, y):
x = 2*y
@macro
def bar():
pass
test_macros = parse_macros('''
import os # unrelated
def not_a_macro():
pass
@macro
def add(i, j, k):
i + j + k
@macro
def set_x(o):
setattr(o, 'x', 1)
@macro
def assign(name, value):
name = value
@macro
def do_while(cond):
while True:
__body__
if not cond: break
''')
def expand_in(code):
ex = Expander(None, test_macros)
tree = ast.parse(code)
new_tree = ex.visit(tree)
code = compile(new_tree, '<test>', 'exec')
ns = {}
exec code in ns
return ns
def test_macro_decorator():
@macro
def test():
pass
# test that a function marked as "macro" can't be called as an
# ordinary function
assert raises(RuntimeError, test)
def test_parse():
# only functions decorated with @macro are macros
assert 'not_a_macro' not in test_macros
# test categorization
assert isinstance(test_macros['add'], ExprMacroDef)
assert isinstance(test_macros['assign'], BlockMacroDef)
assert isinstance(test_macros['do_while'], BlockMacroDef)
# test __body__ presence
assert not test_macros['assign'].has_body
assert test_macros['do_while'].has_body
# invalid macro definitions
assert raises(MacroDefError, parse_macros, dedent('''
@macro
def foo(x, y=1): pass
'''))
def test_import_from():
ns = expand_in(dedent('''
from test_karnickel.__macros__ import foo
foo(a, 21)
'''))
assert ns['a'] == 42
def test_expr_macro():
# expr macros can be used in expressions or as expr statements
assert expand_in('k = add(1, 2, 3)')['k'] == 6
assert expand_in('class X: pass\no = X(); set_x(o)')['o'].x == 1
# only calls are expanded
assert expand_in('add = 1; add')['add'] == 1
# invalid # of arguments
assert raises(MacroCallError, expand_in, 'add(1)')
def test_block_macro():
# in particular, this tests context reassignment
ns = expand_in('assign(j, 1); assign(k, j+1)')
assert ns['j'] == 1
assert ns['k'] == 2
ns = expand_in('assign([j, k], [1, 2])')
assert ns['j'] == 1
assert ns['k'] == 2
# block macros cannot be used as expressions
assert raises(MacroCallError, expand_in, 'k = assign(j, 1)')
# block macros without __body__ cannot be used in with blocks
assert raises(MacroCallError, expand_in, 'with assign(j, 1): pass')
# invalid # of arguments
assert raises(MacroCallError, expand_in, 'assign(i)')
def test_body_macro():
ns = expand_in(dedent('''
i = 0
with do_while(i != 0):
j = 1
'''))
assert ns['j'] == 1
# block macros with __body__ cannot be used in expressions or
# as expr statements
assert raises(MacroCallError, expand_in, 'k = do_while(1)')
assert raises(MacroCallError, expand_in, 'do_while(1)')
# test that unrelated with statements are left alone
assert raises(NameError, expand_in, 'with a: pass')
def test_recursive_expansion():
# test that arguments are expanded before being inserted
ns = expand_in(dedent('''
k = add(add(1, 2, 3), 4, 10)
'''))
assert ns['k'] == 20
# test that the macro body is expanded before being inserted
ns = expand_in(dedent('''
with do_while(False):
k = add(5, 5, 5)
'''))
assert ns['k'] == 15
def test_import_macros():
# test import_macros function
macros = import_macros('test_karnickel', {'foo': 'fuu', 'bar': 'bar'}, {})
assert 'fuu' in macros
assert 'bar' in macros
macros = import_macros('test_karnickel', {'*': '*'}, {})
assert 'foo' in macros
assert 'bar' in macros
assert raises(MacroDefError, import_macros, 'some_module', {}, {})
assert raises(MacroDefError, import_macros, 'test_karnickel', {'x': ''}, {})
def test_import_hook():
importer = install_hook()
import example.test
assert example.test.usage_expr() == 22
try:
import example.fail
except ImportError, err:
assert '__body__' in str(err)
else:
assert False, 'ImportError not raised'
# test import of builtin module, should still work normally
import xxsubtype
assert xxsubtype.spamdict
# test import of C module
import _testcapi
assert _testcapi.error
remove_hook()
# test calling load_module without find_module
assert raises(ImportError, importer.load_module, 'foo')
| StarcoderdataPython |
1669790 | from fcapsy import Context
from bitsets import bitset
from tests import load_all_test_files
import os
import pytest
import json
import pandas as pd
TEST_DATA_DIR_FIMI = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'fimi',
)
@pytest.mark.parametrize("data_file, json_file",
load_all_test_files(TEST_DATA_DIR_FIMI))
def test_context_from_fimi(data_file, json_file):
# Load dataset file
with open(data_file) as f:
context = Context.from_fimi(data_file)
# Load expected output
with open(json_file) as f:
expected_json = json.load(f)
# Compare
assert list(context.Attributes.supremum.members(
)) == expected_json['attributes']
assert list(context.Objects.supremum.members(
)) == expected_json['objects']
assert tuple(context.to_bools()) == tuple(
map(tuple, expected_json['bools']))
TEST_DATA_DIR_CSV = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'csv', 'no_parameter'
)
@pytest.mark.parametrize("data_file, json_file",
load_all_test_files(TEST_DATA_DIR_CSV))
def test_context_from_csv(data_file, json_file):
# Load dataset file
with open(data_file) as f:
context = Context.from_csv(data_file)
# Load expected output
with open(json_file) as f:
expected_json = json.load(f)
# Compare
assert list(context.Attributes.supremum.members(
)) == expected_json['attributes']
assert list(context.Objects.supremum.members(
)) == expected_json['objects']
assert tuple(context.to_bools()) == tuple(
map(tuple, expected_json['bools']))
TEST_DATA_DIR_CSV_DELIMITER = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'csv', 'delimiter'
)
@pytest.mark.parametrize("data_file, json_file",
load_all_test_files(TEST_DATA_DIR_CSV_DELIMITER))
def test_context_from_csv_delimiter(data_file, json_file):
# Load dataset file
with open(data_file) as f:
context = Context.from_csv(data_file, delimiter=';')
# Load expected output
with open(json_file) as f:
expected_json = json.load(f)
# Compare
assert list(context.Attributes.supremum.members(
)) == expected_json['attributes']
assert list(context.Objects.supremum.members(
)) == expected_json['objects']
assert tuple(context.to_bools()) == tuple(
map(tuple, expected_json['bools']))
def test_context_from_pandas():
bools = (
(2, 0, 6),
(132, 1, 0)
)
expected_bools = (
(1, 0, 1),
(1, 1, 0)
)
context = Context.from_pandas(pd.DataFrame(bools))
assert tuple(context.to_bools()) == expected_bools
def test_context_from_pandas_truth_values():
bools = (
(2, 0, 6),
(132, 1, 0)
)
expected_bools = (
(0, 0, 1),
(1, 0, 0)
)
df = pd.DataFrame(bools)
context = Context.from_pandas(df > 2)
assert tuple(context.to_bools()) == expected_bools
| StarcoderdataPython |
3223811 | #!/usr/bin/env python
from functools import reduce
from google.cloud.monitoring_v3 import MetricServiceClient
from google.cloud.monitoring_v3.types import LabelDescriptor, MetricDescriptor, TimeSeries
from os import environ
import psutil as ps
import requests
from signal import signal, SIGTERM
from sys import stderr
from time import sleep, time
def get_metadata(key):
return requests.get(
'http://metadata.google.internal/computeMetadata/v1/instance/' + key,
headers={'Metadata-Flavor': 'Google'}
).text
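# e.g. get_metadata('name') returns the instance name, and get_metadata('zone')
# returns a path like 'projects/<project-number>/zones/<zone>' (parsed below).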
def reset():
global memory_used, disk_used, disk_reads, disk_writes, report_time
# Explicitly reset the CPU counter, because the first call of this method always reports 0
ps.cpu_percent()
memory_used = 0
disk_used = 0
disk_reads = disk_io('read_count')
disk_writes = disk_io('write_count')
report_time = 0
def measure():
global memory_used, disk_used, report_time
memory_used = max(memory_used, MEMORY_SIZE - mem_usage('available'))
disk_used = max(disk_used, disk_usage('used'))
report_time += MEASUREMENT_TIME_SEC
sleep(MEASUREMENT_TIME_SEC)
def mem_usage(param):
return getattr(ps.virtual_memory(), param)
def disk_usage(param):
return reduce(
lambda usage, mount: usage + getattr(ps.disk_usage(mount), param),
DISK_MOUNTS, 0,
)
def disk_io(param):
return getattr(ps.disk_io_counters(), param)
def format_gb(value_bytes):
return '%.1f' % round(value_bytes / 2**30, 1)
def get_metric(key, value_type, unit, description):
return client.create_metric_descriptor(PROJECT_NAME, MetricDescriptor(
type='/'.join(['custom.googleapis.com', METRIC_ROOT, key]),
description=description,
metric_kind='GAUGE',
value_type=value_type,
unit=unit,
labels=LABEL_DESCRIPTORS,
))
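# e.g. get_metric('cpu_utilization', 'DOUBLE', '%', ...) registers a descriptor
# of type 'custom.googleapis.com/wdl_task/cpu_utilization'.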
def create_time_series(series):
client.create_time_series(PROJECT_NAME, series)
def get_time_series(metric_descriptor, value):
series = TimeSeries()
series.metric.type = metric_descriptor.type
labels = series.metric.labels
labels['workflow_id'] = WORKFLOW_ID
labels['task_call_name'] = TASK_CALL_NAME
labels['task_call_index'] = TASK_CALL_INDEX
labels['task_call_attempt'] = TASK_CALL_ATTEMPT
labels['cpu_count'] = CPU_COUNT_LABEL
labels['mem_size'] = MEMORY_SIZE_LABEL
labels['disk_size'] = DISK_SIZE_LABEL
series.resource.type = 'gce_instance'
series.resource.labels['zone'] = ZONE
series.resource.labels['instance_id'] = INSTANCE
point = series.points.add(value=value)
point.interval.end_time.seconds = int(time())
return series
def report():
create_time_series([
get_time_series(CPU_UTILIZATION_METRIC, { 'double_value': ps.cpu_percent() }),
get_time_series(MEMORY_UTILIZATION_METRIC, { 'double_value': memory_used / MEMORY_SIZE * 100 }),
get_time_series(DISK_UTILIZATION_METRIC, { 'double_value': disk_used / DISK_SIZE * 100 }),
get_time_series(DISK_READS_METRIC, { 'double_value': (disk_io('read_count') - disk_reads) / report_time }),
get_time_series(DISK_WRITES_METRIC, { 'double_value': (disk_io('write_count') - disk_writes) / report_time }),
])
### Define constants
# Cromwell variables passed to the container
# through environmental variables
WORKFLOW_ID = environ['WORKFLOW_ID']
TASK_CALL_NAME = environ['TASK_CALL_NAME']
TASK_CALL_INDEX = environ['TASK_CALL_INDEX']
TASK_CALL_ATTEMPT = environ['TASK_CALL_ATTEMPT']
DISK_MOUNTS = environ['DISK_MOUNTS'].split()
# GCP instance name, zone and project
# from instance introspection API
INSTANCE = get_metadata('name')
_, PROJECT, _, ZONE = get_metadata('zone').split('/')
client = MetricServiceClient()
PROJECT_NAME = client.project_path(PROJECT)
METRIC_ROOT = 'wdl_task'
MEASUREMENT_TIME_SEC = 1
REPORT_TIME_SEC = 60
LABEL_DESCRIPTORS = [
LabelDescriptor(
key='workflow_id',
description='Cromwell workflow ID',
),
LabelDescriptor(
key='task_call_name',
description='Cromwell task call name',
),
LabelDescriptor(
key='task_call_index',
description='Cromwell task call index',
),
LabelDescriptor(
key='task_call_attempt',
description='Cromwell task call attempt',
),
LabelDescriptor(
key='cpu_count',
description='Number of virtual cores',
),
LabelDescriptor(
key='mem_size',
description='Total memory size, GB',
),
LabelDescriptor(
key='disk_size',
description='Total disk size, GB',
),
]
CPU_COUNT = ps.cpu_count()
CPU_COUNT_LABEL = str(CPU_COUNT)
MEMORY_SIZE = mem_usage('total')
MEMORY_SIZE_LABEL = format_gb(MEMORY_SIZE)
DISK_SIZE = disk_usage('total')
DISK_SIZE_LABEL = format_gb(DISK_SIZE)
CPU_UTILIZATION_METRIC = get_metric(
'cpu_utilization', 'DOUBLE', '%',
'% of CPU utilized in a Cromwell task call',
)
MEMORY_UTILIZATION_METRIC = get_metric(
'mem_utilization', 'DOUBLE', '%',
'% of memory utilized in a Cromwell task call',
)
DISK_UTILIZATION_METRIC = get_metric(
'disk_utilization', 'DOUBLE', '%',
'% of disk utilized in a Cromwell task call',
)
DISK_READS_METRIC = get_metric(
'disk_reads', 'DOUBLE', '{reads}/s',
'Disk read IOPS in a Cromwell task call',
)
DISK_WRITES_METRIC = get_metric(
'disk_writes', 'DOUBLE', '{writes}/s',
'Disk write IOPS in a Cromwell task call',
)
### Detect container termination
def signal_handler(signum, frame):
global running
running = False
running = True
signal(SIGTERM, signal_handler)
### Main loop
#
# It continuously measures runtime metrics every MEASUREMENT_TIME_SEC,
# and reports them to Stackdriver Monitoring API every REPORT_TIME_SEC.
#
# However, if it detects a container termination signal,
# it *should* report the final metric
# right after the current measurement, and then exit normally.
reset()
while running:
measure()
if not running or report_time >= REPORT_TIME_SEC:
report()
reset()
exit(0)
| StarcoderdataPython |
4820049 | <filename>ALGOs/RNN/helper.py
import math
import random
# 1D dot
def dot(a, b):
ans = [ a[i]*b[i] for i in range(len(a)) ]
return [sum(ans)]
# 2D matrix product (dot product of two 2D lists)
def dot_2D(a, b):
matrix = []
for i in range(len(a)):
row = []
for j in range(len(b[0])):
element = sum( [a[i][k]*b[k][j] for k in range(len(a[0]))] )
row.append(element)
matrix.append(row)
return matrix
def tanh_(x):
return math.tanh(x)
def tanh_2D(matrix):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
matrix[i][j] = tanh_(matrix[i][j])
return matrix
def tanh(matrix):
return [tanh_(x) for x in matrix]
# 2D random array
def random_2D(n_rows, n_cols):
ans = []
for i in range(n_rows):
ans.append([ random.random() for j in range(n_cols) ])
return ans
# 1D random array
# Here n_cols = 1
def random_(n_rows, n_cols):
return [ random.random() for j in range(n_rows) ]
def add(a,b,c):
ans = [a[i]+b[i]+c[i] for i in range(len(a))]
return ans
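# Minimal usage sketch (not part of the original helpers): one vanilla-RNN step,
# h_next = tanh(W_hh . h + x + b), built only from the functions above. The sizes
# and names are illustrative assumptions, and x is assumed to be pre-projected.
if __name__ == "__main__":
    hidden_size = 4
    W_hh = random_2D(hidden_size, hidden_size)  # hidden-to-hidden weights
    h = random_(hidden_size, 1)                 # previous hidden state
    x = random_(hidden_size, 1)                 # already-projected input
    b = random_(hidden_size, 1)                 # bias
    Wh = [dot(row, h)[0] for row in W_hh]       # W_hh . h via the 1D dot helper
    h_next = tanh(add(Wh, x, b))                # elementwise add, then tanh
    print(h_next)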
| StarcoderdataPython |
132042 | <gh_stars>0
from leapp.actors import Actor
from leapp.libraries.common.rpms import has_package
from leapp.models import InstalledRedHatSignedRPM
from leapp.reporting import Report, create_report
from leapp import reporting
from leapp.tags import ChecksPhaseTag, IPUWorkflowTag
class CheckGrep(Actor):
"""
Check if Grep is installed. If yes, write information about non-compatible changes.
"""
name = 'checkgrep'
consumes = (InstalledRedHatSignedRPM,)
produces = (Report,)
tags = (ChecksPhaseTag, IPUWorkflowTag)
def process(self):
if has_package(InstalledRedHatSignedRPM, 'grep'):
create_report([
reporting.Title('Grep has incompatible changes in the next major version'),
reporting.Summary(
'If a file contains data improperly encoded for the current locale, and this is '
'discovered before any of the file\'s contents are output, grep now treats the file '
'as binary.\n'
'The \'grep -P\' no longer reports an error and exits when given invalid UTF-8 data. '
'Instead, it considers the data to be non-matching.\n'
'In locales with multibyte character encodings other than UTF-8, grep -P now reports '
'an error and exits instead of misbehaving.\n'
'When searching binary data, grep now may treat non-text bytes as line terminators. '
'This can boost performance significantly.\n'
'The \'grep -z\' no longer automatically treats the byte \'\\200\' as binary data.\n'
'Context no longer excludes selected lines omitted because of -m. For example, '
'\'grep "^" -m1 -A1\' now outputs the first two input lines, not just the first '
'line.\n'
),
reporting.Severity(reporting.Severity.LOW),
reporting.Tags([reporting.Tags.TOOLS]),
reporting.Remediation(hint='Please update your scripts to be compatible with the changes.')
])
| StarcoderdataPython |
3224284 |
example_grid = [
[5,0,0,0,0,7,0,0,0]
,[9,2,6,5,0,0,0,0,0]
,[3,0,0,8,0,9,0,2,0]
,[4,0,0,0,2,0,0,3,5]
,[0,3,5,1,0,4,9,7,0]
,[8,6,0,0,5,0,0,0,4]
,[0,4,0,3,0,8,0,0,2]
,[0,0,0,0,0,5,6,9,3]
,[0,0,0,6,0,0,0,0,7]]
def print_grid(grid):
for row in grid:
print(row)
def get_indices_of_empty_cells(grid):
return [(i,j) for i in range(0,9) for j in range(0,9) if grid[i][j] == 0]
def get_rows_with_empty_cells(grid):
indices = get_indices_of_empty_cells(grid)
return [[grid[indices[i][0]][j] for j in range(0,9)] for i in range(0, len(indices))]
def get_columns_with_empty_cells(grid):
indices = get_indices_of_empty_cells(grid)
return [[grid[i][indices[j][1]] for i in range(0,9)] for j in range(0, len(indices))]
def get_indices_of_boxes():
return [[(i + x, j + y) for i in range(3) for j in range(3)] for x in [0,3,6] for y in [0,3,6]]
def get_boxes_with_empty_cells(grid):
indices_of_boxes = get_indices_of_boxes()
indices_of_empty_cells = get_indices_of_empty_cells(grid)
indices_of_boxes_for_each_empty_cells = [indices_of_boxes[i]
for x in indices_of_empty_cells
for i in range(len(indices_of_boxes))
for y in indices_of_boxes[i] if x == y]
return [[grid[i][j] for (i,j) in x] for x in indices_of_boxes_for_each_empty_cells]
def get_clues_of_groups(grid):
rows = get_rows_with_empty_cells(grid)
columns = get_columns_with_empty_cells(grid)
boxes = get_boxes_with_empty_cells(grid)
return [[[x[i] for i in range(len(x)) if x[i] != 0] for x in [row, column, box]] for (row, column, box) in zip(rows, columns, boxes)]
def generate_pencil_marks(grid):
clues = get_clues_of_groups(grid)
all_clues = [set([y for i in range(len(x)) for y in x[i]]) for x in clues]
pencil_marks = [set(set({1, 2, 3, 4, 5, 6, 7, 8, 9}) - set(x)) for x in all_clues]
return pencil_marks
def get_indices_and_candidates(grid):
indices = get_indices_of_empty_cells(grid)
pencil_marks = generate_pencil_marks(grid)
return [(tuple_of_indices, candidate) for tuple_of_indices, candidate in zip(indices, pencil_marks)]
def insert_pencil_marks(grid):
indices_and_candidates = get_indices_and_candidates(grid)
for i in range(len(indices_and_candidates)):
grid[indices_and_candidates[i][0][0]][indices_and_candidates[i][0][1]] = indices_and_candidates[i][1]
return grid
print(insert_pencil_marks(example_grid)) | StarcoderdataPython |
4812346 | <gh_stars>1-10
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import json
import logging
from pathlib import Path
from bufr2geojson import transform as as_geojson
from wis2box.data.base import BaseAbstractData
LOGGER = logging.getLogger(__name__)
class ObservationDataBUFR(BaseAbstractData):
"""Observation data"""
def __init__(self, topic_hierarchy: str) -> None:
"""
Abstract data initializer
:param topic_hierarchy: `wis2box.topic_hierarchy.TopicHierarchy`
object
:returns: `None`
"""
super().__init__(topic_hierarchy)
self.mappings = {}
self.output_data = {}
def transform(self, input_data: Path) -> bool:
LOGGER.info('Processing BUFR data')
with open(input_data, 'rb') as fh:
results = as_geojson(fh.read(), serialize=False)
LOGGER.info('Iterating over GeoJSON features')
# TODO: iterate over item['geojson']
for collection in results: # results is an iterator
# for each iteration we have:
# - dict['id']
# - dict['id']['_meta']
# - dict['id']
for id, item in collection.items():
data_date = item['_meta']['data_date']
# date is range/period, split and get end date/time
if '/' in data_date:
data_date = data_date.split("/")[1]
# make sure we only include those items expected
items_to_remove = list()
for key2 in item:
if key2 not in ('geojson', '_meta'):
items_to_remove.append(key2)
for key2 in items_to_remove:
item.pop(key2)
# populate output data for publication
self.output_data[id] = item
self.output_data[id]['geojson'] = json.dumps(
self.output_data[id]['geojson'], indent=4)
self.output_data[id]['_meta']['relative_filepath'] = \
self.get_local_filepath(data_date)
return True
def get_local_filepath(self, date_):
yyyymmdd = date_[0:10] # date_.strftime('%Y-%m-%d')
return (Path(yyyymmdd) / 'wis' / self.topic_hierarchy.dirpath)
def process_data(data: str, discovery_metadata: dict) -> bool:
"""
Data processing workflow for observations
:param data: `str` of data to be processed
:param discovery_metadata: `dict` of discovery metadata MCF
:returns: `bool` of processing result
"""
d = ObservationDataBUFR(discovery_metadata)
LOGGER.info('Transforming data')
d.transform(data)
LOGGER.info('Publishing data')
d.publish()
return True
| StarcoderdataPython |
3393254 | from forked.cli import main
def test_main():
"""Worst test ever."""
main([])
| StarcoderdataPython |
1615361 | for _ in range(int(input())):
d = {}
s = input()
i = 0
while i < len(s):
        try:
            d[s[i]] += 1
        except KeyError:
            d[s[i]] = 1
        if d[s[i]] == 3:
            if i+1 < len(s) and s[i+1] == s[i]:
d[s[i]] = 0
i+=1
else:
print("FAKE")
break
i+=1
else:
print("OK") | StarcoderdataPython |
1721103 | from discord import Embed
import requests
from datetime import datetime, timezone
from ..bases import Definition, Webhook
TIMEFILE = "chunks.sqlite"
class DiscordWebhook(Webhook):
url: str
link: str
def _construct_embed(self, defi: Definition) -> Embed:
embed = Embed(title="Map updated!", url=self.link)
tfile = (defi.dest / TIMEFILE)
if tfile.exists():
embed.timestamp = datetime.fromtimestamp(
tfile.stat().st_mtime, tz=timezone.utc)
return embed
def push(self, defi: Definition = None) -> requests.Response:
" Calls the Discord webhook "
defi = self._parent or defi
assert defi
embed = self._construct_embed(defi)
r = requests.post(self.url, json={"embeds": [embed.to_dict()]})
r.raise_for_status()
return r
Webhook.specs["discord"] = DiscordWebhook
| StarcoderdataPython |
3293138 | <filename>tests/__main__.py
#!/usr/bin/env python
"""
Runs all the tests for every type of gym environment.
Tests for the screen environment will only be run if
the screen environment was registered in the gym registry
"""
from gym import envs
import unittest
from tests.grid_env_test import GridGymTest
# from tests.ram_env_test import RamGymTest # Ram environment is not ready yet.
# only test the screen environment if its available
if "agario-screen-v0" in [env_spec.id for env_spec in envs.registry.all()]:
from tests.screen_env_test import ScreenGymTest
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1741257 | """
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
| StarcoderdataPython |
154285 | <filename>src/algoritmia/datastructures/trees/boundedaritytree.py
from algoritmia.datastructures.trees.interfaces import IRootedTree
class BoundedArityTree(IRootedTree): #[bounded
def __init__(self, arity: "int"=0, seq: "Iterable<T>"=[],
bounded_arity_tree: "BoundedArityTree<T>"=None, root_index: "int"=0):
if bounded_arity_tree != None:
self._arity = bounded_arity_tree._arity
self._list = bounded_arity_tree._list
else:
self._arity = arity
self._list = list(seq)
self._root_index = root_index
root = property(lambda self: self._list[self._root_index])
def succs(self, v: "T") -> "Iterable<T>":
for i in range(self._root_index, len(self._list)):
if v == self._list[i]:
first_child = self._arity * i + 1
for i in range(first_child, min(len(self._list), first_child + self._arity)):
if self._list[i] != None:
yield self._list[i]
break
def preds(self, v: "T") -> "empty Iterable<T> or Iterable<T> with a single item":
if v != self._list[self._root_index]:
for i in range(self._root_index, len(self._list)):
if v == self._list[i]:
yield self._list[(i-1) // self._arity]
def in_degree(self, v: "T") -> "0 or 1":
return 1 if v != self._list[self._root_index] else 0
def out_degree(self, v: "T") -> "int":
return sum(1 for _ in self.succs(v))
def subtrees(self) -> "Iterable<BoundedArityTree<T>>":
first_child = self._arity * self._root_index + 1
for i in range(first_child, min(len(self._list), first_child + self._arity)):
if self._list[i] != None:
yield BoundedArityTree(bounded_arity_tree=self, root_index=i)
def tree(self, v: "T") -> "BoundedArityTree<T>":
i = self._list.index(v)
if i >= self._root_index:
return BoundedArityTree(bounded_arity_tree=self, root_index=i)
def __repr__(self) -> "str":
return "{}({}, {!r}, root_index={!r})".format(
self.__class__.__name__, self._arity, self._list, self._root_index) #]bounded | StarcoderdataPython |
34305 | from collections import defaultdict
from .common import IGraph
''' Remove edges to create even trees.
You are given a tree with an even number of nodes. Consider each connection between a parent and child node to be an "edge". You
would like to remove some of these edges, such that the disconnected subtrees that remain each have an even number of nodes.
For example, suppose your input is the following tree:
1
/ \
2 3
/ \
4 5
/ | \
6 7 8
In this case, if we remove the edge (3, 4), both resulting subtrees will be even.
Write a function that returns the maximum number of edges you can remove while still satisfying this requirement.
'''
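# Worked example for the docstring tree above: the subtree sizes are
# node 1 -> 8, node 3 -> 6, node 4 -> 4, and nodes 2, 5, 6, 7, 8 -> 1 each.
# Only the edges (1, 3) and (3, 4) hang an even-sized subtree, so at most two
# edges can be removed, leaving components {1, 2}, {3, 5} and {4, 6, 7, 8}.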
def max_edges1(graph):
def traverse(graph : IGraph, cur, result):
descendants = 0
for child in graph.neighbors(cur):
num_nodes, result = traverse(graph, child, result)
result[child] += num_nodes - 1
descendants += num_nodes
return descendants + 1, result
start = graph.root()
vertices = defaultdict(int)
_, descendants = traverse(graph, start, vertices)
return len([val for val in descendants.values() if val % 2 == 1]) | StarcoderdataPython |
1692665 | import numpy as np
import gym
import itertools as it
from dqn.dqn_agent import DQNAgent
from tensorboard_evaluation import *
from dqn.networks import NeuralNetwork, TargetNetwork
from utils import EpisodeStats
def run_episode(env, agent, deterministic, do_training=True, rendering=False, max_timesteps=1000):
"""
    This method runs one episode for a gym environment.
    deterministic == True => agent executes only greedy actions according to the Q function approximator (no random actions).
do_training == True => train agent
"""
stats = EpisodeStats() # save statistics like episode reward or action usage
state = env.reset()
step = 0
while True:
action_id = agent.act(state=state, deterministic=deterministic)
next_state, reward, terminal, info = env.step(action_id)
reward = reward if not terminal else -10 # I added
if do_training:
agent.train(state, action_id, next_state, reward, terminal)
stats.step(reward, action_id)
state = next_state
if rendering:
env.render()
if terminal or step > max_timesteps:
break
step += 1
print("Episodes before terminal: ", step)
return stats
def train_online(env, agent, num_episodes, model_dir="./models_cartpole", tensorboard_dir="./tensorboard"):
if not os.path.exists(model_dir):
os.mkdir(model_dir)
print("... train agent")
tensorboard = Evaluation(os.path.join(tensorboard_dir, "train"), ["episode_reward", "valid_episode_reward", "a_0", "a_1"])
# training
for i in range(num_episodes):
print("episode: ",i)
stats_training = run_episode(env, agent, deterministic=False, do_training=True)
if i % 10 == 0:
valid_reward = 0
for j in range(5):
valid_stats = run_episode(env, agent, deterministic=True, do_training=False)
valid_reward += valid_stats.episode_reward
#stats_eval = run_episode(env, agent, deterministic=True, do_training=False)
tensorboard.write_episode_data(i, eval_dict={"episode_reward" : stats_training.episode_reward,
"valid_episode_reward" : valid_reward/5,
"a_0" : stats_training.get_action_usage(0),
"a_1" : stats_training.get_action_usage(1)})
# TODO: evaluate your agent once in a while for some episodes using run_episode(env, agent, deterministic=True, do_training=False) to
# check its performance with greedy actions only. You can also use tensorboard to plot the mean episode reward.
# ...
# store model every 100 episodes and in the end.
if i % 100 == 0 or i >= (num_episodes - 1):
agent.saver.save(agent.sess, os.path.join(model_dir, "dqn_agent.ckpt"))
tensorboard.close_session()
if __name__ == "__main__":
environment = "CartPole-v0"
# environment = "MountainCar-v0"
env = gym.make(environment).unwrapped
state_dim = env.observation_space.shape[0]
num_actions = env.action_space.n
num_episodes = 100
Q = NeuralNetwork(state_dim, num_actions)
Q_target = TargetNetwork(state_dim, num_actions)
agent = DQNAgent(Q, Q_target, num_actions)
train_online(env, agent, num_episodes)
| StarcoderdataPython |
3258366 | <filename>Operators/ExampleFaceDetectOperator/FaceDetectOperator.py
from abc import ABC
import cv2
import numpy as np
from Operators.DummyAlgorithmWithModel import DummyAlgorithmWithModel
from Operators.ExampleFaceDetectOperator.PostProcessUtils import get_anchors, regress_boxes
from Utils.GeometryUtils import center_pad_image_with_specific_base, \
nms, resize_with_long_side, force_convert_image_to_bgr
from Utils.InferenceHelpers import TritonInferenceHelper
class FaceDetectOperator(DummyAlgorithmWithModel, ABC):
name = 'FaceDetect'
__version__ = 'v1.0.20210319'
def __init__(self, _inference_config, _is_test):
super().__init__(_inference_config, _is_test)
class GeneralUltraLightFaceDetect(FaceDetectOperator):
"""
    This model is extremely lightweight and fast, but is more likely to miss detections.
    Suitable for scenarios with modest accuracy requirements where faces are clearly visible.
"""
    name = 'UltraLightFaceDetect-based face detection in natural scenes'
__version__ = 'v1.0.20210319'
def __init__(self, _inference_config, _is_test, _score_threshold=0.7, _iou_threshold=0.5):
super().__init__(_inference_config, _is_test)
self.score_threshold = _score_threshold
self.iou_threshold = _iou_threshold
self.candidate_image_size = (320, 240)
def get_inference_helper(self):
if self.inference_config['name'] == 'triton':
inference_helper = TritonInferenceHelper('UltraLightFaceDetect',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'UltraLightFaceDetect', 1)
            inference_helper.add_image_input('INPUT__0', (320, 240, 3), 'input image for detection',
([127, 127, 127], [128, 128, 128]))
inference_helper.add_output('OUTPUT__0', (1, 4420, 2), 'detect score')
inference_helper.add_output('OUTPUT__1', (1, 4420, 4), 'box predict')
self.inference_helper = inference_helper
else:
raise NotImplementedError(
f"{self.inference_config['name']} helper for ultra light face detect not implement")
def execute(self, _image):
to_return_result = {
'locations': [],
}
padded_image, (width_pad_ratio, height_pad_ratio) = center_pad_image_with_specific_base(
_image,
_width_base=32,
_height_base=24,
_output_pad_ratio=True
)
resized_image = cv2.resize(_image, self.candidate_image_size)
resized_shape = resized_image.shape[:2]
resize_h, resize_w = resized_shape
candidate_image = force_convert_image_to_bgr(resized_image)
if isinstance(self.inference_helper, TritonInferenceHelper):
rgb_image = cv2.cvtColor(candidate_image, cv2.COLOR_BGR2RGB)
result = self.inference_helper.infer(_need_tensor_check=False, INPUT__0=rgb_image.astype(np.float32))
score_map = result['OUTPUT__0'].squeeze()
box = result['OUTPUT__1'].squeeze()
else:
raise NotImplementedError(
f"{self.inference_helper.type_name} helper for ultra light face detect not implement")
        # index 0 is background, index 1 is face
box_score_map = score_map[..., 1]
available_box = box_score_map > self.score_threshold
if np.sum(available_box) == 0:
return to_return_result
filter_scores = box_score_map[available_box]
filtered_box = box[available_box, :]
filtered_box_without_normalization = filtered_box * (resize_w, resize_h, resize_w, resize_h)
final_box_index = nms(filtered_box_without_normalization, filter_scores, _nms_threshold=self.iou_threshold)
final_boxes = filtered_box[final_box_index]
final_scores = filter_scores[final_box_index]
for m_box, m_score in zip(final_boxes, final_scores):
m_box_width = m_box[2] - m_box[0]
m_box_height = m_box[3] - m_box[1]
m_box_center_x = m_box[0] + m_box_width / 2 - width_pad_ratio
m_box_center_y = m_box[1] + m_box_height / 2 - height_pad_ratio
box_info = {
'degree': 0,
'center_x': m_box_center_x,
'center_y': m_box_center_y,
'box_height': m_box_height,
'box_width': m_box_width,
}
to_return_result['locations'].append({
'box_info': box_info,
'score': m_score,
})
return to_return_result
class GeneralRetinaFaceDetect(FaceDetectOperator):
"""
    This model is larger and more accurate.
"""
    name = 'RetinaFace-based face detection in natural scenes'
__version__ = 'v1.0.20210319'
def __init__(self, _inference_config, _is_test, _score_threshold=0.7, _iou_threshold=0.5):
super().__init__(_inference_config, _is_test)
self.score_threshold = _score_threshold
self.iou_threshold = _iou_threshold
self.candidate_image_size = (512, 512)
def get_inference_helper(self):
if self.inference_config['name'] == 'triton':
inference_helper = TritonInferenceHelper('RetinaFace',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'RetinaFace', 1)
            inference_helper.add_image_input('INPUT__0', (512, 512, 3), 'input image for detection',
([0, 0, 0], [1, 1, 1]))
inference_helper.add_output('OUTPUT__0', (1, 16128, 2), 'face classification')
inference_helper.add_output('OUTPUT__1', (1, 16128, 4), 'box predict')
inference_helper.add_output('OUTPUT__2', (1, 16128, 10), 'landmark')
self.inference_helper = inference_helper
else:
raise NotImplementedError(
f"{self.inference_config['name']} helper for retina face detect not implement")
def execute(self, _image):
to_return_result = {
'locations': [],
}
resized_image = resize_with_long_side(_image, 512)
resized_shape = resized_image.shape[:2]
resize_h, resize_w = resized_shape
padded_image, (width_pad_ratio, height_pad_ratio) = center_pad_image_with_specific_base(
resized_image,
_width_base=512,
_height_base=512,
_output_pad_ratio=True
)
candidate_image = force_convert_image_to_bgr(padded_image)
candidate_shape = candidate_image.shape[:2]
if isinstance(self.inference_helper, TritonInferenceHelper):
rgb_image = cv2.cvtColor(candidate_image, cv2.COLOR_BGR2RGB)
result = self.inference_helper.infer(_need_tensor_check=False, INPUT__0=rgb_image.astype(np.float32))
filter_scores = result['OUTPUT__0'].squeeze()
box = result['OUTPUT__1'].squeeze()
else:
raise NotImplementedError(
f"{self.inference_helper.type_name} helper for retina face detect not implement")
anchors = get_anchors(np.array(candidate_image.shape[:2]))
all_boxes, _ = regress_boxes(anchors, box, None, candidate_image.shape[:2])
exp_box_score = np.exp(filter_scores)
face_classification_index = np.argmax(exp_box_score, axis=-1)
max_classification_score = np.max(exp_box_score, axis=-1)
candidate_box_index = (face_classification_index == 0) & (max_classification_score > self.score_threshold)
filter_scores = max_classification_score[candidate_box_index]
filtered_box = all_boxes[candidate_box_index]
if len(filter_scores) == 0:
return to_return_result
filtered_box_without_normalization = filtered_box * (resize_w, resize_h, resize_w, resize_h)
final_box_index = nms(filtered_box_without_normalization, filter_scores, _nms_threshold=self.iou_threshold)
final_boxes = filtered_box[final_box_index]
final_scores = filter_scores[final_box_index]
for m_box, m_score in zip(final_boxes, final_scores):
m_box_width = m_box[2] - m_box[0]
m_box_height = m_box[3] - m_box[1]
m_box_center_x = (m_box[0] + m_box_width / 2 - width_pad_ratio) * candidate_shape[1] / resized_shape[1]
m_box_center_y = (m_box[1] + m_box_height / 2 - height_pad_ratio) * candidate_shape[0] / resized_shape[0]
box_info = {
'degree': 0,
'center_x': m_box_center_x,
'center_y': m_box_center_y,
'box_height': m_box_height * candidate_shape[0] / resized_shape[0],
'box_width': m_box_width * candidate_shape[1] / resized_shape[1],
}
to_return_result['locations'].append({
'box_info': box_info,
'score': m_score,
})
return to_return_result
if __name__ == '__main__':
from argparse import ArgumentParser
from Utils.AnnotationTools import draw_rotated_bbox
ag = ArgumentParser('Face Detect Example')
    ag.add_argument('-i', '--image_path', dest='image_path', type=str, required=True, help='path to a local image')
ag.add_argument('-u', '--triton_url', dest='triton_url', type=str, required=True, help='triton url')
    ag.add_argument('-p', '--triton_port', dest='triton_port', type=int, default=8001, help='triton grpc port')
args = ag.parse_args()
img = cv2.imread(args.image_path)
ultra_light_face_detect_handler = GeneralUltraLightFaceDetect({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True, 0.7, 0.5)
retina_face_detect_handler = GeneralRetinaFaceDetect({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True, 0.7, 0.5)
ultra_light_face_detected_boxes = ultra_light_face_detect_handler.execute(img)['locations']
ultra_light_face_result = img.copy()
for m_box in ultra_light_face_detected_boxes:
draw_rotated_bbox(ultra_light_face_result, m_box['box_info'], (255, 0, 0), 2)
cv2.imshow('ultra_light_face_result', ultra_light_face_result)
retina_detected_face_boxes = retina_face_detect_handler.execute(img)['locations']
retina_detected_face_result = img.copy()
for m_box in retina_detected_face_boxes:
draw_rotated_bbox(retina_detected_face_result, m_box['box_info'], (0, 0, 255), 2)
cv2.imshow('retina_detected_face_result', retina_detected_face_result)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
154764 | """
Count the number of ways to tile the floor of size n x m using 1 x m size tiles
Given a floor of size n x m and tiles of size 1 x m. The problem is to count the number of ways to tile the
given floor using 1 x m tiles. A tile can either be placed horizontally or vertically.
Both n and m are positive integers and 2 < = m.
Examples:
Input : n = 2, m = 3
Output : 1
Only one combination to place
two tiles of size 1 x 3 horizontally
on the floor of size 2 x 3.
Input : n = 4, m = 4
Output : 2
1st combination:
All tiles are placed horizontally
2nd combination:
All tiles are placed vertically.
"""
"""
This problem is mainly a more generalized approach to the Tiling Problem.
Approach: For a given value of n and m, the number of ways to tile the
floor can be obtained from the following relation.
| 1, 1 < = n < m
count(n) = | 2, n = m
| count(n-1) + count(n-m), m < n
"""
def tiling(n,m):
count=[]
for i in range(n+2):
count.append(0)
count[0]=0
for i in range(1,n+1):
        # recursive case
        if i > m:
            count[i]=count[i-1]+count[i-m]
        # base case
        elif i < m:
count[i]=1
# i == m
else:
count[i]=2
return count[n]
# print(tiling(7,4))
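# Worked values of the recurrence above, matching the docstring examples:
#   tiling(2, 3) -> 1   (two 1 x 3 tiles, both horizontal)
#   tiling(4, 4) -> 2   (all horizontal, or all vertical)
#   tiling(7, 4) -> 5   (count[1..7] = 1, 1, 1, 2, 3, 4, 5)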
"""
Count number of ways to fill a “n x 4” grid using “1 x 4” tiles
Given a number n, count number of ways to fill a n x 4 grid using 1 x 4 tiles
Examples:
Input : n = 1
Output : 1
Input : n = 2
Output : 1
We can only place both tiles horizontally
Input : n = 3
Output : 1
We can only place all tiles horizontally.
Input : n = 4
Output : 2
The two ways are :
1) Place all tiles horizontally
2) Place all tiles vertically.
Input : n = 5
Output : 3
    We can fill a 5 x 4 grid in the following ways :
    1) Place all 5 tiles horizontally
    2) Place first 4 vertically and 1 horizontally.
    3) Place first 1 horizontally and 4 vertically.
Let “count(n)” be the count of ways to place tiles on a “n x 4” grid,
following two cases arise when we place the first tile.
Place the first tile horizontally : If we place first tile horizontally,
the problem reduces to “count(n-1)”
Place the first tile vertically : If we place first tile vertically,
then we must place 3 more tiles vertically. So the problem reduces to
“count(n-4)”
Therefore, count(n) can be written as below.
count(n) = 1 if n = 1 or n = 2 or n = 3
count(n) = 2 if n = 4
count(n) = count(n-1) + count(n-4)
"""
def titling(n):
count=[0 for i in range(n+1)]
for i in range(1,n+1):
if i<=3:
count[i]=1
elif i==4:
            count[i]=2
else:
count[i]=count[i-1]+count[i-4]
return count[n] | StarcoderdataPython |
3205052 | import logging
import os
import shutil
from tempfile import mkdtemp
from service_buddy.ci.ci import BuildCreator
from service_buddy.ci.travis_build_creator import TravisBuildCreator
from service_buddy.service import loader
from service_buddy.service.service import Service
from service_buddy.util import pretty_printer
from testcase_parent import ParentTestCase
DIRNAME = os.path.dirname(os.path.abspath(__file__))
class TravisBuildTestCase(ParentTestCase):
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
super(TravisBuildTestCase, cls).setUpClass()
cls.test_resources = os.path.join(DIRNAME, '../resources/travis_build_test')
cls.yml_folder = os.path.join(cls.test_resources, "app1", "service")
cls.app_dir = os.path.join(cls.test_resources, "app1")
def test_travis_file_detection(self):
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
test_service = Service(app="app1", role="service", definition={"service-type": "test"})
build_creator.create_project(service_definition=test_service, app_dir=self.app_dir)
self._assertInYaml({"ubar":"Overwrote existing travis.yml"},self.yml_folder)
temp = mkdtemp()
loader.safe_mkdir(test_service.get_service_directory(temp))
build_creator._get_default_build_creator().dry_run = False
build_creator.create_project(service_definition=test_service, app_dir=temp)
def test_travis_arg_render(self):
items = "infra-buddy validate-template --service-template-directory . --service-type {role}"
item2 = "pyb install_dependencies package -P build_number=0.1.${TRAVIS_BUILD_NUMBER}"
list_args = []
TravisBuildCreator._append_rendered_arguments(list_args, items, {'role': 'vbar'})
self.assertTrue("vbar" in list_args[0],"Did not render properly")
TravisBuildCreator._append_rendered_arguments(list_args, item2, {'role': 'vbar'})
self.assertTrue("${TRAVIS_BUILD_NUMBER}" in list_args[1],"Did not render properly")
def test_yml_update(self):
temp = mkdtemp()
source = os.path.join(self.yml_folder, '.travis.yml')
destination = os.path.join(temp, '.travis.yml')
shutil.copy(source, destination)
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
build_creator._get_default_build_creator()._write_deploy_stanza(temp)
self._assertInYaml({"deploy":"Cound not find deploy stanza"},temp)
def _assertInList(self, param, line_list, error_message):
for line in line_list:
if param in line:
return
self.fail(error_message)
def _assertInYaml(self, expected_error_msg, directory):
destination = os.path.join(directory, '.travis.yml')
with open(destination) as desty:
readlines = desty.readlines()
            for expected, error_msg in expected_error_msg.items():
self._assertInList(expected, readlines, error_msg)
| StarcoderdataPython |
3204169 | #!/usr/bin/env python
kingdoms = ['Bacteria', 'Protozoa', 'Chromista', 'Plantae', 'Fungi', 'Animalia']
print(kingdoms[-6])
print(kingdoms[-1])
print(kingdoms[-6:-3])
print(kingdoms[-4:-1])
print(kingdoms[-2:])
| StarcoderdataPython |
34980 | from pageobject import PageObject
from homepage import HomePage
from locatormap import LocatorMap
from robot.api import logger
class LoginPage():
PAGE_TITLE = "Login - PageObjectLibrary Demo"
PAGE_URL = "/login.html"
# these are accessible via dot notaton with self.locator
# (eg: self.locator.username, etc)
_locators = {
"username": "id=id_username",
"password": "<PASSWORD>",
"submit_button": "id=id_submit",
}
def __init__(self):
self.logger = logger
self.po = PageObject()
self.se2lib = self.po.se2lib
self.locator = LocatorMap(getattr(self, "_locators", {}))
def navigate_to(self, url):
logger.console ("Navigating to %s".format(url))
self.se2lib.go_to(url)
if 'yahoo' in url:
logger.console ("Navigating to homepage")
return HomePage()
def create_browser(self, browser_name):
self.se2lib.create_webdriver(browser_name)
def enter_username(self, username):
"""Enter the given string into the username field"""
self.se2lib.input_text(self.locator.username, username)
def enter_password(self, password):
"""Enter the given string into the password field"""
self.se2lib.input_text(self.locator.password, password)
def click_the_submit_button(self):
"""Click the submit button, and wait for the page to reload"""
with self.po._wait_for_page_refresh():
self.se2lib.click_button(self.locator.submit_button)
return HomePage() | StarcoderdataPython |
157945 | import pygame
from data.clip import clip
def load_tileset(path):
tileset_img = pygame.image.load(path + 'tileset.png').convert()
tileset_img.set_colorkey((0, 0, 0))
width = tileset_img.get_width()
tile_size = [16, 16]
tile_count = int((width + 1) / (tile_size[0] + 1))
images = [clip(tileset_img, i * (tile_size[0] + 1), 0, tile_size[0], tile_size[1]) for i in range(tile_count)]
return images
| StarcoderdataPython |
1773067 | import math
import os
from decimal import Decimal
from django import template
from django.utils import timezone
from djmoney.money import Money
from app import settings
from app.enums import FileStatus
from app.utils import get_site_url
from event.enums import ApplicationStatus, DietType, TshirtSize, CompanyTier
from user.enums import SexType
register = template.Library()
@register.simple_tag
def settings_value(name):
return getattr(settings, name, "")
@register.filter
def time_left(time: timezone.datetime):
return time - timezone.now()
@register.filter
def timedelta_display(time: timezone.timedelta):
seconds = time.total_seconds()
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "{:02d}:{:02d}:{:02d}".format(math.floor(h), math.floor(m), math.floor(s))
@register.filter
def days_left(timedelta: timezone.timedelta):
return int(timedelta.total_seconds() // (60 * 60 * 24))
@register.filter
def file_name(value):
return os.path.basename(value.file.name)
@register.filter
def display_departments(departments):
return " / ".join([d.name for d in departments])
@register.filter
def response_title(code):
if float(code) / 100.0 == 2.0:
return "Success " + str(code)
return "Error " + str(code)
@register.filter
def application_status(status):
return ApplicationStatus(status).name.capitalize()
@register.filter
def user_sex(status):
return SexType(status).name.capitalize()
@register.filter
def application_tshirt(status):
return TshirtSize(status).name.upper()
@register.filter
def application_diet(status):
return DietType(status).name.replace("_", "-").capitalize()
@register.filter
def company_tier(status):
return CompanyTier(status).name.capitalize()
@register.simple_tag
def site_url():
return get_site_url()
@register.filter
def money(value):
if isinstance(value, Money):
value = value.amount
return "{:,.2f}".format(value).replace(",", " ").replace(".", ",")
@register.simple_tag
def money_vat(value, vat):
if isinstance(value, Money):
value = value.amount
value = (value * vat) / Decimal("100.0")
return "{:,.2f}".format(value).replace(",", " ").replace(".", ",")
@register.simple_tag
def money_total(value, vat):
if isinstance(value, Money):
value = value.amount
value += (value * vat) / Decimal("100.0")
return "{:,.2f}".format(value).replace(",", " ").replace(".", ",")
@register.filter
def code(value):
return " ".join(value[i : i + 4] for i in range(0, len(value), 4))
| StarcoderdataPython |
98177 | <reponame>TylerPham2000/zulip<gh_stars>1000+
import time
from unittest import TestCase, mock
from scripts.lib.check_rabbitmq_queue import CRITICAL, OK, UNKNOWN, WARNING, analyze_queue_stats
class AnalyzeQueueStatsTests(TestCase):
def test_no_stats_available(self) -> None:
result = analyze_queue_stats("name", {}, 0)
self.assertEqual(result["status"], UNKNOWN)
def test_queue_stuck(self) -> None:
"""Last update > 5 minutes ago and there's events in the queue."""
result = analyze_queue_stats("name", {"update_time": time.time() - 301}, 100)
self.assertEqual(result["status"], CRITICAL)
self.assertIn("queue appears to be stuck", result["message"])
def test_queue_just_started(self) -> None:
"""
We just started processing a burst of events, and haven't processed enough
to log productivity statistics yet.
"""
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"recent_average_consume_time": None,
},
10000,
)
self.assertEqual(result["status"], OK)
def test_queue_normal(self) -> None:
"""10000 events and each takes a second => it'll take a long time to empty."""
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
10000,
)
self.assertEqual(result["status"], CRITICAL)
self.assertIn("clearing the backlog", result["message"])
# If we're doing 10K/sec, it's OK.
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 10000,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 0.0001,
},
10000,
)
self.assertEqual(result["status"], OK)
# Verify logic around whether it'll take MAX_SECONDS_TO_CLEAR to clear queue.
with mock.patch.dict("scripts.lib.check_rabbitmq_queue.MAX_SECONDS_TO_CLEAR", {"name": 10}):
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 11,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
11,
)
self.assertEqual(result["status"], WARNING)
self.assertIn("clearing the backlog", result["message"])
result = analyze_queue_stats(
"name",
{
"update_time": time.time(),
"current_queue_size": 9,
"queue_last_emptied_timestamp": time.time() - 10000,
"recent_average_consume_time": 1,
},
9,
)
self.assertEqual(result["status"], OK)
| StarcoderdataPython |
20307 | # Princess No Damage Skin (30-Days)
success = sm.addDamageSkin(2432803)
if success:
sm.chat("The Princess No Damage Skin (30-Days) has been added to your account's damage skin collection.")
| StarcoderdataPython |
3273254 | #!/usr/bin/env python
"""
Example script that processes the servers of a server or group nickname.
"""
import sys
import os
from pprint import pprint
import easy_server
def main():
"""Main function"""
if len(sys.argv) < 2:
print("Usage: {} SERVERFILE [NICKNAME]".format(sys.argv[0]))
sys.exit(2)
server_file = sys.argv[1]
if len(sys.argv) > 2:
nickname = sys.argv[2]
else:
nickname = None
try:
esf_obj = easy_server.ServerFile(server_file)
except easy_server.ServerFileException as exc:
print("Error: {}".format(exc))
return 1
if nickname:
es_list = esf_obj.list_servers(nickname)
else:
es_list = esf_obj.list_default_servers()
for es in es_list:
nickname = es.nickname
        host = es.secrets['host']
username = es.secrets['username']
password = es.secrets['password']
print("Server {n}: host={h}, username={u}, password=********".
format(n=nickname, h=host, u=username))
return 0
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
3301842 | <gh_stars>0
from pathlib import Path
import sys
import platform
import os
import time
import datetime
def readIgnores():
with open("ignore.txt") as file:
ignoreLines = file.readlines()
ignorePaths = {}
for line in ignoreLines:
ignorePaths[line.rstrip()] = True
return(ignorePaths)
def CompareDirectories(path1, path2, ignorePaths):
for item1 in path1.iterdir():
if str(item1) in ignorePaths:
continue
found = False
for item2 in path2.iterdir():
if item1.name == item2.name:
found = True
if item1.suffix == ".jpg": # I rarely change .jpg files but they might still have different modified times for some reason -> ignore them
break
if item1.is_dir():
CompareDirectories(item1, item2, ignorePaths)
else:
time1 = datetime.datetime.fromtimestamp(item1.stat().st_mtime)
time2 = datetime.datetime.fromtimestamp(item2.stat().st_mtime)
difference = time1 - time2
# A little ugly solution here, but sometimes the last modified time differs 1 second or about one hour even if they were modified at the same time. The latter is probably related to the summer time and different memory technologies. Let's ignore those.
if difference > datetime.timedelta(0,2) and not (difference > datetime.timedelta(0,58,0,0,59) and difference <= datetime.timedelta(0,0,0,0,0,1)):
print("diff mod time\t" + str(item1) + " | " + str(difference))
break
if found == False:
print("not found\t" + str(item1))
def main():
path1 = sys.argv[1]
path2 = sys.argv[2]
ignorePaths = readIgnores()
CompareDirectories(Path(path1), Path(path2), ignorePaths)
main()
| StarcoderdataPython |
1703623 | import numpy as np
def diamondarray(dimension=1,fill=1,unfill=0):
""" Create a diamond array using a square dimension.
Fill and unfill values can be integer or float. """
nullresult=np.zeros(1)
#// verify inputs
try:
if not isinstance(dimension, (int, np.integer)):
            dimension=int(dimension)
if not isinstance(fill, (int, float, np.integer)):
fill=int(fill)
if not isinstance(unfill, (int, float, np.integer)):
unfill=int(unfill)
except:
return nullresult
    #// reject non-positive dimensions
    if dimension < 1:
        return nullresult
#// initialize 2d array
a=np.zeros((dimension,dimension))
for row in range(dimension):
for col in range(dimension):
a[row,col]=unfill
#// find the middle of the array
midpoint=(dimension-1)/2
#// initialize an offset
offset=-1
offsetstep=1
#// loop through rows and columns
for row in range(dimension):
if dimension%2 == 0 and row == np.ceil(midpoint):
#// repeat offset for second midpoint row
offset=offset
else:
if row <= np.ceil(midpoint):
#// increase offset for each row for top
offset=offset+offsetstep
else:
#// decrease offset for each row for bottom
offset=offset-offsetstep
for col in range(dimension):
#// set value to one
if dimension%2 == 0:
if col <= np.floor(midpoint):
if col == np.floor(midpoint)-offset:
a[row,col]=fill
if col >= np.ceil(midpoint):
if col == int(midpoint)+offset+1:
a[row,col]=fill
else:
if col == midpoint+offset or col == midpoint-offset:
                    a[row,col]=fill
return a
def bisectorarray(dimension=1,vertical=True,horizontal=True,fill=1,unfill=0):
""" Create an array using square dimension with the midpoint column
filled. Fill and unfill values can be integer or float. """
nullresult=np.zeros(1)
#// verify inputs
try:
if not isinstance(dimension, (int, np.integer)):
            dimension=int(dimension)
if not isinstance(fill, (int, float, np.integer)):
fill=int(fill)
if not isinstance(unfill, (int, float, np.integer)):
unfill=int(unfill)
except:
return nullresult
#// initialize 2d array
a=np.zeros((dimension,dimension))
for row in range(dimension):
for col in range(dimension):
a[row,col]=unfill
#// find the middle of the array
midpoint=(dimension-1)/2
#// loop through rows and columns
for row in range(dimension):
for col in range(dimension):
#// set value to one
if (col == np.floor(midpoint) or col == np.ceil(midpoint)) and vertical==True:
a[row,col]=fill
if (row == np.floor(midpoint) or row == np.ceil(midpoint)) and horizontal==True:
a[row,col]=fill
return a
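# Minimal usage sketch (not part of the original module). The expected diamond
# below was worked out by hand for the default fill/unfill values.
if __name__ == "__main__":
    # diamondarray(5) outlines a diamond:
    # [[0. 0. 1. 0. 0.]
    #  [0. 1. 0. 1. 0.]
    #  [1. 0. 0. 0. 1.]
    #  [0. 1. 0. 1. 0.]
    #  [0. 0. 1. 0. 0.]]
    print(diamondarray(5))
    # bisectorarray(5) fills the vertical and horizontal midlines (a plus sign).
    print(bisectorarray(5))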
| StarcoderdataPython |
44472 | <filename>enhancements/predict.py
import numpy as np
from sklearn.preprocessing import StandardScaler
import scipy.io
import tensorflow as tf
tf.random.set_seed(10)
import os
import sys
sys.path.append('../')
from csen_regressor import model
import argparse
from sklearn.model_selection import train_test_split
# INITIALIZATION
# construct the argument parser and parse the argument
ap = argparse.ArgumentParser()
ap.add_argument('--method', default='CSEN',
help="Method for the regression: CL-CSEN, CSEN, CL-CSEN-1D, CSEN-1D, SVR.")
ap.add_argument('--feature_type', help="Features extracted by the network (DenseNet121, VGG19, ResNet50).")
ap.add_argument('--weights', default=False, help="Evaluate the model.")
args = vars(ap.parse_args())
modelType = args['method'] # CL-CSEN, CSEN, and SVR.
feature_type = args['feature_type']
weights = args['weights']
MR = '0.5' # Measurement rate for CL-CSEN and CSEN approaches.
if modelType == 'CL-CSEN':
from cl_csen_regressor import model
elif modelType == 'CL-CSEN-1D':
from cl_csen_1d_regressor import model
elif modelType == 'CSEN':
from csen_regressor import model
elif modelType == 'CSEN-1D':
from csen_1d_regressor import model
elif modelType == 'SVR':
from competing_regressor import svr as model
# From where to load weights
weightsDir = '../weights/' + modelType + '/'
# Init the model
modelFold = model.model()
weightPath = weightsDir + feature_type + '_' + MR + '_1' + '.h5'
modelFold.load_weights(weightPath)
# Load image to be evaluated
data = '../CSENdata-2D/' + feature_type
dataPath = data + '_mr_' + MR + '_run1' + '.mat'
dic_label = scipy.io.loadmat('../CSENdata-2D/dic_label' + '.mat')["ans"]
x_train, X_val, x_test, y_train, y_val, y_test = None, None, None, None, None, None
Data = scipy.io.loadmat(dataPath)
x_dic = Data['x_dic'].astype('float32')
x_train = Data['x_train'].astype('float32')
x_test = Data['x_test'].astype('float32')
y_dic = Data['dicRealLabel'].astype('float32')
y_train = Data['trainRealLabel'].astype('float32')
y_test = Data['testRealLabel'].astype('float32')
print('\n\n\n')
print('Loaded dataset:')
print(len(x_train), ' train')
print(len(x_test), ' test')
# Partition for the validation.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.2, random_state = 1)
# Data normalization.
m = x_train.shape[1]
n = x_train.shape[2]
x_dic = np.reshape(x_dic, [len(x_dic), m * n])
x_train = np.reshape(x_train, [len(x_train), m * n])
x_val = np.reshape(x_val, [len(x_val), m * n])
x_test = np.reshape(x_test, [len(x_test), m * n])
scaler = StandardScaler().fit(np.concatenate((x_dic, x_train), axis = 0))
x_dic = scaler.transform(x_dic)
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
x_dic = np.reshape(x_dic, [len(x_dic), m, n])
x_train = np.reshape(x_train, [len(x_train), m, n])
x_val = np.reshape(x_val, [len(x_val), m, n])
x_test = np.reshape(x_test, [len(x_test), m, n])
x_train = np.concatenate((x_dic, x_train), axis = 0)
y_train = np.concatenate((y_dic, y_train), axis = 0)
print("\n")
print('Partitioned.')
print(len(x_train), ' Train')
print(len(x_val), ' Validation')
print(len(x_test), ' Test\n')
print("\n\n\n")
x_train = np.expand_dims(x_train, axis=-1)
x_val = np.expand_dims(x_val, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)
print("x_test shape: {}".format(x_test.shape))
print("Image shape: {}".format(x_test[0:2].shape))
image = x_test[0:30,:,:,:]
print("Image shape: {}".format(image.shape))
y_pred = modelFold.predict_distance(image)
print("y_pred: {}".format(y_pred))
print("vs.")
print("y_test: {}".format(y_test[0:20])) | StarcoderdataPython |
9851 | <reponame>csisarep/groundwater_dashboard
# Generated by Django 2.2 on 2021-09-11 04:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waterApp', '0010_auto_20210911_1041'),
]
operations = [
migrations.AlterField(
model_name='gwmonitoring',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False),
),
]
| StarcoderdataPython |
3309924 | #!/usr/bin/env python3
import socket
HOST = "127.0.0.1"
PORT = 65431
def send(s, cmd):
s.sendall(cmd.encode("utf-8"))
data = s.recv(1024)
print("Received", repr(data))
def get_val(s, key):
cmd = "get {}".format(key)
s.sendall(cmd.encode("utf-8"))
data = s.recv(1024)
print("Received", repr(data))
def set_val(s, key, value):
cmd = "set {} {}\r\n{}".format(key, len(value), value)
s.sendall(cmd.encode("utf-8"))
data = s.recv(1024)
print("Received", repr(data))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
send(s, "set k1 7\r\n123557")
send(s, "get k1")
send(s, "set k2 15\r\nthis is second value")
send(s, "get k2")
send(s, "set k3 7\r\nI am third")
send(s, "get k3")
send(s, "set k1 7\r\nover written first")
send(s, "get k1")
set_val(s, "big_input_key", "The ParaPro Assessment is a general aptitude test that is required in many states for paraprofessional certification. It also offers school districts an objective assessment of your foundation of knowledge and skills. Start now and take the necessary steps to become a paraprofessional.")
get_val(s, "big_input_key")
# key that doesn't exist
get_val(s, "get k6")
| StarcoderdataPython |
3252853 | <filename>Aspect_Extraction/model.py
import logging
import keras.backend as K
from keras.layers import Dense, Activation, Embedding, Input
from keras.models import Model
from my_layers import Attention, Average, WeightedSum, WeightedAspectEmb, MaxMargin
from w2v_emb_reader import W2VEmbReader as EmbReader
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
def create_model(args, maxlen, vocab):
def ortho_reg(weight_matrix):
# orthogonal regularization for aspect embedding matrix
w_n = weight_matrix / K.cast(K.epsilon() + K.sqrt(K.sum(K.square(weight_matrix), axis=-1, keepdims=True)),
K.floatx())
reg = K.sum(K.square(K.dot(w_n, K.transpose(w_n)) - K.eye(w_n.shape[0].value)))
return args.ortho_reg * reg
vocab_size = len(vocab)
# Inputs
sentence_input = Input(shape=(maxlen,), dtype='int32', name='sentence_input')
neg_input = Input(shape=(args.neg_size, maxlen), dtype='int32', name='neg_input')
# Construct word embedding layer
word_emb = Embedding(vocab_size, args.emb_dim, mask_zero=True, name='word_emb')
# Compute sentence representation
e_w = word_emb(sentence_input)
y_s = Average()(e_w)
att_weights = Attention(name='att_weights')([e_w, y_s])
z_s = WeightedSum()([e_w, att_weights])
# Compute representations of negative instances
e_neg = word_emb(neg_input)
z_n = Average()(e_neg)
# Reconstruction
p_t = Dense(args.aspect_size)(z_s)
p_t = Activation('softmax', name='p_t')(p_t)
r_s = WeightedAspectEmb(args.aspect_size, args.emb_dim, name='aspect_emb',
W_regularizer=ortho_reg)(p_t)
# Loss
loss = MaxMargin(name='max_margin')([z_s, z_n, r_s])
model = Model(inputs=[sentence_input, neg_input], outputs=loss)
# Word embedding and aspect embedding initialization
if args.emb_path:
emb_reader = EmbReader(args.emb_path, emb_dim=args.emb_dim)
logger.info('Initializing word embedding matrix')
K.set_value(
model.get_layer('word_emb').embeddings,
emb_reader.get_emb_matrix_given_vocab(vocab, K.get_value(model.get_layer('word_emb').embeddings)))
logger.info('Initializing aspect embedding matrix as centroid of kmean clusters')
K.set_value(
model.get_layer('aspect_emb').W,
emb_reader.get_aspect_matrix(args.aspect_size))
return model | StarcoderdataPython |
3219377 | <reponame>zeroam/TIL
"""read_write_data.py
Writing data to and reading data from a Google Sheets spreadsheet document
"""
import ezsheets
ss = ezsheets.createSpreadsheet("My SpreadSheet")
sheet = ss[0] # access the first sheet
print(sheet.title) # '시트1' ("Sheet1", the default title in a Korean locale)
# Write data
sheet["A1"] = "Name"
sheet["B1"] = "Age"
sheet["C1"] = "Favorite Movie"
print(sheet["A1"]) # Name
print(sheet[2, 1]) # Age
sheet[1, 2] = "Alice"
sheet[2, 2] = 30
sheet[3, 2] = "RoboCop"
# Access specific rows and columns
print(sheet.getRow(1)) # access the first row
print(sheet.getColumn(1)) # access the first column
# print(sheet.getColumn("A")) # access the first column by letter
# Update specific rows and columns
sheet.updateRow(3, ["Pumpkin", 25, "Halloween"])
column_one = sheet.getColumn(1)
for i, value in enumerate(column_one):
column_one[i] = value.upper()
sheet.updateColumn(1, column_one) # write the uppercased list back to the sheet
# Access all data
rows = sheet.getRows()
print(rows[0]) # data of the first row
rows[3][0] = "NewName"
sheet.updateRows(rows)
| StarcoderdataPython |
74560 | <gh_stars>0
import logging
import pytest
from math import isclose
import numpy as np
from haystack.modeling.infer import QAInferencer
from haystack.modeling.data_handler.inputs import QAInput, Question
@pytest.fixture()
def span_inference_result(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
questions=Question("Who counted the game among the best ever made?", uid="best_id_ever"),
)
]
result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
return result
@pytest.fixture()
def no_answer_inference_result(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text='The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet\'s remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.',
questions=Question(
"The Amazon represents less than half of the planets remaining what?", uid="best_id_ever"
),
)
]
result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
return result
def test_inference_different_inputs(bert_base_squad2):
qa_format_1 = [
{
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
}
]
q = Question(text="Who counted the game among the best ever made?")
qa_format_2 = QAInput(
questions=[q],
doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
)
result1 = bert_base_squad2.inference_from_dicts(dicts=qa_format_1)
result2 = bert_base_squad2.inference_from_objects(objects=[qa_format_2])
assert result1 == result2
def test_span_inference_result_ranking_by_confidence(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text="<NAME>cess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
questions=Question("Who counted the game among the best ever made?", uid="best_id_ever"),
)
]
# by default, result is sorted by confidence and not by score
result_ranked_by_confidence = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
assert all(
result_ranked_by_confidence.prediction[i].confidence >= result_ranked_by_confidence.prediction[i + 1].confidence
for i in range(len(result_ranked_by_confidence.prediction) - 1)
)
assert not all(
result_ranked_by_confidence.prediction[i].score >= result_ranked_by_confidence.prediction[i + 1].score
for i in range(len(result_ranked_by_confidence.prediction) - 1)
)
# ranking can be adjusted so that result is sorted by score
bert_base_squad2.model.prediction_heads[0].use_confidence_scores_for_ranking = False
result_ranked_by_score = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
assert all(
result_ranked_by_score.prediction[i].score >= result_ranked_by_score.prediction[i + 1].score
for i in range(len(result_ranked_by_score.prediction) - 1)
)
assert not all(
result_ranked_by_score.prediction[i].confidence >= result_ranked_by_score.prediction[i + 1].confidence
for i in range(len(result_ranked_by_score.prediction) - 1)
)
def test_inference_objs(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
assert span_inference_result
def test_span_performance(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
best_pred = span_inference_result.prediction[0]
assert best_pred.answer == "GameTrailers"
best_score_gold = 13.4205
best_score = best_pred.score
assert isclose(best_score, best_score_gold, rel_tol=0.001)
no_answer_gap_gold = 13.9827
no_answer_gap = span_inference_result.no_answer_gap
assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.001)
def test_no_answer_performance(no_answer_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
best_pred = no_answer_inference_result.prediction[0]
assert best_pred.answer == "no_answer"
best_score_gold = 12.1445
best_score = best_pred.score
assert isclose(best_score, best_score_gold, rel_tol=0.001)
no_answer_gap_gold = -14.4646
no_answer_gap = no_answer_inference_result.no_answer_gap
assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.001)
def test_qa_pred_attributes(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
qa_pred = span_inference_result
attributes_gold = [
"aggregation_level",
"answer_types",
"context",
"context_window_size",
"ground_truth_answer",
"id",
"n_passages",
"no_answer_gap",
"prediction",
"question",
"to_json",
"to_squad_eval",
"token_offsets",
]
for ag in attributes_gold:
assert ag in dir(qa_pred)
def test_qa_candidate_attributes(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
qa_candidate = span_inference_result.prediction[0]
attributes_gold = [
"aggregation_level",
"answer",
"answer_support",
"answer_type",
"context_window",
"n_passages_in_doc",
"offset_answer_end",
"offset_answer_start",
"offset_answer_support_end",
"offset_answer_support_start",
"offset_context_window_end",
"offset_context_window_start",
"offset_unit",
"passage_id",
"probability",
"score",
"set_answer_string",
"set_context_window",
"to_doc_level",
"to_list",
]
for ag in attributes_gold:
assert ag in dir(qa_candidate)
def test_id(span_inference_result, no_answer_inference_result):
assert span_inference_result.id == "best_id_ever"
assert no_answer_inference_result.id == "best_id_ever"
def test_duplicate_answer_filtering(bert_base_squad2):
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = 0
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
assert len(offset_answer_starts) == len(set(offset_answer_starts))
assert len(offset_answer_ends) == len(set(offset_answer_ends))
def test_no_duplicate_answer_filtering(bert_base_squad2):
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = -1
bert_base_squad2.model.prediction_heads[0].no_ans_boost = -100.0
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
assert len(offset_answer_starts) != len(set(offset_answer_starts))
assert len(offset_answer_ends) != len(set(offset_answer_ends))
def test_range_duplicate_answer_filtering(bert_base_squad2):
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = 5
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
offset_answer_starts.sort()
offset_answer_starts.remove(0)
distances_answer_starts = [j - i for i, j in zip(offset_answer_starts[:-1], offset_answer_starts[1:])]
assert all(
distance > bert_base_squad2.model.prediction_heads[0].duplicate_filtering
for distance in distances_answer_starts
)
offset_answer_ends.sort()
offset_answer_ends.remove(0)
distances_answer_ends = [j - i for i, j in zip(offset_answer_ends[:-1], offset_answer_ends[1:])]
assert all(
distance > bert_base_squad2.model.prediction_heads[0].duplicate_filtering for distance in distances_answer_ends
)
def test_qa_confidence():
inferencer = QAInferencer.load(
"deepset/roberta-base-squad2", task_type="question_answering", batch_size=40, gpu=True
)
QA_input = [
{
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
}
]
result = inferencer.inference_from_dicts(dicts=QA_input, return_json=False)[0]
assert np.isclose(result.prediction[0].confidence, 0.990427553653717)
assert result.prediction[0].answer == "GameTrailers"
if __name__ == "__main__":
test_inference_different_inputs()
test_inference_objs()
test_duplicate_answer_filtering()
test_no_duplicate_answer_filtering()
test_range_duplicate_answer_filtering()
test_qa_confidence()
| StarcoderdataPython |
3266502 | <filename>poly_classifier/rooted_poly_decider.py
# assumptions: δ = 2
# configurations = [(root,child_1,child_2),...]
# labels = set([label_1,label_2,...])
import math
import networkx
from rooted_tree_classifier.log_decider import isFlexible
def get_labels(configurations):
labels = set()
for conf in configurations:
for label in conf:
labels.add(label)
return labels
def trim(labels, configurations):
# trim outputs a subset of labels that can label any sufficiently large Δ-regular tree
# lemma 5.28 in the paper
while True:
new_labels = get_new_labels(labels, configurations)
assert not (set(new_labels) - set(labels)
) # trimming labels should not introduce any new labels
if set(new_labels) == set(labels):
break
else:
labels = new_labels
return labels
def get_new_labels(old_labels, configurations):
new_labels = set()
for conf in configurations:
pot_label = conf[0]
if pot_label not in old_labels:
continue
ok = True
for cont_label in conf[1:]:
if cont_label not in old_labels:
ok = False
break
if ok:
new_labels.add(pot_label)
return new_labels
def create_graph(labels, configurations):
graph = {label: [] for label in labels}
for conf in configurations:
head = conf[0]
if head in labels:
for tail in conf[1:]:
if tail in labels:
graph[head].append(tail)
return graph
def flexible_scc_restrictions(labels, configurations):
# output: list of all label restrictions
# lemma 5.29 in the paper
# create automaton M
graph = create_graph(labels, configurations)
# find all strongly connected component
nxgraph = networkx.to_networkx_graph(graph, create_using=networkx.DiGraph)
flexible_restrictions = []
for component in networkx.strongly_connected_components(nxgraph):
representative = list(component)[0]
if isFlexible(graph, representative):
flexible_restrictions.append(component)
return flexible_restrictions
def max_depth(labels, configurations):
if not labels:
return 0
maximum = 0
for flexible_restriction in flexible_scc_restrictions(
labels, configurations):
if labels - flexible_restriction: # if we removed something
depth = max_depth(trim(flexible_restriction, configurations),
configurations)
maximum = max(maximum, depth)
else:
return math.inf
return 1 + maximum
def rooted_polynomial_classifier(configurations):
labels = get_labels(configurations)
return max_depth(trim(labels, configurations), configurations)
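# Minimal usage sketch (hypothetical labelling problem, δ = 2): each configuration is a
# tuple (root_label, child_1_label, child_2_label). The return value is the depth bound
# computed above and may be math.inf; this assumes the rooted_tree_classifier package
# imported at the top of this module is installed.
if __name__ == "__main__":
    example_configurations = [("a", "a", "a"), ("a", "a", "b"), ("b", "a", "a")]
    print(rooted_polynomial_classifier(example_configurations))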
| StarcoderdataPython |
1624238 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
This plugin is 3rd party and not part of p2p-streams addon
Sopcast.ucoz
"""
import sys,os
current_dir = os.path.dirname(os.path.realpath(__file__))
basename = os.path.basename(current_dir)
core_dir = current_dir.replace(basename,'').replace('parsers','')
sys.path.append(core_dir)
from peertopeerutils.webutils import *
from peertopeerutils.pluginxbmc import *
from peertopeerutils.directoryhandle import *
import acestream as ace
import sopcast as sop
base_url = 'http://livefootballvideo.com/streaming'
def module_tree(name,url,iconimage,mode,parser,parserfunction):
if not parserfunction: sopcast_ucoz()
elif parserfunction == 'play': sopcast_ucoz_play(name,url)
def sopcast_ucoz():
conteudo=clean(get_page_source('http://sopcast.ucoz.com'))
listagem=re.compile('<div class="eTitle" style="text-align:left;"><a href="(.+?)">(.+?)</a>').findall(conteudo)
for urllist,titulo in listagem:
try:
match = re.compile('\((.*?)\.(.*?)\.(.*?)\. (.*?):(.*?) UTC\) (.*)').findall(titulo)
if match:
for dia,mes,ano,hora,minuto,evento in match:
import datetime
from utils import pytzimp
d = pytzimp.timezone(str(pytzimp.timezone('Europe/London'))).localize(datetime.datetime(int(ano), int(mes), int(dia), hour=int(hora), minute=int(minuto)))
timezona= settings.getSetting('timezone_new')
my_location=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
convertido=d.astimezone(my_location)
fmt = "%y-%m-%d %H:%M"
time=convertido.strftime(fmt)
addDir('[B][COLOR orange]' + time + '[/B][/COLOR]-' + evento,urllist,401,os.path.join(current_dir,'icon.png'),len(listagem),False,parser="sopcastucoz",parserfunction="play")
else:
addDir(titulo,urllist,401,'',len(listagem),False,parser="sopcastucoz",parserfunction="play")
except:
addDir(titulo,urllist,401,'',len(listagem),False,parser="sopcastucoz",parserfunction="play")
def sopcast_ucoz_play(name,url):
conteudo=clean(get_page_source(url))
blogpost = re.findall('<tr><td class="eMessage">(.*?)<tr><td colspan', conteudo, re.DOTALL)
if blogpost:
ender=[]
titulo=[]
match = re.compile('br.+?>(.+?)<').findall(blogpost[0])
for address in match:
if "sop://" in address:
titulo.append('Sopcast [' + address +']')
ender.append(address)
elif "(ace stream)" in address:
titulo.append('Acestream [' + address.replace(' (ace stream)','') +']')
ender.append(address.replace(' (ace stream)',''))
else: pass
if ender and titulo:
index = xbmcgui.Dialog().select(translate(40023), titulo)
if index > -1:
nomeescolha=titulo[index]
linkescolha=ender[index]
if re.search('acestream',nomeescolha,re.IGNORECASE) or re.search('TorrentStream',nomeescolha,re.IGNORECASE): ace.acestreams(nomeescolha,'',linkescolha)
elif re.search('sopcast',nomeescolha,re.IGNORECASE): sop.sopstreams(nomeescolha,'',linkescolha)
else: xbmcgui.Dialog().ok(translate(40000),translate(40024))
else:
xbmcgui.Dialog().ok(translate(40000),translate(40008))
| StarcoderdataPython |
42106 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Created by techno at 25/04/19
#Feature: #Enter feature name here
# Enter feature description here
#Scenario: # Enter scenario name here
# Enter steps here
""" tokenizing a string and counting unique words"""
text = ('this is sample text with several words different '
'this is more sample text with some different words')
word_counts = {}
#count ocurrences of each unique word
for word in text.split():
    if word in word_counts:
        word_counts[word] += 1  # update existing key-value pair
        print(word_counts)  # print just to check how it is working
    else:
        word_counts[word] = 1  # insert new key-value pair
        print(word_counts)  # print just to check how it is working
print(f'{"WORD":<12}COUNT')
for word, count in sorted(word_counts.items()):
print(f'{word:<12}{count}')
print('\nNumer of unique words:', len(word_counts))
"""
Line 10 tokenizes text by calling string method split, which separates the
words using the method’s delimiter string argument. If you do not provide
an argument, split uses a space. The method returns a list of tokens
(that is, the words in text). Lines 10–14 iterate through the list of words.
For each word, line 11 determines whether that word (the key) is already
in the dictionary. If so, line 12 increments that word’s count; otherwise,
line 14 inserts a new key–value pair for that word with an initial count of 1.
Lines 16–21 summarize the results in a two-column table containing each word
and its corresponding count. The for statement in lines 18 and 19 iterates
through the diction-ary’s key–value pairs. It unpacks each key and value
into the variables word and count, then displays them in two columns.
Line 21 displays the number of unique words.
""" | StarcoderdataPython |
3378554 | # coding: utf-8
import sys
sys.path.append(".")
from workshop.en.z_1 import *
reportErrors = True
"""
- when 'True', the validity of the values is checked upstream.
- when 'False', no check; if errors, Python exceptions are displayed.
"""
def solveFirstDegreeEquation(a, b, c):
solution = (c-b)/a
return str(solution)
def solveFirstDegreeInequation(a, b, ineg, c):
sol = solveFirstDegreeEquation(a, b, c)
if ineg == "≥":
if a > 0:
return "[" + sol + " ; " + "+∞["
else:
return "]-∞" + " ; " + sol + "]"
if ineg == ">":
if a > 0:
return "]" + sol + " ; " + "+∞["
else:
return "]-∞" + " ; " + sol + "["
if ineg == "≤":
if a > 0:
return "]-∞" + " ; " + sol + "]"
else:
return "[" + sol + " ; " + "+∞["
if ineg == "<":
if a > 0:
return "]-∞" + " ; " + sol + "["
else:
return "]" + sol + " ; " + "+∞["
def solve(a, b, c, operator):
# Only useful when 'reportErrors' at 'False'.
if (a == 0):
warn("'a' must be different from 0 !")
return
# There are other tests to do.
erase()
display("Solution of")
display("{:g} × x + {:g} {} {:g}:".format(a, b, operator, c))
if operator == '=':
display("x = " + solveFirstDegreeEquation(a, b, c))
else:
display("x = " + solveFirstDegreeInequation(a, b, operator, c))
go(globals())
| StarcoderdataPython |
3282405 | import sys
import logging
import argparse
import shutil
from collections import OrderedDict
import git
import yaml
import torch
import torch.optim as optim
from baselines.common.atari_wrappers import EpisodicLifeEnv, FireResetEnv
OPTS = OrderedDict({None: None,
'adam': optim.Adam,
'rmsprop': optim.RMSprop})
CLI_LOGGING_FORMAT = '[%(filename)s][%(funcName)s:%(lineno)d]' + \
'[%(levelname)s] %(message)s'
CLI_LOGGING_STREAM = sys.stdout
def get_logger(logger_name, log_level='info'):
CLI_LOGGING_LEVEL = getattr(logging, log_level.upper(), None)
logger = logging.getLogger(logger_name)
logger.setLevel(CLI_LOGGING_LEVEL)
ch = logging.StreamHandler(CLI_LOGGING_STREAM)
formatter = logging.Formatter(CLI_LOGGING_FORMAT)
ch.setFormatter(formatter)
ch.setLevel(CLI_LOGGING_LEVEL)
logger.addHandler(ch)
logger.propagate = False
return logger
logger = get_logger(__file__)
def read_yaml(config_file):
if not config_file.is_file():
logger.error('Not a file, {}'.format(config_file))
return
try:
with config_file.open('r') as pfile:
d = yaml.load(pfile, yaml.FullLoader)
assert validate_config(d)
logger.info('Read config file {}'.format(config_file.as_posix()))
return d
except Exception as err:
logger.error('Error reading {}, {}'.format(config_file, err))
def atari_play_env(env):
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
return env
def get_repo_hexsha():
filepath = __file__
repopath = filepath.split('utils')[0]
g = git.Repo(repopath)
return g.head.commit.hexsha[:8]
def copy_yaml(src_file, dest_dir, hexsha):
stem = src_file.stem
fname = '{}-{}.yaml'.format(stem, hexsha)
dst_file = dest_dir.joinpath(fname)
shutil.copyfile(src_file.as_posix(), dst_file.as_posix())
def write_model(model, tag, dest):
model_savefile = '{0}/agent-{1}.pth'.format(dest, tag)
logger.debug("Saving Agent to {}".format(model_savefile))
torch.save(model.state_dict(), model_savefile)
def add_verbosity_parser(parser):
parser.add_argument('-l', '--log', dest='log', choices=['info', 'debug'],
default='info', help='Set verbosity for the logger')
return parser
def validate_config(cfgs):
assert cfgs.get('env'), 'Expected Environment info in config file'
assert cfgs.get('agent'), 'Expected Agent info in config file'
assert cfgs.get('train'), 'Expected Training info in config file'
assert cfgs.get('test'), 'Expected Testing info in config file'
return True
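# Minimal shape accepted by validate_config (the nested values themselves are not checked here):
# {'env': {...}, 'agent': {...}, 'train': {...}, 'test': {...}}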
| StarcoderdataPython |
1683566 | import duckdb
from decimal import Decimal
import pytest
def initialize(con):
con.execute("Create Table bla (i integer, j decimal(5,2), k varchar)")
con.execute("insert into bla values (1,2.1,'a'), (2,3.2,'b'), (NULL, NULL, NULL)")
return con.table('bla')
def munge(cell):
try:
cell = round(float(cell), 2)
except (ValueError, TypeError):
cell = str(cell)
return cell
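# Examples: munge(Decimal("2.10")) -> 2.1, munge("a") -> "a", munge(None) -> "None";
# numeric-looking cells are compared as floats rounded to 2 decimals, everything else as strings.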
def munge_compare(left_list, right_list):
assert len(left_list) == len(right_list)
for i in range (len(left_list)):
tpl_left = left_list[i]
tpl_right = right_list[i]
assert len(tpl_left) == len(tpl_right)
for j in range (len(tpl_left)):
left_cell = munge(tpl_left[j])
right_cell = munge(tpl_right[j])
assert left_cell == right_cell
def aggregation_generic(aggregation_function,assertion_answers):
assert len(assertion_answers) >=2
# Check single column
print(aggregation_function('i').execute().fetchall())
munge_compare(aggregation_function('i').execute().fetchall(), assertion_answers[0])
# Check multi column
print(aggregation_function('i,j').execute().fetchall() )
munge_compare(aggregation_function('i,j').execute().fetchall(), assertion_answers[1])
if len(assertion_answers) < 3:
# Shouldn't be able to aggregate on string
with pytest.raises(Exception, match='No function matches the given name'):
aggregation_function('k').execute().fetchall()
else:
print (aggregation_function('k').execute().fetchall())
munge_compare( aggregation_function('k').execute().fetchall(), assertion_answers[2])
# Check empty
with pytest.raises(Exception, match='incompatible function arguments'):
aggregation_function().execute().fetchall()
# Check Null
with pytest.raises(Exception, match='incompatible function arguments'):
aggregation_function(None).execute().fetchall()
# Check broken
with pytest.raises(Exception, match='Referenced column "nonexistant" not found'):
aggregation_function('nonexistant').execute().fetchall()
class TestRAPIAggregations(object):
def test_sum(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.sum,[[(3,)], [(3, Decimal('5.30'))]])
duckdb_cursor.execute("drop table bla")
def test_count(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.count,[[(2,)], [(2,2)], [(2,)]])
duckdb_cursor.execute("drop table bla")
def test_median(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
# is this supposed to accept strings?
aggregation_generic(rel.median,[[(1.5,)], [(1.5, Decimal('2.10'))], [('a',)]])
duckdb_cursor.execute("drop table bla")
def test_min(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.min,[[(1,)], [(1, Decimal('2.10'))], [('a',)]])
duckdb_cursor.execute("drop table bla")
def test_max(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.max,[[(2,)], [(2, Decimal('3.2'))], [('b',)]])
duckdb_cursor.execute("drop table bla")
def test_mean(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.mean,[[(1.5,)], [(1.5, 2.65)]])
duckdb_cursor.execute("drop table bla")
def test_var(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.var,[[(0.25,)], [(0.25, 0.30249999999999994)]])
duckdb_cursor.execute("drop table bla")
def test_std(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.std,[[(0.5,)], [(0.5, 0.5499999999999999)]])
duckdb_cursor.execute("drop table bla")
def test_apply(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
rel.apply('sum', 'i').execute().fetchone() == (3,)
duckdb_cursor.execute("drop table bla")
def test_quantile(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
extra_param = '0.5'
aggregation_function = rel.quantile
# Check single column
assert aggregation_function(extra_param,'i').execute().fetchone() == (1,)
# Check multi column
assert aggregation_function(extra_param,'i,j').execute().fetchone() == (1, Decimal('2.10'))
assert aggregation_function(extra_param,'k').execute().fetchone() == ('a',)
# Check empty
with pytest.raises(Exception, match='incompatible function arguments'):
aggregation_function().execute().fetchone()
# Check Null
with pytest.raises(Exception, match='incompatible function arguments'):
aggregation_function(None).execute().fetchone()
# Check broken
with pytest.raises(Exception, match='incompatible function arguments.'):
aggregation_function('bla').execute().fetchone()
duckdb_cursor.execute("drop table bla")
def test_value_counts(self, duckdb_cursor):
con = duckdb.connect()
rel = initialize(con)
con.execute("insert into bla values (1,2.1,'a'), (NULL, NULL, NULL)")
munge_compare(rel.value_counts('i').execute().fetchall(),[(None, 0), (1, 2), (2, 1)])
with pytest.raises(Exception, match='Only one column is accepted'):
rel.value_counts('i,j').execute().fetchall()
def test_length(self, duckdb_cursor):
con = duckdb.connect()
rel = initialize(con)
assert len(rel) == 3
def test_shape(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
assert rel.shape == (3, 3)
duckdb_cursor.execute("drop table bla")
def test_unique(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.unique,[[(1,), (2,), (None,)], [(1, Decimal('2.10')), (2, Decimal('3.20')), (None, None)],[('a',), ('b',), (None,)]])
duckdb_cursor.execute("drop table bla")
def test_mad(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.mad,[[(0.5,)], [(0.5, Decimal('0.55'))]])
duckdb_cursor.execute("drop table bla")
def test_mode(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.mode,[[(1,)], [(1, Decimal('2.10'))],[('a',)]])
duckdb_cursor.execute("drop table bla")
def test_abs(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.abs,[[(1,), (2,), (None,)], [(1, Decimal('2.10')), (2, Decimal('3.20')), (None, None)]])
duckdb_cursor.execute("drop table bla")
def test_prod(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.prod,[[(2.0,)], [(2.0, 6.720000000000001)]])
duckdb_cursor.execute("drop table bla")
def test_skew(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.skew,[[(None,)], [(None, None)]])
duckdb_cursor.execute("create table aggr(k int, v decimal(10,2), v2 decimal(10, 2));")
duckdb_cursor.execute("""insert into aggr values
(1, 10, null),
(2, 10, 11),
(2, 10, 15),
(2, 10, 18),
(2, 20, 22),
(2, 20, 25),
(2, 25, null),
(2, 30, 35),
(2, 30, 40),
(2, 30, 50),
(2, 30, 51);""")
rel = duckdb_cursor.table('aggr')
munge_compare(rel.skew('k,v,v2').execute().fetchall(),[(-3.316624790355393, -0.16344366935199223, 0.3654008511025841)])
duckdb_cursor.execute("drop table aggr")
duckdb_cursor.execute("drop table bla")
def test_kurt(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.kurt,[[(None,)], [(None, None)]])
duckdb_cursor.execute("create table aggr(k int, v decimal(10,2), v2 decimal(10, 2));")
duckdb_cursor.execute("""insert into aggr values
(1, 10, null),
(2, 10, 11),
(2, 10, 15),
(2, 10, 18),
(2, 20, 22),
(2, 20, 25),
(2, 25, null),
(2, 30, 35),
(2, 30, 40),
(2, 30, 50),
(2, 30, 51);""")
rel = duckdb_cursor.table('aggr')
munge_compare(rel.kurt('k,v,v2').execute().fetchall(),[(10.99999999999836, -1.9614277138467147, -1.445119691585509)])
duckdb_cursor.execute("drop table aggr")
duckdb_cursor.execute("drop table bla")
def test_cum_sum(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.cumsum,[[(1,), (3,), (3,)], [(1, Decimal('2.10')), (3, Decimal('5.30')), (3, Decimal('5.30'))]])
duckdb_cursor.execute("drop table bla")
def test_cum_prod(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.cumprod,[[(1.0,), (2.0,), (2.0,)], [(1.0, 2.1), (2.0, 6.720000000000001), (2.0, 6.720000000000001)]])
duckdb_cursor.execute("drop table bla")
def test_cum_max(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.cummax,[[(1,), (2,), (2,)], [(1, Decimal('2.10')), (2, Decimal('3.20')), (2, Decimal('3.20'))], [('a',), ('b',), ('b',)]])
duckdb_cursor.execute("drop table bla")
def test_cum_min(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.cummin,[[(1,), (1,), (1,)], [(1, Decimal('2.10')), (1, Decimal('2.10')), (1, Decimal('2.10'))], [('a',), ('a',), ('a',)]])
duckdb_cursor.execute("drop table bla")
def test_cum_sem(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
aggregation_generic(rel.sem,[[(0.35355339059327373,)], [(0.35355339059327373, 0.38890872965260104)]])
duckdb_cursor.execute("drop table bla")
def test_describe(self, duckdb_cursor):
rel = initialize(duckdb_cursor)
assert rel.describe().fetchall() == [('[Min: 1, Max: 2][Has Null: true, Has No Null: true][Approx Unique: 3]', '[Min: 2.10, Max: 3.20][Has Null: true, Has No Null: true][Approx Unique: 3]', '[Min: a, Max: b, Has Unicode: false, Max String Length: 1][Has Null: true, Has No Null: true][Approx Unique: 3]')]
duckdb_cursor.execute("drop table bla")
| StarcoderdataPython |
3283302 | <reponame>ysharma12/Food-Name-Classification-and-Ingredients-Prediction
from imports import*
import utils
class dai_image_csv_dataset(Dataset):
def __init__(self, data_dir, data, transforms_ = None, obj = False,
minorities = None, diffs = None, bal_tfms = None):
super(dai_image_csv_dataset, self).__init__()
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
# img = torchvision.transforms.functional.to_grayscale(img,num_output_channels=3)
y = self.data.iloc[index, 1]
if self.minorities and self.bal_tfms:
if y in self.minorities:
if hasattr(self.bal_tfms,'transforms'):
for tr in self.bal_tfms.transforms:
tr.p = self.diffs[y]
l = [self.bal_tfms]
l.extend(self.transforms_)
self.tfms = transforms.Compose(l)
else:
for t in self.bal_tfms:
t.p = self.diffs[y]
self.transforms_[1:1] = self.bal_tfms
self.tfms = transforms.Compose(self.transforms_)
# print(self.tfms)
else:
self.tfms = transforms.Compose(self.transforms_)
else:
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
# if self.obj:
# s = x.size()[1]
# if isinstance(s,tuple):
# s = s[0]
# row_scale = s/img.size[0]
# col_scale = s/img.size[1]
# y = rescale_bbox(y,row_scale,col_scale)
# y.squeeze_()
# y2 = self.data.iloc[index, 2]
# y = (y,y2)
return (x,y)
class dai_image_csv_dataset_food(Dataset):
def __init__(self, data_dir, data, transforms_ = None, obj = False,
minorities = None, diffs = None, bal_tfms = None):
super(dai_image_csv_dataset_food, self).__init__()
self.data_dir = data_dir
self.data = data
self.transforms_ = transforms_
self.tfms = None
self.obj = obj
self.minorities = minorities
self.diffs = diffs
self.bal_tfms = bal_tfms
        assert transforms_ is not None, 'Please pass some transforms.'
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
y1,y2 = self.data.iloc[index, 1],self.data.iloc[index, 2]
self.tfms = transforms.Compose(self.transforms_)
x = self.tfms(img)
return (x,y1,y2)
class dai_image_dataset(Dataset):
def __init__(self, data_dir, data_df, input_transforms = None, target_transforms = None):
super(dai_image_dataset, self).__init__()
self.data_dir = data_dir
self.data_df = data_df
self.input_transforms = None
self.target_transforms = None
if input_transforms:
self.input_transforms = transforms.Compose(input_transforms)
if target_transforms:
self.target_transforms = transforms.Compose(target_transforms)
def __len__(self):
return len(self.data_df)
def __getitem__(self, index):
img_path = os.path.join(self.data_dir,self.data_df.iloc[index, 0])
img = Image.open(img_path)
img = img.convert('RGB')
target = img.copy()
if self.input_transforms:
img = self.input_transforms(img)
if self.target_transforms:
target = self.target_transforms(target)
return img, target
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def get_minorities(df,thresh=0.8):
c = df.iloc[:,1].value_counts()
lc = list(c)
max_count = lc[0]
diffs = [1-(x/max_count) for x in lc]
diffs = dict((k,v) for k,v in zip(c.keys(),diffs))
minorities = [c.keys()[x] for x,y in enumerate(lc) if y < (thresh*max_count)]
return minorities,diffs
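# Worked example (hypothetical label counts): with counts {'cat': 100, 'dog': 70, 'bird': 20}
# and thresh=0.8, max_count is 100, so labels with fewer than 80 samples are minorities:
# minorities == ['dog', 'bird'] and diffs == {'cat': 0.0, 'dog': 0.3, 'bird': 0.8}.
# The diffs are later used as per-class probabilities for the balancing transforms.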
def csv_from_path(path, img_dest):
path = Path(path)
img_dest = Path(img_dest)
labels_paths = list(path.iterdir())
tr_images = []
tr_labels = []
for l in labels_paths:
if l.is_dir():
for i in list(l.iterdir()):
if i.suffix in IMG_EXTENSIONS:
name = i.name
label = l.name
new_name = '{}_{}_{}'.format(path.name,label,name)
new_path = img_dest/new_name
# print(new_path)
os.rename(i,new_path)
tr_images.append(new_name)
tr_labels.append(label)
# os.rmdir(l)
tr_img_label = {'Img':tr_images, 'Label': tr_labels}
csv = pd.DataFrame(tr_img_label,columns=['Img','Label'])
csv = csv.sample(frac=1).reset_index(drop=True)
return csv
def add_extension(a,e):
a = [x+e for x in a]
return a
def one_hot(targets, multi = False):
if multi:
binerizer = MultiLabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
else:
binerizer = LabelBinarizer()
dai_1hot = binerizer.fit_transform(targets)
return dai_1hot,binerizer.classes_
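# Sketch of the two modes: one_hot(['a', 'b', 'c']) yields a 3x3 one-hot matrix with
# classes ['a', 'b', 'c'], while one_hot([['a', 'b'], ['b']], multi=True) yields
# [[1, 1], [0, 1]] with classes ['a', 'b'] (MultiLabelBinarizer semantics).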
def get_img_stats(dataset,sz):
print('Calculating mean and std of the data for standardization. Might take some time, depending on the training data size.')
size = int(len(dataset)*sz)
i = 0
imgs = []
for d in dataset:
img = d[0]
# print(img.size())
if i > size:
break
imgs.append(img)
i+=1
imgs_ = torch.stack(imgs,dim=3)
imgs_ = imgs_.view(3,-1)
imgs_mean = imgs_.mean(dim=1)
imgs_std = imgs_.std(dim=1)
del imgs
del imgs_
print('Done')
return imgs_mean,imgs_std
def split_df(train_df,test_size = 0.15):
try:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2,stratify = train_df.iloc[:,1])
except:
train_df,val_df = train_test_split(train_df,test_size = test_size,random_state = 2)
train_df = train_df.reset_index(drop = True)
val_df = val_df.reset_index(drop = True)
return train_df,val_df
def save_obj(path,obj):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class DataProcessor:
def __init__(self, data_path = None, train_csv = None, val_csv = None,test_csv = None,
tr_name = 'train', val_name = 'val', test_name = 'test', extension = None, setup_data = True):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
(self.data_path,self.train_csv,self.val_csv,self.test_csv,
self.tr_name,self.val_name,self.test_name,self.extension) = (data_path,train_csv,val_csv,test_csv,
tr_name,val_name,test_name,extension)
self.multi_label = False
self.single_label = False
self.img_mean = self.img_std = None
self.data_dir,self.num_classes,self.class_names = data_path,0,[]
if setup_data:
self.set_up_data()
def set_up_data(self,split_size = 0.15):
(data_path,train_csv,val_csv,test_csv,tr_name,val_name,test_name) = (self.data_path,self.train_csv,self.val_csv,self.test_csv,
self.tr_name,self.val_name,self.test_name)
# check if paths given and also set paths
if not data_path:
data_path = os.getcwd() + '/'
tr_path = os.path.join(data_path,tr_name)
val_path = os.path.join(data_path,val_name)
test_path = os.path.join(data_path,test_name)
if (os.path.exists(os.path.join(data_path,tr_name+'.csv'))) and train_csv is None:
train_csv = tr_name+'.csv'
# if os.path.exists(os.path.join(data_path,val_name+'.csv')):
# val_csv = val_name+'.csv'
# if os.path.exists(os.path.join(data_path,test_name+'.csv')):
# test_csv = test_name+'.csv'
# paths to csv
if not train_csv:
# print('no')
train_csv,val_csv,test_csv = self.data_from_paths_to_csv(data_path,tr_path,val_path,test_path)
train_csv_path = os.path.join(data_path,train_csv)
train_df = pd.read_csv(train_csv_path)
if 'Unnamed: 0' in train_df.columns:
train_df = train_df.drop('Unnamed: 0', 1)
if len(train_df.columns) > 2:
self.obj = True
img_names = [str(x) for x in list(train_df.iloc[:,0])]
if self.extension:
img_names = add_extension(img_names,self.extension)
if val_csv:
val_csv_path = os.path.join(data_path,val_csv)
val_df = pd.read_csv(val_csv_path)
val_targets = list(val_df.iloc[:,1].apply(lambda x: str(x)))
if test_csv:
test_csv_path = os.path.join(data_path,test_csv)
test_df = pd.read_csv(test_csv_path)
test_targets = list(test_df.iloc[:,1].apply(lambda x: str(x)))
targets = list(train_df.iloc[:,1].apply(lambda x: str(x)))
lengths = [len(t) for t in [s.split() for s in targets]]
self.target_lengths = lengths
split_targets = [t.split() for t in targets]
if lengths[1:] != lengths[:-1]:
self.multi_label = True
# print('\nMulti-label Classification\n')
try:
split_targets = [list(map(int,x)) for x in split_targets]
except:
pass
dai_onehot,onehot_classes = one_hot(split_targets,self.multi_label)
train_df.iloc[:,1] = [torch.from_numpy(x).type(torch.FloatTensor) for x in dai_onehot]
self.data_dir,self.num_classes,self.class_names = data_path,len(onehot_classes),onehot_classes
else:
# print('\nSingle-label Classification\n')
self.single_label = True
unique_targets = list(np.unique(targets))
unique_targets_dict = {k:v for v,k in enumerate(unique_targets)}
train_df.iloc[:,1] = pd.Series(targets).apply(lambda x: unique_targets_dict[x])
if val_csv:
val_df.iloc[:,1] = pd.Series(val_targets).apply(lambda x: unique_targets_dict[x])
if test_csv:
test_df.iloc[:,1] = pd.Series(test_targets).apply(lambda x: unique_targets_dict[x])
self.data_dir,self.num_classes,self.class_names = data_path,len(unique_targets),unique_targets
if not val_csv:
train_df,val_df = split_df(train_df,split_size)
if not test_csv:
val_df,test_df = split_df(val_df,split_size)
tr_images = [str(x) for x in list(train_df.iloc[:,0])]
val_images = [str(x) for x in list(val_df.iloc[:,0])]
test_images = [str(x) for x in list(test_df.iloc[:,0])]
if self.extension:
tr_images = add_extension(tr_images,self.extension)
val_images = add_extension(val_images,self.extension)
test_images = add_extension(test_images,self.extension)
train_df.iloc[:,0] = tr_images
val_df.iloc[:,0] = val_images
test_df.iloc[:,0] = test_images
if self.single_label:
dai_df = pd.concat([train_df,val_df,test_df])
dai_df.iloc[:,1] = [self.class_names[x] for x in dai_df.iloc[:,1]]
dai_df.to_csv(os.path.join(data_path,'dai_df.csv'),index=False)
train_df.to_csv(os.path.join(data_path,'{}.csv'.format(self.tr_name)),index=False)
val_df.to_csv(os.path.join(data_path,'{}.csv'.format(self.val_name)),index=False)
test_df.to_csv(os.path.join(data_path,'{}.csv'.format(self.test_name)),index=False)
self.minorities,self.class_diffs = None,None
if self.single_label:
self.minorities,self.class_diffs = get_minorities(train_df)
self.data_dfs = {self.tr_name:train_df, self.val_name:val_df, self.test_name:test_df}
data_dict = {'data_dfs':self.data_dfs,'data_dir':self.data_dir,'num_classes':self.num_classes,'class_names':self.class_names,
'minorities':self.minorities,'class_diffs':self.class_diffs,'single_label':self.single_label,'multi_label':self.multi_label}
self.data_dict = data_dict
return data_dict
def data_from_paths_to_csv(self,data_path,tr_path,val_path = None,test_path = None):
train_df = csv_from_path(tr_path,tr_path)
train_df.to_csv(os.path.join(data_path,self.tr_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',None,None)
if val_path is not None:
val_exists = os.path.exists(val_path)
if val_exists:
val_df = csv_from_path(val_path,tr_path)
val_df.to_csv(os.path.join(data_path,self.val_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',self.val_name+'.csv',None)
if test_path is not None:
test_exists = os.path.exists(test_path)
if test_exists:
test_df = csv_from_path(test_path,tr_path)
test_df.to_csv(os.path.join(data_path,self.test_name+'.csv'),index=False)
ret = (self.tr_name+'.csv',self.val_name+'.csv',self.test_name+'.csv')
return ret
def get_data(self, data_dict = None, s = (224,224), dataset = dai_image_csv_dataset, bs = 32, balance = False, tfms = None,bal_tfms = None,
num_workers = 8, stats_percentage = 0.6, img_mean = None, img_std = None):
self.image_size = s
if not data_dict:
data_dict = self.data_dict
data_dfs,data_dir,minorities,class_diffs,single_label,multi_label = (data_dict['data_dfs'],data_dict['data_dir'],
data_dict['minorities'],data_dict['class_diffs'],
data_dict['single_label'],data_dict['multi_label'])
if not single_label:
balance = False
if not bal_tfms:
bal_tfms = { self.tr_name: [transforms.RandomHorizontalFlip()],
self.val_name: None,
self.test_name: None
}
else:
bal_tfms = {self.tr_name: bal_tfms, self.val_name: None, self.test_name: None}
resize_transform = transforms.Resize(s,interpolation=Image.NEAREST)
if not tfms:
tfms = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
else:
tfms_temp = [
resize_transform,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
tfms_temp[1:1] = tfms
tfms = tfms_temp
# print(tfms)
data_transforms = {
self.tr_name: tfms,
self.val_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s,interpolation=Image.NEAREST),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
],
self.test_name: [
# transforms.Resize(s[0]+50),
# transforms.CenterCrop(s[0]),
transforms.Resize(s,interpolation=Image.NEAREST),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
}
if img_mean is None and self.img_mean is None:
temp_tfms = [resize_transform, transforms.ToTensor()]
temp_dataset = dataset(os.path.join(data_dir,self.tr_name),data_dfs[self.tr_name],temp_tfms)
self.img_mean,self.img_std = get_img_stats(temp_dataset,stats_percentage)
elif self.img_mean is None:
self.img_mean,self.img_std = img_mean,img_std
data_transforms[self.tr_name][-1].mean,data_transforms[self.tr_name][-1].std = self.img_mean,self.img_std
data_transforms[self.val_name][-1].mean,data_transforms[self.val_name][-1].std = self.img_mean,self.img_std
data_transforms[self.test_name][-1].mean,data_transforms[self.test_name][-1].std = self.img_mean,self.img_std
if balance:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x],minorities,class_diffs,bal_tfms[x])
for x in [self.tr_name, self.val_name, self.test_name]}
else:
image_datasets = {x: dataset(os.path.join(data_dir,self.tr_name),data_dfs[x],
data_transforms[x])
for x in [self.tr_name, self.val_name, self.test_name]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=bs,
shuffle=True, num_workers=num_workers)
for x in [self.tr_name, self.val_name, self.test_name]}
dataset_sizes = {x: len(image_datasets[x]) for x in [self.tr_name, self.val_name, self.test_name]}
self.image_datasets,self.dataloaders,self.dataset_sizes = (image_datasets,dataloaders,
dataset_sizes)
        return image_datasets,dataloaders,dataset_sizes
| StarcoderdataPython
1737529 | <reponame>Capping-WAR/API
import connexion
import six
from swagger_server.models.request_info import RequestInfo # noqa: E501
from swagger_server.models.rule import Rule # noqa: E501
from swagger_server import util
from swagger_server.__globals__ import _globals
def add_rule(Rule): # noqa: E501
"""Add a Rule
# noqa: E501
:param Rule: Rule to be added
:type Rule: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
values = list(Rule.values())
cols = ','.join(list(Rule.keys()))
results = _globals.pgapi.insert('Rules', values, cols=cols)
if type(results) != list:
results = str(results)
return results
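# Example payload (column names are hypothetical and must match the Rules table schema):
# a POST body such as {"ruleName": "min_length", "severity": 2} is turned into
# INSERT INTO Rules (ruleName,severity) VALUES (...) via pgapi.insert above.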
def delete_rule(ruleID): # noqa: E501
"""Delete a Rule
# noqa: E501
:param ruleID: ID of Rule
:type ruleID: int
:rtype: None
"""
results = _globals.pgapi.delete(
'Rules',
clause=f'WHERE ruleID={ruleID}'
)
if type(results) != list:
results = str(results)
return results
def get_rule_by_id(ruleID): # noqa: E501
"""Get a Rule by ruleID
# noqa: E501
:param ruleID: ID of Rule
:type ruleID: int
:rtype: List[Rule]
"""
results = _globals.pgapi.get(
'Rules',
clause=f'WHERE ruleID={ruleID}'
)
if type(results) != list:
results = str(results)
return {'Rule':results}
def get_rules(): # noqa: E501
"""Get all Rules
# noqa: E501
:rtype: List[Rule]
"""
results = _globals.pgapi.get(
'Rules',
)
if type(results) != list:
results = str(results)
return {'Rules':results}
def update_rule(ruleID, Rule): # noqa: E501
"""Update a Rule
# noqa: E501
:param ruleID: ID of Rule
:type ruleID: int
:param Rule: Updated Rule
:type Rule: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
results = _globals.pgapi.update(
'Rules',
Rule,
clause=f'WHERE ruleID={ruleID}'
)
if type(results) != list:
results = str(results)
    return results
| StarcoderdataPython
3373764 | <reponame>arpancodes/pyre-check
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import json
import logging
from pathlib import Path
from typing import TextIO
from .. import configuration as configuration_module, log
from . import commands, server_connection, remote_logging
LOG: logging.Logger = logging.getLogger(__name__)
HELP_MESSAGE: str = """
Possible queries:
- attributes(class_name)
Returns a list of attributes, including functions, for a class.
- batch(query1(arg), query2(arg))
Runs a batch of queries and returns a map of responses. List of given queries
may include any combination of other valid queries except for `batch` itself.
- callees(function)
Calls from a given function.
- callees_with_location(function)
Calls from a given function, including the locations at which they are called.
- defines(module_or_class_name)
Returns a JSON with the signature of all defines for given module or class.
- dump_call_graph()
Returns a comprehensive JSON of caller -> list of callees.
- inline_decorators(qualified_function_name, decorators_to_skip=[decorator1, ...])
Returns the function definition after inlining decorators.
Allows skipping certain decorators when inlining.
- less_or_equal(T1, T2)
Returns whether T1 is a subtype of T2.
- path_of_module(module)
Gives an absolute path for `module`.
- save_server_state('path')
Saves Pyre's serialized state into `path`.
- superclasses(class_name1, class_name2, ...)
Returns a mapping of class_name to the list of superclasses for `class_name`.
If no class name is provided, return the mapping for all classes Pyre knows about.
- type(expression)
Evaluates the type of `expression`.
- types(path='path') or types('path1', 'path2', ...)
Returns a map from each given path to a list of all types for that path.
- validate_taint_models('optional path')
Validates models and returns errors.
Defaults to model path in configuration if no parameter is passed in.
"""
class InvalidQueryResponse(Exception):
pass
@dataclasses.dataclass(frozen=True)
class Response:
payload: object
def _print_help_message() -> None:
log.stdout.write(HELP_MESSAGE)
def parse_query_response_json(response_json: object) -> Response:
if (
isinstance(response_json, list)
and len(response_json) > 1
and response_json[0] == "Query"
):
return Response(response_json[1])
raise InvalidQueryResponse(f"Unexpected JSON response from server: {response_json}")
def parse_query_response(text: str) -> Response:
try:
response_json = json.loads(text)
return parse_query_response_json(response_json)
except json.JSONDecodeError as decode_error:
message = f"Cannot parse response as JSON: {decode_error}"
raise InvalidQueryResponse(message) from decode_error
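# For reference: parse_query_response('["Query", {"response": []}]') returns
# Response(payload={"response": []}); any other JSON shape, or invalid JSON,
# raises InvalidQueryResponse.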
def _send_query_request(output_channel: TextIO, query_text: str) -> None:
query_message = json.dumps(["Query", query_text])
LOG.debug(f"Sending `{log.truncate(query_message, 400)}`")
output_channel.write(f"{query_message}\n")
def _receive_query_response(input_channel: TextIO) -> Response:
query_message = input_channel.readline().strip()
LOG.debug(f"Received `{log.truncate(query_message, 400)}`")
return parse_query_response(query_message)
def query_server(socket_path: Path, query_text: str) -> Response:
with server_connection.connect_in_text_mode(socket_path) as (
input_channel,
output_channel,
):
_send_query_request(output_channel, query_text)
return _receive_query_response(input_channel)
@remote_logging.log_usage(command_name="query")
def run(
configuration: configuration_module.Configuration, query_text: str
) -> commands.ExitCode:
socket_path = server_connection.get_default_socket_path(
project_root=Path(configuration.project_root),
relative_local_root=Path(configuration.relative_local_root)
if configuration.relative_local_root
else None,
)
try:
if query_text == "help":
_print_help_message()
return commands.ExitCode.SUCCESS
response = query_server(socket_path, query_text)
log.stdout.write(json.dumps(response.payload))
return commands.ExitCode.SUCCESS
except server_connection.ConnectionFailure:
LOG.warning(
"A running Pyre server is required for queries to be responded. "
"Please run `pyre` first to set up a server."
)
return commands.ExitCode.SERVER_NOT_FOUND
except Exception as error:
raise commands.ClientException(
f"Exception occured during pyre query: {error}"
) from error
| StarcoderdataPython |
35862 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from time import strftime, gmtime
from email.header import make_header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .utils import strip_tags, format_email_address
from .attachment import Attachment
from .compat import unicode_compatible, to_unicode, to_string, PY3
@unicode_compatible
class PlainMessage(object):
"""Simple wrapper for data of e-mail message with plain text."""
_PREAMBLE_TEXT = "This is a multi-part message in MIME format."
def __init__(self, sender, subject, content, charset="utf-8"):
self._sender = format_email_address(sender)
self._charset = to_string(charset)
self._content = to_unicode(content)
self._subject = to_unicode(subject)
self._attachments = []
self._recipients = {"To": [], "Cc": [], "Bcc": []}
@property
def sender(self):
return self._sender
@property
def subject(self):
return self._subject
@property
def recipients(self):
to = self._recipients["To"]
cc = self._recipients["Cc"]
bcc = self._recipients["Bcc"]
return frozenset(to + cc + bcc)
def add_recipients(self, *recipients):
recipients = self._unique_recipients(recipients)
self._recipients["To"].extend(recipients)
def add_recipients_cc(self, *recipients):
recipients = self._unique_recipients(recipients)
self._recipients["Cc"].extend(recipients)
def add_recipients_bcc(self, *recipients):
recipients = self._unique_recipients(recipients)
self._recipients["Bcc"].extend(recipients)
def _unique_recipients(self, recipients):
recipients = map(format_email_address, recipients)
return frozenset(recipients) - self.recipients
@property
def content(self):
return self._content
@property
def payload(self):
payload = self._build_content_payload(self._content)
if self._attachments:
content_payload = payload
payload = MIMEMultipart("mixed")
payload.attach(content_payload)
payload.preamble = self._PREAMBLE_TEXT
payload = self._set_payload_headers(payload)
for attachment in self._attachments:
payload.attach(attachment.payload)
return payload
def _build_content_payload(self, content):
return MIMEText(content.encode(self._charset), "plain", self._charset)
def _set_payload_headers(self, payload):
for copy_type, recipients in self._recipients.items():
for recipient in recipients:
payload[copy_type] = self._make_header(recipient)
payload["From"] = self._make_header(self._sender)
payload["Subject"] = self._make_header(self._subject)
payload["Date"] = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime())
return payload
def _make_header(self, value):
return make_header([(self._to_string(value), self._charset)])
def _to_string(self, value):
if PY3:
return value
else:
return value.encode(self._charset)
def attach(self, file, charset=None, mimetype=None):
if charset is None:
charset = self._charset
attachment = Attachment(file, charset, mimetype)
self._attachments.append(attachment)
return attachment
if PY3:
def __str__(self):
return self.payload.as_string()
else:
def __bytes__(self):
return self.payload.as_string()
def __repr__(self):
return to_string("<PlainMessage: %s>" % self.subject)
class HtmlMessage(PlainMessage):
"""Simple wrapper for data of e-mail message with HTML content."""
def _build_content_payload(self, content):
content = content.encode(self._charset)
payload = MIMEMultipart("alternative", charset=self._charset)
text_alternative = MIMEText(strip_tags(content), "plain", self._charset)
payload.attach(text_alternative)
html_alternative = MIMEText(content, "html", self._charset)
payload.attach(html_alternative)
return payload
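# Minimal usage sketch (hypothetical addresses; actual sending is handled elsewhere):
#
#     message = HtmlMessage("[email protected]", "Report", "<p>Hello</p>", charset="utf-8")
#     message.add_recipients("[email protected]")
#     message.attach(...)  # see the Attachment class for what 'file' may be
#     raw_rfc2822 = str(message)  # full MIME message as a string (Python 3)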
| StarcoderdataPython |
1789830 | import json
import random
import glob
import os
import string
from collections import OrderedDict
questions = {}
user_answers = OrderedDict()
current_question = None
current_solution = None
formatted_solution = None
possible_solutions = None
requires = ['db']
def start_quiz(bot, c, e, args):
# TODO: I assume global state breaks if the bot is in more than one channel
global current_question
global current_solution
global formatted_solution
global possible_solutions
global questions
global user_answers
if current_solution:
bot.reply(c, e, 'There already is a quiz running.')
return
elif e.target not in bot.channels:
# TODO: Should also be usable somewhere else
bot.reply(c, e, 'This command can only be used in channels.')
return
elif len(args) < 1:
datasets = ', '.join(questions.keys())
bot.reply(c, e, 'Please specify a dataset name: %s' % datasets)
return
bot.logger.debug('Starting quiz')
dataset = args[0]
if dataset not in questions:
bot.reply(c, e, 'Dataset "%s" does not exist' % dataset)
return
current_question = question = random.choice(questions[dataset])
timeout = question['time'] if 'time' in question else 20 + 25 * question['level']
full_question = '%s (%d secs)' % (question['question'], timeout)
if 'options' in question:
full_question += ': ' + ', '.join([string.ascii_lowercase[i] + ') ' + option for i, option in enumerate(question['options'])])
bot.reply(c, e, full_question)
current_solution = question['answers'].copy()
if 'options' in question:
current_solution += [string.ascii_lowercase[i] for i, answer in enumerate(question['options']) if answer in question['answers']]
formatted_solution = ', '.join([string.ascii_lowercase[i] + ') ' + answer for i, answer in enumerate(question['options']) if answer in question['answers']])
possible_solutions = question['options'] + list(string.ascii_lowercase[:len(question['options'])])
else:
formatted_solution = ', '.join(question['answers'])
possible_solutions = question['answers']
user_answers = OrderedDict()
bot.hook_timeout(timeout, end_quiz, c, e)
def save_answers(bot, c, e, matches):
global current_solution
global possible_solutions
if not current_solution:
return
username, _, _ = e.source.partition('!')
answer = e.arguments[0]
if answer.lower() in map(str.lower, possible_solutions):
if username in user_answers:
user_answers.move_to_end(username)
user_answers[username] = e.arguments[0]
def end_quiz(bot, c, e):
global formatted_solution
global current_question
global current_solution
global user_answers
correct_users = [user for user, answer in user_answers.items()
if answer.lower() in map(str.lower, current_solution)]
# Scores: If there were correct answers, in total we reward as many points as users participated in the quiz.
# The user with the first correct answer is rewarded 2 * base_score points, all other correct users are rewarded base_score points each.
# This scheme incentivizes that many users participate, and correctly answering difficult questions (user answers are spread out evenly among the options) results in a higher reward. Furthermore the first correct user gets a bonus over imitators.
# Note that this does not work well when a question does not have any 'options', as the number of participants cannot be measured.
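    # Worked example (hypothetical round): 5 recorded answers, 2 of them correct ->
    # base_score = 5 / 3 ≈ 1.67, so the first correct user gets ≈ 3.33 points, the other
    # correct user gets ≈ 1.67 points, i.e. roughly 5 points handed out in total.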
base_score = len(user_answers) / (1 + len(correct_users))
res = 'Quiz has ended. Correct solution is: %s (%d %swere right).' \
% (formatted_solution, len(correct_users),
'out of ' + str(len(user_answers)) + ' ' if 'options' in current_question else '')
if correct_users:
res += ' %.1f p. for %s' % (2 * base_score, prevent_highlight(correct_users[0]))
if len(correct_users) > 1:
res += ' (first), %.1f p. for rest' % base_score
res += '.'
bot.reply(c, e, res)
if 'explanation' in current_question and current_question['explanation']:
bot.reply(c, e, current_question['explanation'])
conn = bot.provides['db'].get_conn()
cursor = conn.cursor()
for i, username in enumerate(correct_users):
cursor.execute('''INSERT OR IGNORE INTO quiz_score (username, score)
VALUES (?, ?)''', (username, 0))
score = 2 * base_score if i == 0 else base_score # user with first correct answer gets bonus
cursor.execute('''UPDATE quiz_score SET score = score + ?
WHERE username = ?''', (score, username))
conn.commit()
current_solution = None
def quiz_score(bot, c, e, args):
conn = bot.provides['db'].get_conn()
cursor = conn.cursor()
if len(args) > 0:
username = args[0]
cursor.execute('''SELECT score FROM quiz_score
WHERE username = ?''', (username,))
row = cursor.fetchone()
score = row[0] if row else 0
bot.reply(c, e, '%s: %d' % (prevent_highlight(username), score))
else:
cursor.execute('''SELECT username, score FROM quiz_score
ORDER BY score DESC
LIMIT 10''')
score_outs = []
for username, score in cursor:
score_outs.append('%s: %d' % (prevent_highlight(username), score))
bot.reply(c, e, ', '.join(score_outs))
def load_module(bot):
global questions
conn = bot.provides['db'].get_conn()
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS quiz_score (
username TEXT NOT NULL UNIQUE,
score INTEGER NOT NULL
)''')
conn.commit()
for filepath in glob.glob('data/quiz/*.json'):
set_name = os.path.splitext(os.path.basename(filepath))[0]
data = []
with open(filepath) as f:
for line in f:
if not line.startswith('#') and line.strip(): # not comment or empty line
question = json.loads(line)
# remove empty options (they might exist due to conversion from other formats)
if 'options' in question:
question['options'] = [o for o in question['options'] if o]
data.append(question)
questions[set_name] = data
bot.hook_command('quiz', start_quiz)
bot.hook_command('quiz-score', quiz_score)
bot.hook_regexp('.*', save_answers)
return [hash(start_quiz), hash(quiz_score), hash(save_answers)]
def commands():
return [('quiz', 'Start a new round of a quiz', 'quiz set-name'),
('quiz-score', 'Show the current score. Each quiz rewards as many points as the number of players who participate. The first correct user gets twice the points.', 'quiz-score [user]')]
def prevent_highlight(username):
# use ZERO WIDTH NO-BREAK SPACE so that users' clients don't notify them
return username[0] + '\ufeff' + username[1:]
| StarcoderdataPython |
3215920 | #!/usr/bin/python3
__version__ = '0.0.8' # Time-stamp: <2021-09-14T10:47:01Z>
## Language: Japanese/UTF-8
"""Simulation Buddhism Prototype No.3 - Death
死亡関連
"""
##
## Author:
##
## JRF ( http://jrf.cocolog-nifty.com/statuses/ (in Japanese))
##
## License:
##
## The author is a Japanese.
##
## I intended this program to be public-domain, but you can treat
## this program under the (new) BSD-License or under the Artistic
## License, if it is convenient for you.
##
## Within three months after the release of this program, I
## especially admit responsibility of efforts for rational requests
## of correction to this program.
##
## I often have bouts of schizophrenia, but I believe that my
## intention is legitimately fulfilled.
##
import math
import random
import simbdp3.base as base
from simbdp3.base import ARGS, Person0, Economy0
from simbdp3.common import Death, Tomb, np_clip
from simbdp3.inherit import calc_inheritance_share
class PersonDT (Person0):
def is_dead (self):
return self.death is not None
def die_relation (self, relation):
p = self
rel = relation
economy = self.economy
if p.age > 60:
p.a60_spouse_death = True
rel.end = economy.term
if rel.spouse != '' and economy.is_living(rel.spouse):
s = economy.people[rel.spouse]
if s.marriage is not None and s.marriage.spouse == p.id:
s.marriage.end = economy.term
s.trash.append(s.marriage)
s.marriage = None
            # iterate over a copy so that removing entries does not skip elements
            for a in list(s.adulteries):
                if a.spouse == p.id:
                    a.end = economy.term
                    s.trash.append(a)
                    s.adulteries.remove(a)
def die_child (self, child_id):
p = self
economy = self.economy
ch = None
for x in p.children:
if x.id == child_id:
ch = x
if ch is None:
return
ch.death_term = economy.term
p.children.remove(ch)
p.trash.append(ch)
def die_supporting (self, new_supporter):
p = self
economy = self.economy
ns = None
if new_supporter is not None \
and new_supporter != '':
assert economy.is_living(new_supporter)
ns = economy.people[new_supporter]
assert new_supporter is None or new_supporter == ''\
or (ns is not None and ns.supported is None)
if new_supporter is None or new_supporter == '':
for x in [x for x in p.supporting]:
if x != '' and x in economy.people:
s = economy.people[x]
assert s.supported == p.id
if new_supporter is None:
s.remove_supported()
else:
s.supported = ''
else:
ns.add_supporting(p.supporting_non_nil())
p.supporting = []
def do_inheritance (self):
p = self
economy = self.economy
assert p.is_dead()
q = p.death.inheritance_share
a = p.prop + p.land * ARGS.prop_value_of_land
if q is None or a <= 0:
economy.cur_forfeit_prop += p.prop
economy.cur_forfeit_land += p.land
p.prop = 0
p.land = 0
return
land = p.land
prop = p.prop
for x, y in sorted(q.items(), key=lambda x: x[1], reverse=True):
a1 = a * y
l = math.floor(a1 / ARGS.prop_value_of_land)
if l > land:
l = land
land = 0
else:
land -= l
if x == '':
economy.cur_forfeit_land += l
economy.cur_forfeit_prop += a1 - l * ARGS.prop_value_of_land
prop -= a1 - l * ARGS.prop_value_of_land
else:
assert economy.is_living(x)
p1 = economy.people[x]
if l > 0:
p1.tmp_land_damage = \
(p1.tmp_land_damage * p1.land
+ p.tmp_land_damage * l) / (p1.land + l)
p1.land += l
p1.prop += a1 - l * ARGS.prop_value_of_land
prop -= a1 - l * ARGS.prop_value_of_land
p.land = 0
p.prop = 0
class EconomyDT (Economy0):
def is_living (self, id_or_person):
s = id_or_person
if type(id_or_person) is not str:
s = id_or_person.id
return s in self.people and self.people[s].death is None
def get_person (self, id1):
economy = self
if id1 in economy.people:
return economy.people[id1]
elif id1 in economy.tombs:
return economy.tombs[id1].person
return None
def die (self, persons):
economy = self
if isinstance(persons, base.Person):
persons = [persons]
for p in persons:
assert not p.is_dead()
dt = Death()
dt.term = economy.term
p.death = dt
tomb = Tomb()
tomb.death_term = economy.term
tomb.person = p
tomb.death_hating = p.hating.copy()
tomb.death_hating_unknown = p.hating_unknown
tomb.death_political_hating = p.political_hating
tomb.death_merchant_hating = p.merchant_hating
tomb.death_merchant_hated = p.merchant_hated
economy.tombs[p.id] = tomb
prs = [[] for dist in economy.nation.districts]
for p in economy.people.values():
if not p.is_dead() and p.in_priesthood():
prs[p.district].append(p.id)
for p in persons:
tomb = economy.tombs[p.id]
if prs[p.district]:
tomb.priest = random.choice(prs[p.district])
a = (p.prop + p.land * ARGS.prop_value_of_land) \
* ARGS.priest_share
if a > 0:
p.prop -= a
economy.nation.districts[p.district].priests_share += a
for p in persons:
if p.in_jail():
p.release_from_jail()
for p in persons:
if p.dominator_position is None:
continue
p.get_dominator().resign()
for p in persons:
if p.id in economy.dominator_parameters:
economy.dominator_parameters[p.id].economy = None
del economy.dominator_parameters[p.id]
for p in persons:
p.death.inheritance_share = calc_inheritance_share(economy, p.id)
for p in persons:
spouse = None
if p.marriage is not None \
and (p.marriage.spouse == ''
or economy.is_living(p.marriage.spouse)):
spouse = p.marriage.spouse
if p.marriage is not None:
p.die_relation(p.marriage)
for a in p.adulteries:
p.die_relation(a)
            # The father/mother records need no update when someone dies, but the
            # child records do, because they affect the desired number of children.
if p.father != '' and economy.is_living(p.father):
economy.people[p.father].die_child(p.id)
if p.mother != '' and economy.is_living(p.mother):
economy.people[p.mother].die_child(p.id)
fst_heir = None
if p.death.inheritance_share is not None:
l1 = [(x, y) for x, y
in p.death.inheritance_share.items()
if x != '' and economy.is_living(x)
and x != spouse
and (economy.people[x].supported is None or
economy.people[x].supported == p.id)
and economy.people[x].age >= 18]
if l1:
u = max(l1, key=lambda x: x[1])[1]
l2 = [x for x, y in l1 if y == u]
fst_heir = max(l2, key=lambda x:
economy.people[x].asset_value())
if (fst_heir is None
or fst_heir not in [ch.id for ch in p.children]) \
and spouse is not None and spouse in p.supporting:
if spouse == '':
fst_heir = ''
p.remove_supporting_nil()
else:
s = economy.people[spouse]
if s.age >= 18 and s.age < 70:
fst_heir = spouse
s.remove_supported()
if fst_heir is not None and fst_heir != '' \
and fst_heir in p.supporting:
fh = economy.people[fst_heir]
fh.remove_supported()
if p.supporting:
if p.supported is not None \
and economy.is_living(p.supported):
p.die_supporting(p.supported)
elif fst_heir is None or p.death.inheritance_share is None:
p.die_supporting(None)
else:
p.die_supporting(fst_heir)
if p.supported is not None:
p.remove_supported()
if fst_heir is not None and fst_heir != '':
fh = economy.people[fst_heir]
fh.add_supporting(p)
for p in persons:
p.do_inheritance()
def update_death (economy):
print("\nDeath:...", flush=True)
l = []
for p in economy.people.values():
if not p.is_dead():
if random.random() < ARGS.general_death_rate:
l.append(p)
else:
threshold = 0
if p.age > 110:
threshold = 1
elif p.age > 80 and p.age <= 100:
threshold = ARGS.a80_death_rate
elif p.age > 60 and p.age <= 80:
threshold = ARGS.a60_death_rate
elif p.age >= 0 and p.age <= 3:
threshold = ARGS.infant_death_rate
ij = np_clip(p.injured + p.tmp_injured, 0, 1)
threshold2 = ARGS.injured_death_rate * ij
if random.random() < max([threshold, threshold2]):
l.append(p)
economy.die(l)
| StarcoderdataPython |
4826286 | <gh_stars>10-100
import numpy as np
from scipy.signal import butter
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import FunctionTransformer
from classification.features.constants import (
FREQ_BANDS_ORDERS,
FREQ_BANDS_RANGE,
NYQUIST_FREQ,
)
from classification.features.pipeline.utils import (
get_data_from_epochs,
get_transformer,
)
def _get_signal_mean_energy(signal):
"""
signal: array of (nb_sample_per_epoch,)
"""
return np.sum(signal**2) * 1e6
def _get_pipeline_per_subband(subband_name: str):
"""
Constructs a pipeline to extract the specified subband related features.
Output:
sklearn.pipeline.Pipeline object containing all steps to calculate time-domain feature on the specified subband.
"""
freq_range = FREQ_BANDS_RANGE[subband_name]
order = FREQ_BANDS_ORDERS[subband_name]
assert len(
freq_range) == 2, "Frequency range must only have 2 elements: [lower bound frequency, upper bound frequency]"
bounds = [freq / NYQUIST_FREQ for freq in freq_range]
b, a = butter(order, bounds, btype='bandpass')
def filter_epochs_in_specified_subband(epochs):
return epochs.copy().filter(
l_freq=bounds[0],
h_freq=bounds[1],
method='iir',
n_jobs=1,
iir_params={
'a': a,
'b': b
}, verbose=False)
return Pipeline([
('filter', FunctionTransformer(filter_epochs_in_specified_subband, validate=False)),
('get-values', FunctionTransformer(get_data_from_epochs, validate=False)),
('mean-energy', FunctionTransformer(
get_transformer(_get_signal_mean_energy), validate=True
)),
])
def get_subband_feature_union():
return FeatureUnion([(
f"{band_name}-filter",
_get_pipeline_per_subband(band_name)
) for band_name in FREQ_BANDS_ORDERS.keys()], n_jobs=1)
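# Hedged usage sketch (not part of the original module): the `epochs` object is an
# assumption -- any mne.Epochs instance sampled consistently with NYQUIST_FREQ should do.
#
#     union = get_subband_feature_union()
#     features = union.fit_transform(epochs)   # one mean-energy column per frequency band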
| StarcoderdataPython |
3261933 | from __future__ import annotations
from typing import List
from reamber.base.lists.notes.HoldList import HoldList
from reamber.bms.BMSHold import BMSHold
from reamber.bms.lists.notes.BMSNoteList import BMSNoteList
class BMSHoldList(List[BMSHold], HoldList, BMSNoteList):
def _upcast(self, objList: List = None) -> BMSHoldList:
""" This is to facilitate inherited functions to work
:param objList: The List to cast
:rtype: BMSHoldList
"""
return BMSHoldList(objList)
def multOffset(self, by: float, inplace:bool = False):
HoldList.multOffset(self, by=by, inplace=inplace)
def data(self) -> List[BMSHold]:
return self
| StarcoderdataPython |
3390068 | <gh_stars>0
from sensormodule import isSensorLeft, isSensorRight
from directions import forward, left_forward,right_forward
while True:
    if isSensorLeft():
        right_forward()
    elif isSensorRight():
        left_forward()
else:
forward()
| StarcoderdataPython |
84012 | <reponame>trackuity/jinx
import os
import json
import bsddb3
import struct
class Indexer:
def __init__(self, name, key_field, prefix_fields=None):
self._key_field = key_field
self._prefix_fields = prefix_fields
self._file = open(name, 'r')
self._db = bsddb3.hashopen(name + '.jinx', 'c')
self._offset = 0
def index(self):
for line in self._file:
data = json.loads(line)
key = data[self._key_field]
if self._prefix_fields is not None:
prefix_key = ",".join(
data[prefix_field] for prefix_field in self._prefix_fields
)
key = '{0}:{1}'.format(prefix_key, key)
self._db[bytes(key, 'utf-8')] = struct.pack('L', self._offset)
self._offset += len(line)
def close(self):
self._file.close()
self._db.close()
class Database:
def __init__(self, name, data_dir='.'):
path = os.path.join(data_dir, name)
if os.path.isdir(path): # when directory, pick last member alphanumerically
filename = sorted(f for f in os.listdir(path) if f.endswith('.jinx'))[-1][:-5]
path = os.path.join(path, filename)
else:
for ext in ('', '.json', '.jsonl'):
path = os.path.join(data_dir, name + ext)
if os.path.exists(path + '.jinx'):
break
else:
raise ValueError('database does not exist')
self._file = open(path, 'r')
self._index = bsddb3.hashopen(path + '.jinx', 'r')
def multi_get(self, keys):
for key in keys:
packed = self._index.get(bytes(key, 'utf-8'))
if packed is not None:
offset = struct.unpack('L', packed)[0]
self._file.seek(offset)
yield self._file.readline()[:-1] # don't include newline
def close(self):
self._file.close()
self._index.close()
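# Hedged usage sketch (not part of the original module): 'users.jsonl' and the 'id'
# field are illustrative assumptions. Keys are looked up as strings, so the indexed
# field should hold string values.
if __name__ == '__main__':
    indexer = Indexer('users.jsonl', key_field='id')
    indexer.index()                          # writes users.jsonl.jinx next to the data
    indexer.close()

    db = Database('users')                   # resolves users.jsonl and its .jinx index
    for record in db.multi_get(['42', '7']):
        print(record)
    db.close()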
| StarcoderdataPython |
1695773 | <reponame>steinnymir/sytools<filename>sytools/pes/dld.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Copyright (C) 2018 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import os
import cv2 as cv
import h5py
import skimage.filters as skfilt
import tifffile
import xarray as xr
from dask.diagnostics import ProgressBar
from scipy.ndimage import rotate
from tqdm import tqdm
from .utils import *
def main():
pass
class DldProcessor(object):
PARAMETERS = ['k_center', 'k_final',
'unit_cell', 'aoi_px', 'aoi_k',
'warp_grid_reg', 'warp_grid_dist', 'warp_grid_spacing',
'tof_to_ev', 'tof_0', 'h5_file', 'raw_file','mask_file',
'sigma_tof_blur', 'sigma_highpass', 'sigma_lowpass',
'rotation_angle', 'rotation_center',
]
def __init__(self, h5_file=None, raw_data=None, processed=None, mask=None, chunks=None):
# data containers
self.processed = None
self.raw = None
self.mask = None
# disk file properties
self.h5_file = None
self.raw_file = None
self.mask_file = None
self.chunks = chunks
self.k_center = None, None # as row,col => y, x
self.k_final = None
self.unit_cell = None, None, None
self.aoi_px = None, None
self.aoi_k = None, None
self.warp_grid_reg = None
self.warp_grid_dist = None
self.warp_grid_spacing = None
self.tof_to_ev = None
self.tof_0 = None
# Processing parameters
self.sigma_tof_blur = None
self.sigma_highpass = None
self.sigma_lowpass = None
self.rotation_angle = None
self.rotation_center = None
        self._dos = None
        self._mdos = None
self.history = ''
if h5_file is not None:
self.load_h5(h5_file)
self.h5_file = h5_file
self.update_history('load_h5', f'faddr:{h5_file}')
if raw_data is not None:
if isinstance(raw_data, np.ndarray):
data = raw_data.astype(np.uint16)
elif isinstance(raw_data, str):
if os.path.isfile(raw_data):
self.raw_file = raw_data
data = tifffile.imread(raw_data).astype(np.uint16)
else:
raise ValueError(f'invalid entry "{raw_data}" for Raw Data')
coords = {'ToF': np.arange(data.shape[0]),
'X': np.arange(data.shape[1]),
'Y': np.arange(data.shape[2]), }
self.raw = xr.DataArray(data=data, coords=coords, dims=['ToF', 'X', 'Y'])
if self.processed is None:
self.processed = xr.DataArray(data=self.raw.values.astype(np.float64, copy=True), coords=coords,
dims=['ToF', 'X', 'Y'])
if mask is not None:
self.load_mask(mask)
if processed is not None:
self.processed = processed
@property
def metadata_dict(self):
d = {}
for par in self.PARAMETERS:
d[par] = getattr(self, par)
return d
# d = {'bz_width': self.bz_width,
# 'k_center': self.k_center,
# 'k_final': self.k_final,
# 'unit_cell': self.unit_cell,
# 'aoi_px': self.aoi_px,
# 'aoi_k': self.aoi_k,
# 'warp_grid_reg': self.warp_grid_reg,
# 'warp_grid_dist': self.warp_grid_dist,
# 'warp_grid_spacing': self.warp_grid_spacing,
# }
@property
def dos(self):
if self._dos is not None:
return self._dos
else:
self._dos = self.raw.sum(dim=('X', 'Y'))
return self._dos
@property
def dos_masked(self):
if self._mdos is not None:
return self._mdos
else:
self._mdos = self.raw.where(self.mask).sum(dim=('X', 'Y'))
return self._mdos
@property
def reciprocal_unit_cell(self):
return tuple([2 * np.pi / x for x in self.unit_cell])
@property
def masked_data(self):
return self.processed * self.mask
def update_history(self, method, attributes):
self.history += f'{method}:{attributes}\n'
def reset_data(self):
self.processed.values = self.raw.astype(np.float64, copy=True)
def reset_history(self):
self.history = ''
def chunk(self, chunks=None):
if chunks is not None:
self.chunks = chunks
if self.chunks is None:
self.chunks = {'ToF': 1, 'X': 128, 'Y': 128}
self.processed = self.processed.chunk(chunks=self.chunks)
self.raw = self.raw.chunk(chunks=self.chunks)
self.mask = self.mask.chunk(chunks=self.chunks)
self.update_history('chunk', f'chunks:{chunks}')
def renormalize_DOS(self, use_mask=True):
""" normalize the """
print('Renormalizing DOS and applying mask...')
if use_mask and self.mask is not None:
data = self.processed * self.mask
raw_data = self.raw * self.mask
else:
data = self.processed
raw_data = self.raw
norm = data.sum((1, 2)) / raw_data.sum((1, 2)).astype(np.float64)
self.processed.values = data / norm[:, None, None]
self.update_history('renormalize_DOS', f'use_mask:{use_mask}')
def make_finite(self, substitute=0.0):
""" set all nans and infs to 0. or whatever specified value in substitute"""
print('Handling nans and infs...')
self.processed.values[~np.isfinite(self.processed.values)] = substitute
        self.update_history('make_finite', f'substitute:{substitute}')
def filter_diffraction(self, sigma=None):
""" divide by self, gaussian blurred along energy direction to remove diffraction pattern"""
print('Removing diffraction pattern...')
if sigma is not None:
self.sigma_tof_blur = sigma
self.processed = self.processed / skfilt.gaussian(self.processed, sigma=(self.sigma_tof_blur, 0, 0))
self.update_history('filter_diffraction', f'sigma:{self.sigma_tof_blur}')
def high_pass_isoenergy(self, sigma=None, truncate=2.0):
""" gaussian band pass to remove low frequency fluctuations """
print('Applying high pass filter to each energy slice...')
if sigma is not None:
self.sigma_highpass = sigma
lp = skfilt.gaussian(self.processed, sigma=(0, self.sigma_highpass, self.sigma_highpass), preserve_range=True,
truncate=truncate)
self.processed = self.processed - lp
self.update_history('high_pass_isoenergy', f'sigma:{self.sigma_highpass}, truncate:{truncate}')
def low_pass_isoenergy(self, sigma=(2, 2, 2), truncate=4.0):
""" gaussian band pass to remove low frequency fluctuations """
print('Applying low pass filter to each energy slice...')
if sigma is not None:
self.sigma_lowpass = sigma
self.processed.values = skfilt.gaussian(self.processed, sigma=self.sigma_lowpass, preserve_range=True,
truncate=truncate)
self.update_history('low_pass_isoenergy', f'sigma:{self.sigma_lowpass}, truncate:{truncate}')
def rotate(self, angle, axes=(1, 2), center=None, **kwargs):
""" Rotate the plane defined by axes, around its center."""
if angle is not None:
self.rotation_angle = angle
if center is not None:
self.rotation_center = center # TODO: implement off center rotation
self.processed.values = rotate(self.processed, angle, reshape=False, axes=axes, **kwargs)
self.mask.values = rotate(self.mask, angle, reshape=False, axes=axes, **kwargs)
hist_str = f'angle:{self.rotation_angle}, center:{self.rotation_center}'
for k, v in kwargs.items():
hist_str += f', {k}:{v}'
self.update_history('rotate', hist_str)
    def describe_str(self, data=None, verbose=False):
        if data is None:
            data = self.processed
        s = 'min {:9.3f} | max {:9.3f} | mean {:9.3f} | sum {:9.3f}'.format(np.amin(data),
                                                                            np.amax(data),
                                                                            np.mean(data),
                                                                            np.sum(data))
        if verbose:
            print(s)
        else:
            return s
def load_mask(self, mask=None):
print('loading mask...')
if isinstance(mask, xr.DataArray):
self.mask = mask
else:
coords, dims = None, None
if isinstance(mask, str):
self.mask_file = mask
if '.np' in mask:
mask = np.load(mask)
elif '.h5' in mask:
with h5py.File(mask, 'r') as f:
mask = f['mask/data'][...]
try:
coords = {}
for key in f['mask/axes']:
coords[key] = f[f'mask/axes/{key}']
dims = [x for x in ['ToF', 'X', 'Y'] if x in coords]
except KeyError:
pass
if coords is None and dims is None:
coords = {'ToF': np.arange(0, mask.shape[0]),
'X': np.arange(0, mask.shape[1]),
'Y': np.arange(0, mask.shape[2])}
dims = ['ToF', 'X', 'Y']
self.mask = xr.DataArray(data=mask.astype(np.bool_), coords=coords, dims=dims)
def warp_grid(self, grid_dict, mask=True, ret=False, replace=True):
""" use the given points to create a grid on which to perform perpective warping"""
if isinstance(grid_dict, dict):
pass
elif isinstance(grid_dict, str):
with open(grid_dict, 'r') as f:
grid_dict = json.load(f)
elif None in [self.k_center, self.warp_grid_spacing, self.warp_grid_dist, self.warp_grid_reg]:
pass
else:
raise KeyError('grid_dict is neither a dictionary nor a file')
self.warp_grid_reg = grid_dict['regular']
self.warp_grid_dist = grid_dict['distorted']
self.k_center = grid_dict['k_center']
self.warp_grid_spacing = grid_dict['spacing']
        hist_str = f'k_center:{self.k_center}, ' + \
                   f'warp_grid_spacing:{self.warp_grid_spacing}, ' + \
                   f'warp_grid_reg:{self.warp_grid_reg}, ' + \
                   f'warp_grid_dist:{self.warp_grid_dist}'
self.update_history('warp_grid', hist_str)
print('Warping data...')
# Divide the xy plane in squares and triangles defined by the simmetry points given
# At the moment, only the squares are being used.
squares = []
triangles = []
def get_square_corners(pt, dd):
tl = pt
tr = pt[0] + dd, pt[1]
bl = pt[0], pt[1] + dd
br = pt[0] + dd, pt[1] + dd
return [tl, tr, br, bl]
print(' - making squares...')
for i, pt in enumerate(self.warp_grid_reg):
corner_pts = get_square_corners(pt, self.warp_grid_spacing)
corners = []
# ensure at least one vertex is inside the figure
if not any([all([x[0] < 0 for x in corner_pts]),
all([x[0] > self.processed.shape[1] for x in corner_pts]),
all([y[1] < 0 for y in corner_pts]),
all([y[1] > self.processed.shape[2] for y in corner_pts])]):
for c in corner_pts:
for j in range(len(self.warp_grid_reg)):
dist = point_distance(self.warp_grid_reg[j], c)
if dist < 0.1:
corners.append(j)
break
if len(corners) == 4:
squares.append(corners)
elif len(corners) == 3:
triangles.append(corners)
# Add padding to account for areas out of selected points
pads = []
pads.append(int(np.round(max(0, -min([x[0] for x in self.warp_grid_reg])))))
pads.append(int(np.round(max(0, max([x[0] for x in self.warp_grid_reg]) - self.processed.shape[1]))))
pads.append(int(np.round(max(0, -min([x[1] for x in self.warp_grid_reg])))))
pads.append(int(np.round(max(0, max([x[1] for x in self.warp_grid_reg]) - self.processed.shape[2]))))
for i in range(4):
if pads[i] == 0:
pads[i] = self.warp_grid_spacing
xpad_l, xpad_r, ypad_l, ypad_r = pads
warped_data_padded = np.zeros(
(self.processed.shape[0], self.processed.shape[1] + xpad_l + xpad_r,
self.processed.shape[2] + ypad_l + ypad_r))
if mask:
warped_mask_padded = np.zeros(
(self.processed.shape[0], self.processed.shape[1] + xpad_l + xpad_r,
self.processed.shape[2] + ypad_l + ypad_r))
print(' - calculate warp...')
for e in tqdm(range(self.processed.shape[0])):
# if mask and True not in self.mask[e, ...]:
if mask and True not in self.mask[e, ...].values:
pass
else:
img_pad = np.zeros(
(self.processed.shape[1] + xpad_l + xpad_r, self.processed.shape[2] + ypad_l + ypad_r))
img_pad[xpad_l:-xpad_r, ypad_l:-ypad_r] = self.processed[e, ...]
if mask:
mask_pad = np.zeros(
(self.processed.shape[1] + xpad_l + xpad_r, self.processed.shape[2] + ypad_l + ypad_r))
                    mask_pad[xpad_l:-xpad_r, ypad_l:-ypad_r] = self.mask[e, ...].astype(float)
for corners in squares:
xf, yf = self.warp_grid_reg[corners[0]]
xt, yt = self.warp_grid_reg[corners[2]]
xf += xpad_l
xt += xpad_l
yf += ypad_l
yt += ypad_l
pts1 = np.float32([(x + xpad_l, y + ypad_l) for x, y in
[self.warp_grid_dist[x] for x in corners]]) # [pts[39],pts[41],pts[22],pts[24]]
pts2 = np.float32([(x + xpad_l, y + ypad_l) for x, y in [self.warp_grid_reg[x] for x in corners]])
M = cv.getPerspectiveTransform(pts1, pts2)
dst = cv.warpPerspective(img_pad, M, img_pad.shape[::-1])
if mask:
dst_mask = cv.warpPerspective(mask_pad, M, mask_pad.shape[::-1])
# print( warped_data_padded[e,yf:yt,xf:xt].shape,dst[yf:yt,xf:xt].shape)
try:
warped_data_padded[e, yf:yt, xf:xt] = dst[yf:yt, xf:xt]
if mask:
warped_mask_padded[e, yf:yt, xf:xt] = dst_mask[yf:yt, xf:xt]
except Exception as ex:
print(ex)
warped_data = warped_data_padded[:, xpad_l:-xpad_r, ypad_l:-ypad_r]
if mask:
warped_mask = warped_mask_padded[:, xpad_l:-xpad_r, ypad_l:-ypad_r].astype(np.bool_)
if replace:
self.processed.values = warped_data
if mask:
self.mask.values = warped_mask
if ret:
if mask:
return warped_data, warped_mask
else:
return warped_data
def create_dataframe(self, data='processed', masked=True, chunks={'ToF': 1, 'X': 128, 'Y': 128}):
da = getattr(self, data)
da.name = data
if da.chunks is None:
self.chunk(chunks)
if masked:
da = da.where(self.mask, other=0.0)
self.df = da.to_dataset().to_dask_dataframe().dropna(subset=[data])
self.df = self.df[self.df['processed'] != 0]
def to_parquet(self, file, cols=None):
if cols is None:
df = self.df
else:
df = self.df[cols]
with ProgressBar():
df.to_parquet(file)
def compute_energy_momentum(self, k_center=None, aoi_px=None, aoi_k=None, tof_to_ev=None, tof_0=None):
# TODO: generalize for arbitrary unit cell and energy momentum conversion parameters
if k_center is not None:
self.k_center = k_center
if aoi_px is not None:
self.aoi_px = aoi_px
if aoi_k is not None:
self.aoi_k = aoi_k
if tof_to_ev is not None:
self.tof_to_ev = tof_to_ev
if tof_0 is not None:
self.tof_0 = tof_0
kx = to_reduced_scheme(
to_k_parallel(self.processed.X, self.k_center[1], aoi_px=self.aoi_px[1], aoi_k=self.aoi_k[1]),
aoi_k=self.aoi_k[1])
ky = to_reduced_scheme(
to_k_parallel(self.processed.Y, self.k_center[0], aoi_px=self.aoi_px[0], aoi_k=self.aoi_k[0]),
aoi_k=self.aoi_k[0])
kz = to_reduced_scheme(to_k_perpendicular((self.processed.Y, self.processed.X), self.k_center,
kf=self.k_final, aoi_px=np.mean(self.aoi_px),
aoi_k=np.mean(self.aoi_k) - self.reciprocal_unit_cell[2] / 2),
self.reciprocal_unit_cell[2])
e = slice_to_ev(self.processed.ToF, ToF_to_ev=self.tof_to_ev, t0=self.tof_0)
coords = {'kx': kx, 'ky': ky, 'kz': kz, 'e': e}
self.processed = self.processed.assign_coords(coords)
self.mask = self.mask.assign_coords(coords)
hist_str = f'k_center:{self.k_center}, ' + \
f'aoi_px:{self.aoi_px}, ' + \
f'aoi_k:{self.aoi_k}, ' + \
f'tof_to_ev:{self.tof_to_ev}, ' + \
f'tof_0:{self.tof_0}'
self.update_history('compute_energy_momentum', hist_str)
def save(self, file, save_raw=True, format='h5', mode='a',
overwrite=False, chunks='auto', compression='gzip'):
""" Store data to disk.
Allowed formats are h5, numpy and tiff
"""
if f'.{format}' not in file:
file += f'.{format}'
dir = os.path.dirname(file)
if not os.path.isdir(dir):
os.makedirs(dir)
elif os.path.isfile(file):
if not overwrite: # TODO: check for "mode"
raise FileExistsError(f'File {file} already exists, set new name or allow overwriting')
else:
os.remove(file)
print(f'Saving processed data as "{file}"...')
if format == 'h5':
with h5py.File(file, mode=mode) as f:
errors = []
if self.processed.chunks is not None:
# TODO: auto define chunks size from xarray chunks
pass
if chunks == 'auto':
chunks = 1, self.processed.shape[1] // 16, self.processed.shape[2] // 16
elif chunks and self.chunks is not None:
chunks = [self.chunks[k] for k in self.processed.dims]
f.create_dataset('processed/data', data=self.processed, chunks=chunks, compression=compression)
f.create_dataset('processed/axes/ToF', data=self.processed.ToF, compression=compression)
f.create_dataset('processed/axes/X', data=self.processed.X, compression=compression)
f.create_dataset('processed/axes/Y', data=self.processed.Y, compression=compression)
f.create_dataset('mask/data', data=self.mask, chunks=chunks, compression=compression)
f.create_dataset('mask/axes/ToF', data=self.mask.ToF, compression=compression)
f.create_dataset('mask/axes/X', data=self.mask.X, compression=compression)
f.create_dataset('mask/axes/Y', data=self.mask.Y, compression=compression)
if save_raw:
f.create_dataset('raw/data', data=self.raw, chunks=chunks, dtype=np.uint16,
compression=compression) # raw detector data
f.create_dataset('raw/axes/ToF', data=self.raw.ToF, compression=compression)
f.create_dataset('raw/axes/X', data=self.raw.X, compression=compression)
f.create_dataset('raw/axes/Y', data=self.raw.Y, compression=compression)
for par in self.PARAMETERS:
v = getattr(self, par)
if v is not None:
try:
f.create_dataset(f'metadata/{par}', data=v)
except Exception as e:
errors.append((par, v, e))
if len(errors) > 0:
for par, v, e in errors:
print(f'Failed writing {par} = {v}. Error: {e}')
f.create_dataset("history", data=self.history) # metadata as string
elif format == 'npy':
np.save(file, self.processed)
elif format == 'tiff':
tifffile.imsave(file, self.processed, description=self.hist_str)
def load_h5(self, file, read_groups=None):
""""""
print(f'loading data from "{file}".')
with h5py.File(file, mode='r') as f:
groups = f.keys()
if read_groups is None:
read_groups = groups
print(f'Found {len(groups)} groups: {groups}\nLoading:')
for group in f.keys():
if group not in read_groups:
print(f' - {group} ignored')
else:
print(f' - {group}...')
if group == 'history':
self.hist_str = f['history'][()]
if group == 'metadata':
for key, value in f[f'{group}'].items():
v = value[...]
                            if getattr(self, key) is None:  # TODO: improve metadata reading
try:
v = float(v)
if v%1 == 0:
v = int(v)
except ValueError:
v = str(v)
except TypeError:
pass
else:
v = tuple(v)
setattr(self, key, v)
elif group in ['raw','processed','mask']:
data = f[f'{group}/data'][...]
coords = {}
try:
for key in f[f'{group}/axes']:
coords[key] = f[f'{group}/axes/{key}']
dims = [x for x in ['ToF', 'X', 'Y'] if x in coords]
except KeyError:
coords = {'ToF': np.arange(0, data.shape[0]),
'X': np.arange(0, data.shape[1]),
'Y': np.arange(0, data.shape[2])}
dims = ['ToF', 'X', 'Y']
setattr(self, group, xr.DataArray(data=data, coords=coords, dims=dims, name=group))
# self.update_history('load_h5', f'file:{file}, read_groups:{read_groups}')
def main():
pass
if __name__ == '__main__':
main()
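# Hedged usage sketch (illustrative file names and parameter values, not part of the
# original module):
#
#     proc = DldProcessor(raw_data='run_0001.tif', mask='mask.h5')
#     proc.filter_diffraction(sigma=10)     # divide out the static diffraction pattern
#     proc.high_pass_isoenergy(sigma=50)    # remove slow intensity variations per slice
#     proc.renormalize_DOS()
#     proc.save('run_0001_processed', overwrite=True)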
| StarcoderdataPython |
51639 | <gh_stars>10-100
from hooks.pre_gen_project import check_valid_email_address_format
import pytest
# Define test cases for the `TestCheckValidEmailAddressFormat` test class
args_invalid_email_addresses = ["hello.world", "foo_bar"]
args_valid_email_addresses = ["<EMAIL>", "foo@bar"]
class TestCheckValidEmailAddressFormat:
@pytest.mark.parametrize("test_input_email", args_invalid_email_addresses)
def test_raises_assertion_error_for_invalid_emails(
self, test_input_email: str
) -> None:
"""Test an `AssertionError` is raised for invalid email addresses."""
# Execute the `check_valid_email_address_format` function, and check it raises
# an `AssertionError`
with pytest.raises(AssertionError):
check_valid_email_address_format(test_input_email)
@pytest.mark.parametrize("test_input_email", args_valid_email_addresses)
def test_passes_for_valid_emails(self, test_input_email: str) -> None:
"""Test no errors are raised for valid email addresses."""
# Execute the `check_valid_email_address_format` function, which should not
# raise any exceptions for a valid email address
try:
check_valid_email_address_format(test_input_email)
except Exception as e:
pytest.fail(f"Error raised: {e}")
| StarcoderdataPython |
106999 | from functools import lru_cache
from dataclasses import dataclass
from typing import List
from translator.translator import _
@dataclass
class Product:
name: str
friendly_name: str
description: str
extended_description: str
picture: str
technical: List[str]
hilights: List[str]
products = (
Product(name="EyePoint_P10", friendly_name=_("EyePoint P10"), picture="P10.png",
description=_("Автоматическая настольная система для поиска неисправных электронных компонентов на "
"печатных платах по методу АСА."),
extended_description=_("Система сочетает в себе автоматическую оптическую систему распознавания выводов "
"компонентов и летающий щуп для проведения электрического тестирования по методу "
"аналогового сигнатурного анализа. "
"Для использования системы EyePoint Вам необходима последняя версия драйвера. "
"Предыдущие версии выложены для пользователей, которые не хотят переходить на новые версии. "),
hilights=[_("Прост в использовании"),
_("Снизит нагрузку на инженера"),
_("Не повредит плату")],
technical=[_("Метод тестирования: АСА"),
_("Диапазон частот тестирующего сигнала: 1 Гц – 100 кГц"),
_("Рабочие напряжения: 1,2, 3,3, 5, 12 В"),
_("Максимальный размер платы: 280x275 мм"),
_("Поддержка корпусов: LQFP, SOIC, SMD, SOT, DIP и т.д"),
_("Построение карты тестирования до 10 см²/мин"),
_("Скорость тестирования: 100 точек в мин"),
_("Точность установки щупа: 30 мкм"),
_("Время на смену платы: 30 сек"),
_("Тестирующее напряжение до +/- 12 В"),
_("Чувствительность по R 2 Ом - 450 кОм"),
_("Чувствительность по C 300 пФ - 100 мкФ"),
_("Чувствительность по L от 270 мкГн"),
_("Габариты и вес: 604х543х473 мм, 50 кг"),
_("Управляющий ПК: Intel i5 2.8 ГГц, 16 Гб RAM, 256 Гб SSD"),
_("Электропитание: ~220В, 300 Вт")
]
),
Product(name="EyePoint_P10b", friendly_name=_("EyePoint P10b"), picture="P10b.png",
description=_("EyePoint P10b - автоматическая настольная система для поиска неисправных электронных "
"компонентов на печатных платах с опцией выявления контрафактных, перемаркерованных или "
"поврежденных компонентов в BGA корпусах."),
extended_description=_("Система сочетает в себе автоматическую оптическую систему распознавания выводов "
"компонентов и летающий щуп для проведения электрического тестирования по методу "
"аналогового сигнатурного анализа. "
"Для использования системы EyePoint Вам необходима последняя версия драйвера. "
"Предыдущие версии выложены для пользователей, которые не хотят переходить на новые версии. "),
hilights=[_("PCB и BGA"),
_("Экономия времени на поиске до 3 раз"),
_("Автоматическая проверка до 2500 выводов")],
technical=[_("Метод тестирования: АСА"),
_("Диапазон частот тестирующего сигнала: 1 Гц – 100 кГц"),
_("Рабочие напряжения: 1,2, 3,3, 5, 12 В"),
_("Максимальный размер платы: 280x275 мм"),
_("Поддержка корпусов: LQFP, SOIC, SMD, SOT, DIP и т.д."),
_("Поддержка тестирования корпусов типа BGA"),
_("Построение карты тестирования до 10 см²/мин"),
_("Шаг и количество выводов BGA микросхем: 1,5 - 0,4 мм, 8 - 2500 шт."),
_("Расположение выводов: произвольное"),
_("Скорость тестирования: 100 точек в мин"),
_("Точность установки щупа: 30 мкм"),
_("Время на смену платы: 30 сек"),
_("Тестирующее напряжение до +/- 12 В"),
_("Чувствительность по R 2 Ом - 450 кОм"),
_("Чувствительность по C 300 пФ - 100 мкФ"),
_("Чувствительность по L от 270 мкГн"),
_("Предоставляются подложки для крепления BGA микросхем"),
_("Габариты и вес: 604х543х473 мм, 50 кг"),
_("Управляющий ПК: Intel i5 2.8 ГГц, 16 Гб RAM, 256 Гб SSD"),
_("Электропитание: ~220В, 300 Вт")
]
),
Product(name="EyePoint_S2", friendly_name=_("EyePoint S2"), picture="S2.png",
description=_("Ручная версия локализатора неисправных электронных компонентов с сенсорным экраном."),
extended_description=_("EyePoint S2 – второе поколение настольной системы для локализации неисправных "
"электронных компонентов на печатных платах методом аналогового сигнатурного "
"анализа. Система обладает большим удобным сенсорным экраном, а дополнительная "
"педаль позволит упростить управление системой. "
"Для использования системы EyePoint Вам необходима последняя версия драйвера. "
"Предыдущие версии выложены для пользователей, которые не хотят переходить на новые версии. "),
hilights=[_("Простой и легкий прибор"),
_("Частота зондирования до 100 кГц"),
_("Доступен план тестирования ")],
technical=[_("Диапазон частот тестирующего сигнала: 1 Гц – 100 кГц"),
_("Рабочие напряжения: 1,2, 3,3, 5, 12 В"),
_("В зависимости от диапазона измеряемых значений максимальное напряжение до 12 В, "
"максимальный ток до 250 мкА"),
_("Габаритные размеры: 205 х 204 х 120 мм"),
_("7-ми дюймовый цветной дисплей с функцией touchscreen"),
_("Регулируемый порог совпадения сигнатур"),
_("Внешняя педаль для дополнительного функционала"),
_("Экспорт данных в формате данных PNG на внешний Flash накопитель"),
_("Поддержка режима «План тестирования»"),
_("Возможность подключения к ПК по USB (Win, Linux)"),
_("Возможность программного управления (C/C++, C#, Python)")
]
),
Product(name="EyePoint_u2", friendly_name=_("EyePoint u2"), picture="u2.png",
description=_("EyePoint u2 – миниатюрная система для поиска неисправных электронных компонентов на "
"печатных платах методом аналогового сигнатурного анализа ASA. Для работы необходимо "
"подключить EyePoint u2 к компьютеру по USB и начать тестирование в ручном режиме."),
extended_description=_("EyePoint u2 – миниатюрная система для поиска неисправных электронных компонентов "
"на печатных платах методом аналогового сигнатурного анализа АСА. Для работы "
"необходимо подключить EyePoint u2 к компьютеру по USB и начать тестирование в "
"ручном режиме. Версии: u21 – одноканальная система, u22 – двухканальная система. "
"Для использования системы EyePoint Вам необходима последняя версия драйвера. "
"Предыдущие версии выложены для пользователей, которые не хотят переходить на новые версии. "),
hilights=[_("Частота зондирования до 100 кГц"),
_("Миниатюрные размеры"),
_("Доступен функционал старшей модели S2")],
technical=[_("Метод тестирования: АСА"),
_("Диапазон частот тестирующего сигнала: 1 Гц – 100 кГц"),
_("Рабочие напряжения: 1.2, 3.3, 5, 12 В"),
_("Чувствительность по току: 250 мкА, 2.5 мА, 25 мА"),
_("Напряжение питания: 5 В"),
_("Габаритные размеры: 175 х 90 х 40 мм"),
_("Регулируемый порог совпадения сигнатур"),
_("Поддержка режима «План тестирования»"),
_("Интерфейс связи USB 2.0"),
_("Поддержка ОС: Windows 7/8/10 (х64/х86), Linux"),
_("Возможность программного управления: C#/C++, Qt, Python"),
_("В зависимости от диапазона измеряемых значений максимальное напряжение до 12 В, "
"максимальный ток до 250 мкА")
]
),
Product(name="EyePoint_a2", friendly_name=_("EyePoint a2"), picture="a2.png",
description=_("Одноканальный OEM модуль аналогового сигнатурного анализа без корпуса и пользовательского "
"ПО."),
extended_description=_("EyePoint a2 - ОЕМ модуль, предназначенный для поиска неисправных электронных "
"компонентов на печатных платах методом аналогового сигнатурного анализа (АСА) с "
"подключением к ПК по USB и возможностью составления плана тестирования. Для "
"подключения щупов используется коаксиальный кабель с обязательным подключением "
"экрана кабеля к заземляющим контактам со стороны платы. С модулем предоставляется "
"доступ к API для разработки собственного программного обеспечения с "
"использованием результатом измерения а2. "
"Для использования системы EyePoint Вам необходима последняя версия драйвера. "
"Предыдущие версии выложены для пользователей, которые не хотят переходить на новые версии. "),
hilights=[_("Открытый API"),
_("Полный функционал"),
_("Максимально доступный")],
technical=[_("Метод тестирования: АСА"),
_("Диапазон частот тестирующего сигнала: 1 Гц – 100 кГц"),
_("Рабочие напряжения: 1.2, 3.3, 5, 12 В"),
_("Чувствительность по току: 250 мкА, 2.5 мА, 25 мА"),
_("Интерфейс подключения к ПК: USB 2.0"),
_("Возможность программного управления: С#/С++; Python"),
_("Габаритные размеры: 60х40х5 мм"),
_("Напряжение питания: 5 В"),
_("Питание от USB разъема")
]
)
)
assert(all([" " not in p.name for p in products]))
@lru_cache(maxsize=128)
def product_by_name(name: str) -> Product:
for product in products:
if product.name == name:
return product
raise ValueError("No such product ", name)
| StarcoderdataPython |
196037 | import os
import subprocess
import tempfile
import uuid
import graphviz
from IPython.display import display, Image
from IPython.core.magic import Magics, cell_magic, magics_class
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
from common import helper
compiler = 'iverilog'
yosys_run = '/content/cad4u/verilog/yosys'
script_run = '/content/cad4u/verilog/script.ys'
ext = '.v'
@magics_class
class VERILOGPlugin(Magics):
def __init__(self, shell):
super(VERILOGPlugin, self).__init__(shell)
self.argparser = helper.get_argparser()
self.already_install = False
def updateInstall(self):
print("Installing iverilog. Please wait... ", end="")
args = ["sh", "/content/cad4u/verilog/update_install.sh"]
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
output = output.decode('utf8')
#helper.print_out(output)
print("done!")
@staticmethod
def compile(file_path, flags):
args = [compiler, file_path + ext, "-o", file_path + ".out"]
# adding flags: -O3, -unroll-loops, ...
for flag in flags:
if flag == "<":
break
args.append(flag)
subprocess.check_output(args, stderr=subprocess.STDOUT)
def run_verilog(self, file_path):
args = [file_path + ".out"]
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
output = output.decode('utf8')
helper.print_out(output)
def run_yosys(self, file_path):
args = [yosys_run, "-Q", "-T", "-q", "-s", script_run]
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
output = output.decode('utf8')
helper.print_out(output)
# Printer dot
display(Image(filename="/content/code.png"))
@cell_magic
def verilog(self, line, cell):
if not self.already_install:
self.already_install = True
self.updateInstall()
args = line.split()
with tempfile.TemporaryDirectory() as tmp_dir:
file_path = os.path.join(tmp_dir, str(uuid.uuid4()))
with open(file_path + ext, "w") as f:
f.write(cell)
try:
self.compile(file_path, args)
self.run_verilog(file_path)
except subprocess.CalledProcessError as e:
helper.print_out(e.output.decode("utf8"))
@cell_magic
def print_verilog(self, line, cell):
if not self.already_install:
self.already_install = True
self.updateInstall()
args = line.split()
file_path = os.path.join('/content/code')
with open(file_path + ext, "w") as f:
f.write(cell)
try:
self.run_yosys(file_path)
except subprocess.CalledProcessError as e:
helper.print_out(e.output.decode("utf8"))
@cell_magic
def waveform(self, line, cell):
if not self.already_install:
self.already_install = True
self.updateInstall()
args = line.split()
if len(args) > 0:
name = args[0]
if '.vcd' not in name:
name += '.vcd'
else:
print("Name of file not exist! Please give the name.")
print("Ex. \%\%waveform <name_file>.vcd")
exit(0)
import sys
sys.path.insert(0,'.')
from cad4u.verilog.vcd_parser.vcd_plotter import VcdPlotter
op_dict = []
sign_list = []
time_begin = []
time_end = []
base = []
flag_op_dict = False
for l in cell.strip().split("\n"):
l = l.split("#")[0]
if l == '':
continue
if 'sign_list' not in l and 'op_dict' not in l:
s = l.replace('=', '+=[') + ']'
exec(s)
else:
if 'op_dict' in l:
flag_op_dict = True
exec(l.replace('=', '+='))
if flag_op_dict == False:
op_dict = [[{}]]
vcd_plt = VcdPlotter('/content/%s'%name)
vcd_plt.show(op_dict, sign_list, time_begin[0], time_end[0], base[0])
| StarcoderdataPython |
1639011 | <filename>train/lr_schedule.py
import os
import math
import numpy as np
import torch
def set_lr_scheduler(optimizer, cfg):
r"""Sets the learning rate scheduler
"""
if cfg.lr_scheduler == 'step':
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.step_size, cfg.step_gamma)
elif cfg.lr_scheduler == 'multistep':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.step_milestones, cfg.step_gamma)
elif cfg.lr_scheduler == 'exp':
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, cfg.step_gamma)
elif cfg.lr_scheduler == 'cosine':
#lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.epochs)
#lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=cfg.epochs)
lr_scheduler = WarmupCosineAnnealing(optimizer, epochs=cfg.epochs, warmup_epoch=0)
elif cfg.lr_scheduler == 'warmup_cosine':
lr_scheduler = WarmupCosineAnnealing(optimizer, epochs=cfg.epochs, warmup_epoch=cfg.warmup)
else:
raise ValueError('==> unavailable lr_scheduler:%s' % cfg.scheduler)
return lr_scheduler
def step_lr_epoch(trainer):
trainer.lr_scheduler.step()
def step_lr_batch(trainer):
curr = trainer.reports['epoch'] + (trainer.memory['i'] + 1) / trainer.memory['batch_len_trn']
trainer.lr_scheduler.step(curr)
class WarmupCosineAnnealing(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, epochs, warmup_epoch=5, last_epoch=-1):
if epochs <= 0 or not isinstance(epochs, int):
raise ValueError("Expected positive integer epochs, but got {}".format(epochs))
if warmup_epoch < 0 or not isinstance(warmup_epoch, int):
raise ValueError("Expected positive integer or zero warmup_epoch, but got {}".format(warmup_epoch))
self.epochs = epochs
self.warmup_epoch = warmup_epoch
super(WarmupCosineAnnealing, self).__init__(optimizer, last_epoch)
def get_lr(self, epoch):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
lrs = []
for base_lr in self.base_lrs:
if epoch < self.warmup_epoch:
lr = base_lr * (epoch + 1) / self.warmup_epoch
else:
lr = base_lr * (1 + math.cos(math.pi * (epoch - self.warmup_epoch) / (self.epochs - self.warmup_epoch))) / 2
lrs.append(lr)
return lrs
def step(self, epoch=None):
"""Step could be called after every epoch or batch update
Example:
        >>> scheduler = WarmupCosineAnnealing(optimizer, epochs=20, warmup_epoch=5)
>>> iters = len(dataloader)
>>> for epoch in range(20):
>>> for i, sample in enumerate(dataloader):
>>> inputs, labels = sample['inputs'], sample['labels']
>>> optimizer.zero_grad()
>>> outputs = net(inputs)
>>> loss = criterion(outputs, labels)
>>> loss.backward()
>>> optimizer.step()
>>> scheduler.step(epoch + i / iters)
"""
if epoch is None:
epoch = self.last_epoch + 1
elif epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
self.last_epoch = math.floor(epoch)
class _enable_get_lr_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_lr_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_lr_called_within_step = False
return self
with _enable_get_lr_call(self):
for i, data in enumerate(zip(self.optimizer.param_groups, self.get_lr(epoch))):
param_group, lr = data
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
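# Hedged usage sketch (not part of the original module): the cfg namespace stands in
# for the project's real configuration object; the values are illustrative.
if __name__ == '__main__':
    from types import SimpleNamespace

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    cfg = SimpleNamespace(lr_scheduler='warmup_cosine', epochs=10, warmup=2)
    scheduler = set_lr_scheduler(optimizer, cfg)
    for epoch in range(cfg.epochs):
        scheduler.step(epoch)
        print(epoch, optimizer.param_groups[0]['lr'])   # linear warmup, then cosine decay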
| StarcoderdataPython |
3303140 | <reponame>juanjnc/TGBot
from telegram import Update, ChatAction
from telegram.ext import CallbackContext
def start(update: Update, context: CallbackContext):
"""Envía un mensaje cuando se manda el comando /start."""
context.bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING, timeout=10)
user = update.effective_user
update.message.reply_markdown_v2(fr'Hola {user.mention_markdown_v2()}, encantado de conocerte\!')
def unknown(update: Update, context: CallbackContext):
"""Para comando no reconocido"""
context.bot.sendChatAction(chat_id=update.message.chat_id, action=ChatAction.TYPING, timeout=10)
context.bot.send_message(chat_id=update.effective_chat.id,
text="Ente, no puedo hacer algo para no que no estoy programado. Háztelo mirar, gracias")
| StarcoderdataPython |
3325022 | import copy
import ast
class ReadableFields:
"""
This class is responsible for getting all fields from the constructors of
the classes that inherity from it
"""
def __init__(self):
"""
        This constructor makes it impossible to instantiate a subclass that does not define its own __init__ method.
"""
raise Exception("Class must implement a constructor")
def get_dict(self):
"""
Return a copy of the dictionary that represents the class
"""
return copy.deepcopy(self.__dict__)
def get_deep_dict(self):
"""
Return a dictionary represeting the classes and all their sub-classes
"""
result = self.get_dict()
mutiple_types = (list, tuple)
default_types = (str,)
for field in self.get_init_attributes():
field_type = type(result[field])
if field_type in mutiple_types:
field_list = []
                for sub_field in result[field]:
field_list.append(sub_field.get_deep_dict())
result[field] = field_list
elif not field_type in default_types:
result[field] = result[field].get_deep_dict()
return result
@classmethod
def get_init_attributes(cls):
"""
Return a list of attributes of the class (excluding self and cls)
"""
removable_attributes = ('self', 'cls')
attributes = list(cls.__init__.__code__.co_varnames)
for attribute in removable_attributes:
try:
attributes.remove(attribute)
except ValueError:
pass
return attributes
@classmethod
def generate_instance(cls):
"""
Generate a instance of the class by receiving it fields automatically on the terminal
"""
kwargs = {}
callable_classes = cls.get_callable_classes()
for field in cls.get_init_attributes():
if field in callable_classes.keys():
kwargs[field] = callable_classes[field].generate_instance()
else:
kwargs[field] = input(f"Please inform {field}: ")
return cls(**kwargs)
@classmethod
def get_callable_classes(cls):
"""
Return the a dictionary that represent fields and their respective class
"""
from models import variables
return variables
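# Hedged usage sketch (not part of the original module): Person is an illustrative
# subclass used only to demonstrate the introspection helpers.
if __name__ == '__main__':
    class Person(ReadableFields):
        def __init__(self, name, age):
            self.name = name
            self.age = age

    person = Person('Ada', '36')
    print(Person.get_init_attributes())  # -> ['name', 'age']
    print(person.get_dict())             # -> {'name': 'Ada', 'age': '36'}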
| StarcoderdataPython |
1797487 | <reponame>Trustmega/luxatray
import os
import hid
from gi.repository import Gtk as gtk, AppIndicator3 as appindicator
def main():
indicator = appindicator.Indicator.new("luxatray", "starred-symbolic",
appindicator.IndicatorCategory.APPLICATION_STATUS)
indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
indicator.set_menu(menu())
gtk.main()
def menu():
menu = gtk.Menu()
cmd_off = gtk.MenuItem('Off')
cmd_off.connect('activate', lightOff)
menu.append(cmd_off)
cmd_red = gtk.MenuItem('Red')
cmd_red.connect('activate', red)
menu.append(cmd_red)
cmd_green = gtk.MenuItem('Green')
cmd_green.connect('activate', green)
menu.append(cmd_green)
cmd_blue = gtk.MenuItem('Blue')
cmd_blue.connect('activate', blue)
menu.append(cmd_blue)
cmd_exit = gtk.MenuItem('Exit')
cmd_exit.connect('activate', quit)
menu.append(cmd_exit)
menu.show_all()
return menu
def lightOff(_):
off = [0, 0, 0]
writeToLux(off)
def red(_):
red = [255, 0, 0]
writeToLux(red)
def green(_):
green = [0, 128, 0]
writeToLux(green)
def blue(_):
blue = [0, 0, 255]
writeToLux(blue)
def quit(_):
gtk.main_quit()
def writeToLux(color):
device = hid.device()
device.open(0x04D8, 0xF372)
device.write([2] + [0x41] + color + [10])
device.close()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1627645 | import sys
from .config import COLORS
def prompt(message):
if sys.version_info.major == 3:
action = input(message)
else:
action = raw_input(message)
return action
def default(message):
print(message)
def success(message):
print('{}{}\033[1;m'.format(COLORS['SUCCESS'], message))
def error(message):
print('{}{}\033[1;m'.format(COLORS['ERROR'], message))
def info(message):
print('{}{}\033[1;m'.format(COLORS['INFO'], message)) | StarcoderdataPython |
1774123 | <reponame>henriquekirchheck/Curso-em-video-Python
# Escreva um programa que leia um número N inteiro qualquer e mostre na tela os N primeiros elementos de uma Sequência de Fibonacci
tt = 0
t = int(input('Digite o numero de termos da sequência de Fibonacci: '))
n1 = 0
n2 = 1
print(f'\n{n1} -> ', end='')
while(tt != (t - 1)):
n = n1 + n2
n2 = n1
n1 = n
if(tt <= (t - 3)):
print(n, end=' -> ')
elif(tt == (t - 2)):
print(f'{n} -> Fim')
tt = tt + 1
| StarcoderdataPython |
17390 | # The MIT License (MIT)
#
# Copyright © 2021 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from random import random, seed
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# FrankeFunction: a two-variables function to create the dataset of our vanilla problem
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
# 3D plot of FrankeFunction
def Plot_FrankeFunction(x,y,z, title="Dataset"):
fig = plt.figure(figsize=(8, 7))
ax = fig.gca(projection="3d")
# Plot the surface.
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_zlabel(r"$z$")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title(title)
plt.show()
# Create xyz dataset from the FrankeFunction with a added normal distributed noise
def create_xyz_dataset(n,mu_N, sigma_N):
x = np.linspace(0,1,n)
y = np.linspace(0,1,n)
x,y = np.meshgrid(x,y)
z = FrankeFunction(x,y) +mu_N +sigma_N*np.random.randn(n,n)
return x,y,z
# Error analysis: MSE and R2 score
def R2(z_data, z_model):
return 1 - np.sum((z_data - z_model) ** 2) / np.sum((z_data - np.mean(z_data)) ** 2)
def MSE(z_data,z_model):
n = np.size(z_model)
return np.sum((z_data-z_model)**2)/n
# SVD theorem
def SVD(A):
U, S, VT = np.linalg.svd(A,full_matrices=True)
D = np.zeros((len(U),len(VT)))
print("shape D= ", np.shape(D))
print("Shape S= ",np.shape(S))
print("lenVT =",len(VT))
print("lenU =",len(U))
D = np.eye(len(U),len(VT))*S
"""
for i in range(0,VT.shape[0]): #was len(VT)
D[i,i]=S[i]
print("i=",i)"""
return U @ D @ VT
# SVD inversion
def SVDinv(A):
U, s, VT = np.linalg.svd(A)
# reciprocals of singular values of s
d = 1.0 / s
# create m x n D matrix
D = np.zeros(A.shape)
# populate D with n x n diagonal matrix
D[:A.shape[1], :A.shape[1]] = np.diag(d)
UT = np.transpose(U)
V = np.transpose(VT)
return np.matmul(V,np.matmul(D.T,UT))
# Design matrix for two indipendent variables x,y
def create_X(x, y, n):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
    l = int((n+1)*(n+2)/2) # Number of elements in beta (number of features for a polynomial of degree n)
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
def scale_Xz(X_train, X_test, z_train, z_test, with_std=False):
scaler_X = StandardScaler(with_std=with_std) #with_std=False
scaler_X.fit(X_train)
X_train = scaler_X.transform(X_train)
X_test = scaler_X.transform(X_test)
scaler_z = StandardScaler(with_std=with_std) #with_std=False
z_train = np.squeeze(scaler_z.fit_transform(z_train.reshape(-1, 1))) #scaler_z.fit_transform(z_train) #
z_test = np.squeeze(scaler_z.transform(z_test.reshape(-1, 1))) #scaler_z.transform(z_test) #
return X_train, X_test, z_train, z_test
# Splitting and rescaling data (rescaling is optional)
# Default values: 20% of test data and the scaler is StandardScaler without std.dev.
def Split_and_Scale(X,z,test_size=0.2, scale=True, with_std=False):
#Splitting training and test data
X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=test_size)
# Rescaling X and z (optional)
if scale:
X_train, X_test, z_train, z_test = scale_Xz(X_train, X_test, z_train, z_test, with_std=with_std)
return X_train, X_test, z_train, z_test
# OLS equation
def OLS_solver(X_train, X_test, z_train, z_test):
    # Calculating beta via the ordinary least squares normal equation, using the matrix pseudoinverse
    # Alternatively to NumPy's pseudoinverse, the SVD theorem can be used to evaluate the inverse of a (possibly singular) matrix: just replace 'np.linalg.pinv' with 'SVDinv'.
ols_beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ z_train
z_tilde = X_train @ ols_beta # z_prediction of the train data
z_predict = X_test @ ols_beta # z_prediction of the test data
return ols_beta, z_tilde, z_predict
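# Illustrative end-to-end usage of the helpers above (an assumed workflow, not part
# of the original script; it relies on the numpy/scikit-learn imports at the top):
# sample the noisy Franke function, build a degree-5 design matrix, split/scale,
# fit OLS and report the test error. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    x_demo, y_demo, z_demo = create_xyz_dataset(n=20, mu_N=0.0, sigma_N=0.1)
    X_demo = create_X(x_demo, y_demo, 5)
    X_tr, X_te, z_tr, z_te = Split_and_Scale(X_demo, np.ravel(z_demo))
    beta_demo, z_tilde_demo, z_pred_demo = OLS_solver(X_tr, X_te, z_tr, z_te)
    print("OLS demo - test MSE:", MSE(z_te, z_pred_demo), "test R2:", R2(z_te, z_pred_demo))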
# Return the rolling mean of a vector together with the values one sigma above and below it
def Rolling_Mean(vector, windows=3):
vector_df = pd.DataFrame({'vector': vector})
# computing the rolling average
rolling_mean = vector_df.vector.rolling(windows).mean().to_numpy()
    # computing the values at one sigma from the rolling average
rolling_std = vector_df.vector.rolling(windows).std().to_numpy()
value_up = rolling_mean + rolling_std
value_down = rolling_mean - rolling_std
return rolling_mean, value_down, value_up
# Plot MSE in function of complexity of the model (rolling mean)
def plot_ols_complexity(x, y, z, maxdegree = 20, title="MSE as a function of model complexity"):
complexity = np.arange(0,maxdegree+1)
MSE_train_set = []
MSE_test_set = []
for degree in complexity:
X = create_X(x, y, degree)
        X_train, X_test, z_train, z_test = Split_and_Scale(X,np.ravel(z)) # StandardScaler, test_size=0.2, scale=True
ols_beta, z_tilde,z_predict = OLS_solver(X_train, X_test, z_train, z_test)
MSE_train_set.append(MSE(z_train,z_tilde))
MSE_test_set.append(MSE(z_test,z_predict))
plt.figure( figsize = ( 10, 7))
MSE_train_mean, MSE_train_down, MSE_train_up = Rolling_Mean(MSE_train_set)
plt.plot(complexity, MSE_train_mean, label ="Train (rolling ave.)", color="purple")
plt.fill_between(complexity, MSE_train_down, MSE_train_up, alpha=0.2, color="purple")
MSE_test_mean, MSE_test_down, MSE_test_up = Rolling_Mean(MSE_test_set)
plt.plot(complexity, MSE_test_mean, label ="Test (rolling ave.)", color="orange")
plt.fill_between(complexity, MSE_test_down, MSE_test_up, alpha=0.2, color="orange")
plt.plot(complexity, MSE_train_set, '--', alpha=0.3, color="purple", label ="Train (actual values)")
plt.plot(complexity, MSE_test_set, '--', alpha=0.3, color="orange", label ="Test (actual values)")
plt.xlabel("Complexity")
plt.ylabel("MSE")
plt.xlim(complexity[~np.isnan(MSE_train_mean)][0]-1,complexity[-1]+1)
plt.title("Plot of the MSE as a function of complexity of the model\n– Rolling mean and one-sigma region –")
plt.legend()
plt.grid()
plt.show()
def ridge_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
    ridge_beta = np.linalg.pinv(X_train.T @ X_train + lmd*np.eye(len(X_train.T))) @ X_train.T @ z_train # pseudoinverse
    z_model = X_train @ ridge_beta # prediction on the train data
    z_predict = X_test @ ridge_beta # prediction on the test data
    return ridge_beta, z_model, z_predict
def lasso_reg(X_train, X_test, z_train, z_test, lmd = 10**(-12)):
RegLasso = linear_model.Lasso(lmd)
_ = RegLasso.fit(X_train,z_train)
z_model = RegLasso.predict(X_train)
z_predict = RegLasso.predict(X_test)
return z_model, z_predict
| StarcoderdataPython |
3230336 | from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from otp.otpbase.PythonUtil import nonRepeatingRandomList
import DistributedGagAI, DistributedProjectileAI
from direct.task import Task
import random, time, Racer, RaceGlobals
from direct.distributed.ClockDelta import *
class DistributedRaceAI(DistributedObjectAI.DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedRaceAI')
def __init__(self, air, trackId, zoneId, avIds, laps, raceType, racerFinishedFunc, raceDoneFunc, circuitLoop, circuitPoints, circuitTimes, qualTimes=[], circuitTimeList={}, circuitTotalBonusTickets={}):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.trackId = trackId
self.direction = self.trackId % 2
self.zoneId = zoneId
self.racers = {}
self.avIds = []
self.kickedAvIds = []
self.circuitPoints = circuitPoints
self.circuitTimes = circuitTimes
self.finishPending = []
self.flushPendingTask = None
self.kickSlowRacersTask = None
for avId in avIds:
if avId and avId in self.air.doId2do:
self.avIds.append(avId)
self.racers[avId] = Racer.Racer(self, air, avId, zoneId)
self.toonCount = len(self.racers)
self.startingPlaces = nonRepeatingRandomList(self.toonCount, 4)
self.thrownGags = []
self.ready = False
self.setGo = False
self.racerFinishedFunc = racerFinishedFunc
self.raceDoneFunc = raceDoneFunc
self.lapCount = laps
self.raceType = raceType
if raceType == RaceGlobals.Practice:
self.gagList = []
else:
self.gagList = [
0] * len(RaceGlobals.TrackDict[trackId][4])
self.circuitLoop = circuitLoop
self.qualTimes = qualTimes
self.circuitTimeList = circuitTimeList
self.qualTimes.append(RaceGlobals.TrackDict[trackId][1])
self.circuitTotalBonusTickets = circuitTotalBonusTickets
return
def generate(self):
DistributedObjectAI.DistributedObjectAI.generate(self)
self.notify.debug('generate %s, id=%s, ' % (self.doId, self.trackId))
trackFilepath = RaceGlobals.TrackDict[self.trackId][0]
taskMgr.doMethodLater(0.5, self.enableEntryBarrier, 'enableWaitingBarrier')
def enableEntryBarrier(self, task):
self.enterRaceBarrier = self.beginBarrier('waitingForJoin', self.avIds, 60, self.b_racersJoined)
self.notify.debug('Waiting for Joins!!!!')
self.sendUpdate('waitingForJoin', [])
def removeObject(self, object):
if object:
self.notify.debug('deleting object: %s' % object.doId)
object.requestDelete()
def requestDelete(self, lastRace=True):
self.notify.debug('requestDelete: %s' % self.doId)
self.ignoreBarrier('waitingForExit')
for i in self.thrownGags:
i.requestDelete()
del self.thrownGags
if lastRace:
for i in self.racers:
racer = self.racers[i]
self.ignore(racer.exitEvent)
if racer.kart:
racer.kart.requestDelete()
racer.kart = None
if racer.avatar:
racer.avatar.kart = None
racer.avatar = None
self.racers = {}
if self.flushPendingTask:
taskMgr.remove(self.flushPendingTask)
self.flushPendingTask = None
if self.kickSlowRacersTask:
taskMgr.remove(self.kickSlowRacersTask)
self.kickSlowRacersTask = None
DistributedObjectAI.DistributedObjectAI.requestDelete(self)
return
def delete(self):
self.notify.debug('delete: %s' % self.doId)
DistributedObjectAI.DistributedObjectAI.delete(self)
del self.raceDoneFunc
del self.racerFinishedFunc
def getTaskZoneId(self):
return self.zoneId
def allToonsGone(self):
self.notify.debug('allToonsGone')
self.requestDelete()
def getZoneId(self):
return self.zoneId
def getTrackId(self):
return self.trackId
def getRaceType(self):
return self.raceType
def getCircuitLoop(self):
return self.circuitLoop
def getAvatars(self):
avIds = []
for i in self.racers:
avIds.append(i)
return avIds
def getStartingPlaces(self):
return self.startingPlaces
def getLapCount(self):
return self.lapCount
def requestKart(self):
avId = self.air.getAvatarIdFromSender()
if avId in self.racers:
kart = self.racers[avId].kart
if kart:
kart.request('Controlled', avId)
def b_racersJoined(self, avIds):
self.ignoreBarrier('waitingForJoin')
racersOut = []
for i in self.avIds:
if i not in avIds:
racersOut.append(i)
if len(avIds) == 0:
self.exitBarrier = self.beginBarrier('waitingForExit', self.avIds, 10, self.endRace)
for i in self.avIds:
self.d_kickRacer(i)
return
for i in racersOut:
self.d_kickRacer(i)
self.avIds = avIds
self.waitingForPrepBarrier = self.beginBarrier('waitingForPrep', self.avIds, 30, self.b_prepForRace)
avAndKarts = []
for i in self.racers:
avAndKarts.append([self.racers[i].avId, self.racers[i].kart.doId])
self.sendUpdate('setEnteredRacers', [avAndKarts])
def b_prepForRace(self, avIds):
self.notify.debug('Prepping!!!')
self.ignoreBarrier('waitingForPrep')
racersOut = []
for i in self.avIds:
if i not in avIds:
racersOut.append(i)
if len(avIds) == 0:
self.exitBarrier = self.beginBarrier('waitingForExit', self.avIds, 10, self.endRace)
for i in racersOut:
self.d_kickRacer(i)
if len(avIds) == 0:
return
self.avIds = avIds
for i in xrange(len(self.gagList)):
self.d_genGag(i)
self.waitingForReadyBarrier = self.beginBarrier('waitingForReady', self.avIds, 20, self.b_startTutorial)
self.sendUpdate('prepForRace', [])
def b_startTutorial(self, avIds):
self.ignoreBarrier('waitingForReady')
racersOut = []
for i in self.avIds:
if i not in avIds:
racersOut.append(i)
if len(avIds) == 0:
self.exitBarrier = self.beginBarrier('waitingForExit', self.avIds, 10, self.endRace)
for i in racersOut:
self.d_kickRacer(i)
if len(avIds) == 0:
return
for avId in avIds:
av = self.air.doId2do.get(avId, None)
if not av:
self.notify.warning('b_racersJoined: Avatar not found with id %s' % avId)
elif not self.raceType == RaceGlobals.Practice:
if self.isCircuit() and not self.isFirstRace():
continue
raceFee = RaceGlobals.getEntryFee(self.trackId, self.raceType)
avTickets = av.getTickets()
if avTickets < raceFee:
self.notify.warning('b_racersJoined: Avatar %s does not own enough tickets for the race!')
av.b_setTickets(0)
else:
av.b_setTickets(avTickets - raceFee)
self.avIds = avIds
self.readRulesBarrier = self.beginBarrier('readRules', self.avIds, 10, self.b_startRace)
self.sendUpdate('startTutorial', [])
return
def b_startRace(self, avIds):
self.ignoreBarrier('readRules')
if self.isDeleted():
return
self.notify.debug('Going!!!!!!')
self.ignoreBarrier(self.waitingForReadyBarrier)
self.toonCount = len(self.avIds)
self.baseTime = globalClock.getFrameTime() + 0.5 + RaceGlobals.RaceCountdown
for i in self.racers:
self.racers[i].baseTime = self.baseTime
self.sendUpdate('startRace', [globalClockDelta.localToNetworkTime(self.baseTime)])
qualTime = RaceGlobals.getQualifyingTime(self.trackId)
timeout = qualTime + 60 + 3
self.kickSlowRacersTask = taskMgr.doMethodLater(timeout, self.kickSlowRacers, 'kickSlowRacers')
def kickSlowRacers(self, task):
self.kickSlowRacersTask = None
if self.isDeleted():
return
for racer in self.racers.values():
avId = racer.avId
av = simbase.air.doId2do.get(avId, None)
if av and not av.allowRaceTimeout:
continue
if not racer.finished and avId not in self.kickedAvIds:
self.notify.info('Racer %s timed out - kicking.' % racer.avId)
self.d_kickRacer(avId, RaceGlobals.Exit_Slow)
self.ignore(racer.exitEvent)
racer.exited = True
racer.finished = True
taskMgr.doMethodLater(10, self.removeObject, 'removeKart-%s' % racer.kart.doId, extraArgs=[racer.kart])
taskMgr.remove('make %s invincible' % avId)
self.racers[avId].anvilTarget = True
self.checkForEndOfRace()
return
def d_kickRacer(self, avId, reason=RaceGlobals.Exit_Barrier):
if avId not in self.kickedAvIds:
self.kickedAvIds.append(avId)
if self.isCircuit() and not self.isFirstRace() and reason == RaceGlobals.Exit_Barrier:
reason = RaceGlobals.Exit_BarrierNoRefund
self.sendUpdate('goToSpeedway', [self.kickedAvIds, reason])
def d_genGag(self, slot):
index = random.randint(0, 5)
self.gagList[slot] = index
pos = slot
self.sendUpdate('genGag', [slot, pos, index])
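    # Choose the anvil victim: among racers that are not currently flagged as
    # anvilTarget, keep the one furthest along the track; if that racer is not
    # the thrower, flag them (un-targetable) for 4 seconds and broadcast the drop.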
def d_dropAnvil(self, ownerId):
possibleTargets = []
for i in self.racers:
if not self.racers[i].anvilTarget:
possibleTargets.append(self.racers[i])
while len(possibleTargets) > 1:
if possibleTargets[0].lapT <= possibleTargets[1].lapT:
possibleTargets = possibleTargets[1:]
else:
possibleTargets = possibleTargets[1:] + possibleTargets[:1]
if len(possibleTargets):
id = possibleTargets[0].avId
if id != ownerId:
possibleTargets[0].anvilTarget = True
taskMgr.doMethodLater(4, setattr, 'make %s invincible' % id, extraArgs=[self.racers[id], 'anvilTarget', False])
self.sendUpdate('dropAnvilOn', [ownerId, id, globalClockDelta.getFrameNetworkTime()])
def d_makeBanana(self, avId, x, y, z):
gag = DistributedGagAI.DistributedGagAI(simbase.air, avId, self, 3, x, y, z, 0)
self.thrownGags.append(gag)
gag.generateWithRequired(self.zoneId)
def d_launchPie(self, avId):
ownerRacer = simbase.air.doId2do.get(avId, None)
targetId = 0
type = 0
targetDist = 10000
for iiId in self.racers:
targetRacer = simbase.air.doId2do.get(iiId, None)
if not (targetRacer and targetRacer.kart and ownerRacer and ownerRacer.kart):
continue
if targetRacer.kart.getPos(ownerRacer.kart)[1] < 500 and targetRacer.kart.getPos(ownerRacer.kart)[1] >= 0 and abs(targetRacer.kart.getPos(ownerRacer.kart)[0]) < 50 and avId != iiId and targetDist > targetRacer.kart.getPos(ownerRacer.kart)[1]:
targetId = iiId
targetDist = targetRacer.kart.getPos(ownerRacer.kart)[1]
if targetId == 0:
for iiId in self.racers:
targetRacer = simbase.air.doId2do.get(iiId, None)
if not (targetRacer and targetRacer.kart and ownerRacer and ownerRacer.kart):
continue
if targetRacer.kart.getPos(ownerRacer.kart)[1] > -80 and targetRacer.kart.getPos(ownerRacer.kart)[1] <= 0 and abs(targetRacer.kart.getPos(ownerRacer.kart)[0]) < 50 and avId != iiId:
targetId = iiId
self.sendUpdate('shootPiejectile', [avId, targetId, type])
return
def d_makePie(self, avId, x, y, z):
gag = DistributedProjectileAI.DistributedProjectileAI(simbase.air, self, avId)
self.thrownGags.append(gag)
gag.generateWithRequired(self.zoneId)
def endRace(self, avIds):
if hasattr(self, 'raceDoneFunc'):
self.raceDoneFunc(self, False)
def racerLeft(self, avIdFromClient):
avId = self.air.getAvatarIdFromSender()
if avId in self.racers and avId == avIdFromClient:
self.notify.debug('Removing %d from race %d' % (avId, self.doId))
racer = self.racers[avId]
taskMgr.doMethodLater(10, self.removeObject, racer.kart.uniqueName('removeIt'), extraArgs=[racer.kart])
if racer.avatar:
racer.avatar.kart = None
self.racers[avId].exited = True
            taskMgr.remove('make %s invincible' % avId)
self.racers[avId].anvilTarget = True
raceDone = True
for i in self.racers:
if not self.racers[i].exited:
raceDone = False
if raceDone:
self.notify.debug('race over, sending callback to raceMgr')
self.raceDoneFunc(self)
if avId in self.finishPending:
self.finishPending.remove(avId)
return
def hasGag(self, slot, type, index):
avId = self.air.getAvatarIdFromSender()
if avId in self.racers:
if self.racers[avId].hasGag:
return
if self.gagList[slot] == index:
self.gagList[slot] = None
taskMgr.doMethodLater(5, self.d_genGag, 'remakeGag-' + str(slot), extraArgs=[slot])
self.racers[avId].hasGag = True
self.racers[avId].gagType = type
else:
return
return
def requestThrow(self, x, y, z):
avId = self.air.getAvatarIdFromSender()
if avId in self.racers:
racer = self.racers[avId]
if racer.hasGag:
if racer.gagType == 1:
self.d_makeBanana(avId, x, y, z)
if racer.gagType == 2:
pass
if racer.gagType == 3:
self.d_dropAnvil(avId)
if racer.gagType == 4:
self.d_launchPie(avId)
racer.hasGag = False
racer.gagType = 0
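    # Client reports its lap progress. When a racer completes the final lap the
    # finish is buffered in finishPending and flushed either immediately or, if
    # another unfinished racer is within 0.15 laps, after a 3 second grace period
    # so that near-simultaneous finishes are ranked by total time.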
def heresMyT(self, inputAvId, numLaps, t, timestamp):
avId = self.air.getAvatarIdFromSender()
if avId in self.racers and avId == inputAvId:
me = self.racers[avId]
me.setLapT(numLaps, t, timestamp)
if me.maxLap == self.lapCount and not me.finished:
me.finished = True
                taskMgr.remove('make %s invincible' % avId)
me.anvilTarget = True
someoneIsClose = False
for racer in self.racers.values():
if not racer.exited and not racer.finished:
if me.lapT - racer.lapT < 0.15:
someoneIsClose = True
break
index = 0
for racer in self.finishPending:
if me.totalTime < racer.totalTime:
break
index += 1
self.finishPending.insert(index, me)
if self.flushPendingTask:
taskMgr.remove(self.flushPendingTask)
self.flushPendingTask = None
if someoneIsClose:
task = taskMgr.doMethodLater(3, self.flushPending, self.uniqueName('flushPending'))
self.flushPendingTask = task
else:
self.flushPending()
return
def flushPending(self, task=None):
for racer in self.finishPending:
self.racerFinishedFunc(self, racer)
self.finishPending = []
self.flushPendingTask = None
return
def d_setPlace(self, avId, totalTime, place, entryFee, qualify, winnings, bonus, trophies, circuitPoints, circuitTime):
self.sendUpdate('setPlace', [avId, totalTime, place, entryFee, qualify, winnings, bonus, trophies, circuitPoints, circuitTime])
def d_setCircuitPlace(self, avId, place, entryFee, winnings, bonus, trophies):
self.sendUpdate('setCircuitPlace', [avId, place, entryFee, winnings, bonus, trophies])
def d_endCircuitRace(self):
self.sendUpdate('endCircuitRace')
def unexpectedExit(self, avId):
self.notify.debug('racer disconnected: %s' % avId)
racer = self.racers.get(avId, None)
if racer:
self.sendUpdate('racerDisconnected', [avId])
self.ignore(racer.exitEvent)
racer.exited = True
taskMgr.doMethodLater(10, self.removeObject, 'removeKart-%s' % racer.kart.doId, extraArgs=[racer.kart])
            taskMgr.remove('make %s invincible' % avId)
self.racers[avId].anvilTarget = True
self.checkForEndOfRace()
return
def checkForEndOfRace(self):
if self.isCircuit() and self.everyoneDone():
simbase.air.raceMgr.endCircuitRace(self)
raceOver = True
for i in self.racers:
if not self.racers[i].exited:
raceOver = False
if raceOver:
self.raceDoneFunc(self)
def sendToonsToNextCircuitRace(self, raceZone, trackId):
for avId in self.avIds:
            self.notify.debug('Handling Circuit Race transition for avatar %s' % avId)
self.sendUpdateToAvatarId(avId, 'setRaceZone', [raceZone, trackId])
def isCircuit(self):
return self.raceType == RaceGlobals.Circuit
def isLastRace(self):
return len(self.circuitLoop) == 0
def isFirstRace(self):
return len(self.circuitLoop) == 2
def everyoneDone(self):
done = True
for racer in self.racers.values():
if not racer.exited and racer.avId not in self.playersFinished and racer.avId not in self.kickedAvIds:
done = False
break
return done
| StarcoderdataPython |
111932 | <reponame>opencomputeproject/HWMgmt-DeviceMgr-PSME
"""
* @section LICENSE
*
* @copyright
* Copyright (c) 2015-2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
"""
from include.common.shell_command import ShellCommand
from include.psme_xml_structure.managers.drive_manager import DriveManager as DriveManager_abstract
from include.common.globals import *
DRIVE_RPM_DATA_SIZE = 2
DRIVE_RPM_SECTION = 0
class DriveManager(DriveManager_abstract):
@classmethod
def set_fields(cls, drive, data, context=None):
try:
if XML_NODE in data.keys():
if isinstance(data[XML_NODE], dict):
if data[XML_NODE][XML_AT_ID].startswith(LSHW_DISK):
data = data[XML_NODE]
capabilities = data[LSHW_CAPABILITIES][LSHW_CAPABILITY]
if not isinstance(capabilities, list):
capabilities = [capabilities]
for cap in capabilities:
rpm_info = cap[XML_AT_ID].split(LSHW_RPM)
if len(rpm_info) == DRIVE_RPM_DATA_SIZE:
try:
drive_hdparm = ShellCommand("hdparm -I " + str(data[LSHW_LOGICALNAME]) + " | grep Rotation | cut -d':' -f2")
drive_hdparm.execute()
drive.rpm = int(drive_hdparm.get_stdout())
except ValueError:
# unable to parse hdparm output - it returned "solid state drive" in "rotation" section
drive.rpm = 0
if DISK_SSD_TYPE in data[XML_PRODUCT]:
drive.type = DISK_SSD_TYPE
del drive.rpm
else:
drive.type = DISK_HDD_TYPE
drive.capacityGB = int(int(data[LSHW_SIZE][XML_HASH_TEXT]) / 1000 ** 3)
drive.physicalId = data[LSHW_PHYSID]
drive.firmwareVersion = data[XML_VERSION]
except (KeyError, TypeError):
return None
return drive
@classmethod
def prepare_data(cls, data):
ret = []
drives = data[LSHW_DRIVES]
if not isinstance(drives, list):
drives = [drives]
for drive in drives:
if drive[LSHW_PCI_STORAGE_CNTRL] in data[LSHW_STORAGE][LSHW_BUSINFO]:
ret.append(drive)
return ret
@classmethod
def split_data(cls, data):
return data
| StarcoderdataPython |
1734682 | <gh_stars>0
def announce(f):
def wrapper():
print("Starting function")
f()
print("Function completed execution")
return wrapper
@announce
def hello():
print("Hello, world!")
hello() | StarcoderdataPython |
1699739 | <filename>sistemas_rpg/ficha.py
sistema_ficha ='''『🗃️- ° } F̶i̶c̶h̶a̶ P̶e̶r̶s̶o̶n̶a̶g̶e̶m̶ { ° -🗃️』
→: Identificação do Player
╘ N̶o̶m̶e̶ o̶u̶ N̶i̶c̶k̶ ↝:
╘ N̶ú̶m̶e̶r̶o̶ T̶e̶l̶e̶f̶o̶n̶e̶ ↝:
╘ R̶e̶c̶r̶u̶t̶a̶d̶o̶ P̶o̶r̶.̶.̶.̶ ↝:
→: Identificação Do Personagem
╘ N̶o̶m̶e̶ ↝:
╘ A̶p̶a̶r̶ê̶n̶c̶i̶a̶ ↝:
╘ I̶d̶a̶d̶e̶ (̶A̶t̶é̶ 1̶3̶)̶ ↝:
╘ S̶e̶x̶o̶ ↝:
╘ T̶i̶p̶o̶ S̶a̶n̶g̶u̶í̶n̶e̶o̶ ↝: use o comando /rollsangue
→: Dados
╘ B̶a̶s̶e̶ (̶S̶ó̶ 1̶)̶ ↝:
╘ C̶l̶ã̶ (̶A̶t̶é̶ 2)̶ ↝:
╘ E̶l̶e̶m̶e̶n̶t̶o̶ I̶n̶i̶c̶i̶a̶s̶(̶S̶ó̶ 1̶)̶ ↝:
╘ ̶S̶h̶i̶n̶o̶b̶i / ̶N̶u̶k̶k̶e̶n̶i̶n / A̶n̶d̶a̶r̶i̶l̶h̶o̶ ↝:
╘ D̶a̶t̶a̶ d̶e̶ C̶r̶i̶a̶ç̶ã̶o̶ ↝:
╘N̶o̶t̶a̶s̶ (̶A̶D̶M̶'̶s̶)̶↝̶:''' | StarcoderdataPython |
3201648 | import os
import time
import math
import asyncio
import requests
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import script
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; rv:80.0) Gecko/20100101 Firefox/80.0",
"Referer":"https://www.zee5.com",
"Accept":"*/*",
"Accept-Encoding":"gzip, deflate, br",
"Connection":"keep-alive",
"Accept-Language":"en-US,en;q=0.9",
"Origin":"https://www.zee5.com",
"sec-fetch-dest":"empty",
"sec-fetch-mode":"cors",
"sec-fetch-site":"same-site"
}
async def progress_for_pyrogram(
current,
total,
ud_type,
message,
start
):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
# if round(current / total * 100, 0) % 5 == 0:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
elapsed_time = TimeFormatter(milliseconds=elapsed_time)
estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)
progress = "\n[{0}{1}] \n\n⭕️Progress: {2}%\n".format(
''.join(["▣" for i in range(math.floor(percentage / 5))]),
''.join(["▢" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
tmp = progress + "{0} of {1}\n\n⭕️Speed: {2}/s\n\n⭕️ETA: {3}\n".format(
humanbytes(current),
humanbytes(total),
humanbytes(speed),
# elapsed_time if elapsed_time != '' else "0 s",
estimated_total_time if estimated_total_time != '' else "0 s"
)
try:
await message.edit(
text="{}\n {}".format(
ud_type,
tmp
)
)
except:
pass
def humanbytes(size):
# https://stackoverflow.com/a/49361727/4723940
# 2**10 = 1024
if not size:
return ""
power = 2**10
n = 0
Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
while size > power:
size /= power
n += 1
return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
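# e.g. humanbytes(1536) == "1.5 KiB", humanbytes(0) == ""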
def TimeFormatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + "d, ") if days else "") + \
((str(hours) + "h, ") if hours else "") + \
((str(minutes) + "m, ") if minutes else "") + \
((str(seconds) + "s, ") if seconds else "") + \
((str(milliseconds) + "ms, ") if milliseconds else "")
return tmp[:-2]
async def take_screen_shot(video_file, output_directory, ttl):
out_put_file_name = output_directory + \
"/" + str(time.time()) + ".jpg"
file_genertor_command = [
"ffmpeg",
"-ss",
str(ttl),
"-i",
video_file,
"-vframes",
"1",
out_put_file_name
]
# width = "90"
process = await asyncio.create_subprocess_exec(
*file_genertor_command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
if os.path.lexists(out_put_file_name):
return out_put_file_name
else:
return None
def DownLoadFile(url, file_name, chunk_size, client, ud_type, message_id, chat_id):
if os.path.exists(file_name):
os.remove(file_name)
if not url:
return file_name
r = requests.get(url, allow_redirects=True, stream=True)
total_size = int(r.headers.get("content-length", 0))
downloaded_size = 0
with open(file_name, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
fd.write(chunk)
downloaded_size += chunk_size
if client is not None:
if ((total_size // downloaded_size) % 5) == 0:
time.sleep(0.3)
try:
client.edit_message_text(
chat_id,
message_id,
text="{}: {} of {}".format(
ud_type,
humanbytes(downloaded_size),
humanbytes(total_size)
)
)
except:
pass
return file_name
| StarcoderdataPython |
3228149 | import os
import threading
import time
import unittest
import subprocess
import signal
if "CI" in os.environ:
def tqdm(x):
return x
else:
from tqdm import tqdm # type: ignore
import cereal.messaging as messaging
from collections import namedtuple
from tools.lib.logreader import LogReader
from selfdrive.test.process_replay.test_processes import get_segment
from common.basedir import BASEDIR
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response'])
CONFIGS = [
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=[],
command="./ubloxd",
path="selfdrive/locationd/",
segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3",
wait_for_response=True
),
]
class TestValgrind(unittest.TestCase):
def extract_leak_sizes(self, log):
log = log.replace(",","") # fixes casting to int issue with large leaks
err_lost1 = log.split("definitely lost: ")[1]
err_lost2 = log.split("indirectly lost: ")[1]
err_lost3 = log.split("possibly lost: ")[1]
definitely_lost = int(err_lost1.split(" ")[0])
indirectly_lost = int(err_lost2.split(" ")[0])
possibly_lost = int(err_lost3.split(" ")[0])
return (definitely_lost, indirectly_lost, possibly_lost)
def valgrindlauncher(self, arg, cwd):
os.chdir(os.path.join(BASEDIR, cwd))
# Run valgrind on a process
command = "valgrind --leak-check=full " + arg
p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid) # pylint: disable=W1509
while not self.done:
time.sleep(0.1)
os.killpg(os.getpgid(p.pid), signal.SIGINT)
_, err = p.communicate()
error_msg = str(err, encoding='utf-8')
with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f:
f.write(error_msg)
f.write(5 * "\n")
definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg)
if max(definitely_lost, indirectly_lost, possibly_lost) > 0:
self.leak = True
print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost)
else:
self.leak = False
def replay_process(self, config, logreader):
pub_sockets = [s for s in config.pub_sub.keys()] # We dump data from logs here
sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(pub_sockets)
sm = messaging.SubMaster(sub_sockets)
print("Sorting logs")
all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())]
thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path))
thread.daemon = True
thread.start()
time.sleep(5) # We give the process time to start
for msg in tqdm(pub_msgs):
pm.send(msg.which(), msg.as_builder())
if config.wait_for_response:
sm.update(100)
self.done = True
def test_config(self):
    # truncate the shared log file before the runs
    open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w").close()
for cfg in CONFIGS:
self.done = False
URL = cfg.segment
lr = LogReader(get_segment(URL))
self.replay_process(cfg, lr)
time.sleep(1) # Wait for the logs to get written
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
79693 | <filename>config.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
import yaml
basedir = os.path.abspath(os.path.dirname(__file__))
# Load ACL Action file
_ACL_ACTIONS = None
with open(basedir + '/acl-actions.yaml') as _f:
    _ACL_ACTIONS = yaml.safe_load(_f.read())
class Config(object):
ADMIN_USERNAME = os.environ.get('ADMIN_USERNAME')
SECRET_KEY = os.environ.get('SECRET_KEY') or 'h3bF9paWv9nNfAEo'
SSL_DISABLE = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
BOOTSTRAP_SERVE_LOCAL = True
RECORDS_PER_PAGE = 15
FLASKY_SLOW_DB_QUERY_TIME = 0.5
BABEL_DEFAULT_LOCALE = 'en'
ACL_ACTIONS = _ACL_ACTIONS['aclActions']
ADMIN_DEFAULT_ACL_ACTIONS = _ACL_ACTIONS['adminDefaultAclActions']
LIMITED_ACL_ACTIONS = _ACL_ACTIONS['limitedAclActions']
@classmethod
def init_app(cls, app):
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('WAT_DB_DEV_URL') or \
'sqlite:///' + os.path.join(basedir, 'db-dev.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('WAT_DB_URL') or \
'sqlite:///' + os.path.join(basedir, 'db.sqlite')
config = {
'development': DevelopmentConfig,
'production' : ProductionConfig,
'default': DevelopmentConfig
}
| StarcoderdataPython |
4834724 | <gh_stars>0
import unittest2 as unittest
from Products.CMFCore.utils import getToolByName
from isaw.policy.testing import ISAW_POLICY_INTEGRATION_TESTING
from isaw.policy import config
class TestInstallation(unittest.TestCase):
layer = ISAW_POLICY_INTEGRATION_TESTING
def setUp(self):
self.app = self.layer['app']
self.portal = self.layer['portal']
self.qi_tool = getToolByName(self.portal, 'portal_quickinstaller')
def test_product_is_installed(self):
""" Validate that our products GS profile has been run and the product
installed
"""
pid = 'isaw.policy'
installed = [p['id'] for p in self.qi_tool.listInstalledProducts()]
self.assertTrue(pid in installed,
'package appears not to have been installed')
def testIS_PRODUCTION_globalIsFalseInTests(self):
self.assertFalse(
config.IS_PRODUCTION,
'We should not think we are running in production!'
)
| StarcoderdataPython |
3368095 | <reponame>draustin/otk
import numpy as np
from otk.sdb import *
def test_transforms():
m = orthographic(-2, 3, -4, 5, 6, 7)
assert np.allclose(np.dot([-2,-4,-6,1], m), [-1.0, -1.0, -1.0, 1.0])
assert np.allclose(np.dot([3,5,-7,1], m), [1.0, 1.0, 1.0, 1.0])
assert np.allclose(lookat([1.0, 3.0, -1.0], [1.0, -4.0, -1.0], [0.0, 0.0, 2.0]),
np.asarray((
(-1.0, 0.0, 0.0, 1.0),
(0.0, -0.0, 1.0, 3.0),
(0.0, 1.0, 0.0, -1.0),
(0.0, 0.0, 0.0, 1.0))).T)
def test_misc():
assert all(pix2norm(np.asarray((0,3)), 4) == (-0.75, 0.75))
invP = np.linalg.inv(orthographic(-2.0, 3.0, -4.0, 5.0, 6.0, 7.0))
x0, v, d_max = ndc2ray(-1, -1, invP)
assert np.allclose(x0, (-2.0, -4.0, -6.0, 1.0))
assert np.allclose(v, (0.0, 0.0, -1.0, 0.0))
assert np.isclose(d_max, 1.0) | StarcoderdataPython |
1664898 | # coding: utf-8
from .request import Request
class UserGetRequest(Request):
def __init__(self):
self.fields = None # 查询字段:User数据结构的公开信息字段列表,以半角逗号(,)分隔
self.nick = None # 用户昵称,多个以半角逗号(,)分隔,最多40个
self.method = 'taobao.user.get'
self.p = {}
def set_nick(self, nick):
        self.nick = nick
self.p['nick'] = nick
def set_fields(self, fields):
self.fields = fields
self.p['fields'] = fields
class UsersGetRequest(Request):
def __init__(self):
self.fields = None # 查询字段:User数据结构的公开信息字段列表,以半角逗号(,)分隔
self.nicks = None # 用户昵称,多个以半角逗号(,)分隔,最多40个
self.method = 'taobao.users.get'
self.p = {}
def set_nicks(self, nicks):
        self.nicks = nicks
self.p['nicks'] = nicks
def set_fields(self, fields):
self.fields = fields
self.p['fields'] = fields
| StarcoderdataPython |
4812446 | # -*- coding: utf-8 -*-
# Uncomment the import only for coding support
# import numpy
# import pandas
# import geopandas
# import torch
# import torchvision
# import tensorflow
# import tensorboard
# from shapely.geometry import Point
from openeo_udf.api.feature_collection import FeatureCollection
from openeo_udf.api.udf_data import UdfData
__license__ = "Apache License, Version 2.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def fct_buffer(udf_data: UdfData):
"""Compute buffer of size 10 around features
This function creates buffer around all features in the provided feature collection tiles.
The resulting geopandas.GeoDataFrame contains the new geometries and a copy of the original attribute data.
Args:
udf_data (UdfData): The UDF data object that contains raster and vector tiles
Returns:
This function will not return anything, the UdfData object "udf_data" must be used to store the resulting
data.
"""
fct_list = []
# Iterate over each tile
for tile in udf_data.feature_collection_list:
# Buffer all features
gseries = tile.data.buffer(distance=10)
# Create a new GeoDataFrame that includes the buffered geometry and the attribute data
new_data = tile.data.set_geometry(gseries)
# Create the new feature collection tile
fct = FeatureCollection(id=tile.id + "_buffer", data=new_data,
start_times=tile.start_times, end_times=tile.end_times)
fct_list.append(fct)
# Insert the new tiles as list of feature collection tiles in the input object. The new tiles will
# replace the original input tiles.
udf_data.set_feature_collection_list(fct_list)
| StarcoderdataPython |
1788425 | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
from unittest import TestCase
from mbed_build._internal.mbed_tools.configure import configure
from mbed_build import mbed_tools
class TestExport(TestCase):
def test_aliases_export(self):
self.assertEqual(mbed_tools.configure, configure)
| StarcoderdataPython |
3216139 | <reponame>Himusoka/Beatmap-gen_Thesis
import os
import random
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from sklearn.metrics import f1_score, precision_recall_curve
import matplotlib.pyplot as plt
import glob
from tqdm import tqdm
from utilities.feature_extractor import FeatureExtractor, convert_time
BATCH_SIZE = 16
VAL_SIZE = 0.15
EPOCHS = 50
PATIENCE = 5
LR_RATE = 0.0005
class TypeDataset(Dataset):
def __init__(self, file):
self.extractor = FeatureExtractor()
ground, data, comboground = self.extractor.extract_types(file)
self.x = torch.from_numpy(np.array(data))
self.y = torch.from_numpy(np.array(ground))
self.z = torch.from_numpy(np.array(comboground))
self.samples = self.x.shape[0]
def __getitem__(self, index):
return self.x[index].float(), self.y[index].long(), self.z[index].float()
def __len__(self):
return self.samples
class LstmClustering(nn.Module):
def __init__(self):
super().__init__()
self.lstm1 = nn.LSTM(input_size=13, hidden_size=128, batch_first=True, num_layers=2)
self.lin = nn.Linear(3*128, 128)
self.out = nn.Linear(128, 3)
self.clu = nn.Linear(3*128, 256)
self.clu2 = nn.Linear(256, 128)
self.cluout = nn.Linear(128, 1)
self.sig = nn.Sigmoid()
self.soft = nn.Softmax(dim=1)
def forward(self, x, h_t=None, c_t=None):
if h_t is None or c_t is None:
x, (h_n, c_n) = self.lstm1(x)
else:
x, (h_n, c_n) = self.lstm1(x, (h_t, c_t))
x = F.relu(x)
lstmout = torch.flatten(x, start_dim=1)
x1 = F.dropout(F.relu(self.lin(lstmout)), training=self.training)
x1 = self.out(x1)
x2 = F.dropout(F.relu(self.clu(lstmout)), training=self.training)
x2 = self.cluout(F.relu(self.clu2(x2)))
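        # During training the raw scores are returned because CrossEntropyLoss /
        # BCEWithLogitsLoss apply softmax / sigmoid internally; at inference time
        # the outputs are normalised here.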
if not self.training:
x1 = self.soft(x1)
x2 = self.sig(x2)
return x1, x2, h_n, c_n
def start_training(self, dir, device, outputdir="..\\models\\default", ev_set=None, file_set=None):
if not os.path.exists(outputdir):
os.mkdir(outputdir)
modelname = dir.split('\\')[-1]
all_files = [f for f in glob.glob(os.path.join(dir, "**/*.osu"), recursive=True)]
eval_files_len = int(len(all_files) * VAL_SIZE) + 1
folders = glob.glob(os.path.join(dir, "*\\"))
np.random.shuffle(folders)
eval_files = []
i = 0
while len(eval_files) < eval_files_len:
eval_files.extend([f for f in glob.glob(os.path.join(folders[i], "*.osu"))])
i += 1
files = [x for x in all_files if x not in eval_files]
np.random.shuffle(files)
if ev_set is not None and file_set is not None:
eval_files = np.load(ev_set)
files = np.load(file_set)
optimizer = optim.Adam(self.parameters(), lr=LR_RATE)
loss_fn1 = nn.CrossEntropyLoss()
loss_fn2 = nn.BCEWithLogitsLoss()
loss_vals = []
val_losses = []
highest_f = 0
loss_vals = []
f_scores = []
prev_val_loss = float('inf')
prev_state = self.state_dict()
model_thresh = 0
training_patience = PATIENCE
for epoch in range(EPOCHS):
self.train()
running_loss = 0
dataset_len = 0
np.random.shuffle(files)
for i, file in enumerate(files):
try:
dataset = TypeDataset(file)
loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE)
dataset_len += len(loader)
print("Epoch: " + str(epoch) + "/" + str(EPOCHS) + ", data: " + str(i) + "/" + str(len(files)))
for (batch_X, batch_Y, batch_Z) in tqdm(loader):
optimizer.zero_grad()
out1, out2, _, _ = self(batch_X.to(device))
loss1 = loss_fn1(out1.view(-1, 3), batch_Y.to(device))
loss2 = loss_fn2(out2.view(-1), batch_Z.to(device))
loss = loss1 + loss2
loss.backward()
optimizer.step()
running_loss += loss.item()
except FileNotFoundError as e:
print(str(e))
files.remove(file)
train_loss = running_loss/dataset_len
print("loss: ", train_loss)
loss_vals.append(train_loss)
val_loss, f1, thresh, _ = self.evaluate(eval_files, device)
if prev_val_loss < val_loss:
print("loss increased", abs(training_patience - 5))
training_patience -= 1
if training_patience == -1:
print("Early training stop checkpoint after", epoch, "epochs")
torch.save(prev_state, os.path.join(outputdir, "seq_clust_model_check.pth"))
np.save(os.path.join(outputdir, "seq_clust_thresh.npy"), np.array(model_thresh))
else:
prev_state = self.state_dict()
training_patience = PATIENCE
model_thresh = thresh
prev_val_loss = val_loss
f_scores.append(f1)
val_losses.append(val_loss)
if f_scores[-1] > highest_f:
np.save(os.path.join(outputdir, "seq_clust_thresh_best_f1.npy"), np.array(thresh))
torch.save(self.state_dict(), os.path.join(outputdir, "seq_clust_model_best_f1.pth"))
highest_f = f_scores[-1]
np.save(os.path.join(outputdir, "seq_clust_thresh.npy"), np.array(thresh))
np.save(os.path.join(outputdir, "train_files.npy"), np.array(files))
np.save(os.path.join(outputdir, "val_files.npy"), np.array(eval_files))
torch.save(self.state_dict(), os.path.join(outputdir, "seq_clust_model.pth"))
return loss_vals, val_losses, f_scores
def evaluate(self, files, device, dir=None, model=None):
if model is not None:
self.load_state_dict(torch.load(os.path.join(model, "seq_clust_model.pth"), map_location=device))
if dir is not None:
files = [f for f in glob.glob(os.path.join(dir, "**/*.osu"), recursive=True)]
ground = []
loss_fn1 = nn.CrossEntropyLoss()
loss_fn2 = nn.BCEWithLogitsLoss()
running_loss = 0
dataset_len = 0
with torch.no_grad():
self.eval()
predictions = []
combo_pred = []
ground = []
combo_ground = []
for i, file in tqdm(enumerate(files)):
try:
dataset = TypeDataset(file)
loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE)
dataset_len += len(loader)
for i, (batch_X, batch_Y, batch_Z) in enumerate(loader):
out1, out2, _, _ = self(batch_X.to(device))
loss1 = loss_fn1(out1.view(-1, 3), batch_Y.to(device))
loss2 = loss_fn2(out2.view(-1), batch_Z.to(device))
loss = loss1 + loss2
running_loss += loss.item()
predictions.extend(torch.argmax(out1.cpu(), dim=1))
ground.extend(batch_Y.numpy())
combo_pred.extend(out2.cpu())
combo_ground.extend(batch_Z.cpu())
except FileNotFoundError as e:
print(str(e))
files.remove(file)
predictions = np.array(predictions)
ground = np.array(ground)
combo_pred = np.array(combo_pred)
combo_ground = np.array(combo_ground)
print(combo_pred)
pr, re, thresh = precision_recall_curve(combo_ground, combo_pred)
fscore = (2*pr*re)/(pr+re)
ix = np.argmax(fscore)
print("Best:", thresh[ix], "f1score:", fscore[ix])
print(running_loss/dataset_len)
sequence_f1 = f1_score(ground, predictions, average='micro')
combo_threshed = np.zeros(len(combo_pred))
for i, pred in enumerate(combo_pred):
if pred >= thresh[ix]:
combo_threshed[i] = 1
print(combo_threshed)
combo_f1 = f1_score(combo_ground, combo_threshed)
print("ppl:", torch.exp(torch.tensor(running_loss/dataset_len)))
print("seqf1:", sequence_f1)
print("combof1:", combo_f1)
print((sequence_f1 + combo_f1) / 2)
return running_loss/dataset_len, ((sequence_f1 + combo_f1) / 2), thresh[ix], torch.exp(torch.tensor(running_loss/dataset_len))
def infer(self, onsets, target_diff, sections, global_tempo, local_tempo, device, model="..\\models\\default"):
self.load_state_dict(torch.load(os.path.join(model, "seq_clust_model.pth"), map_location=device))
thresh = np.load(os.path.join(model, "seq_clust_thresh.npy"))
predictions = []
combo_preds = []
with torch.no_grad():
self.eval()
h_0 = None
c_0 = None
prev_time = 0
out = 0
curr_tempo = -1
tempo = (1 / local_tempo[0][1]) * 60 * 1000
past_var_feat = deque([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, convert_time(onsets[0], (1 / local_tempo[0][1]) * 60 * 1000)]], maxlen=3)
const_t = np.array(global_tempo)
difficulty = target_diff
for x in tqdm(range(onsets[:-1].shape[0])):
for (t, flag, _, _) in np.flip(sections):
if t < x:
if flag == -1 and target_diff != 0:
difficulty = target_diff - 1
elif flag == 1 and target_diff != 5:
difficulty = target_diff + 1
else:
difficulty = target_diff
const_feat = np.append(const_t, np.eye(6)[difficulty])
if curr_tempo + 1 < local_tempo.shape[0]:
                    if onsets[x] >= local_tempo[curr_tempo + 1][0]:  # advance at the next tempo change (as in cluster_onsets)
curr_tempo += 1
tempo = (1 / local_tempo[curr_tempo][1]) * 60 * 1000
if out == 1:
typ = np.eye(3)[out]
out = 0
predictions.append(2)
combo_preds.append(0)
prev_time = onsets[x] - prev_time
next_time = onsets[x + 1] - onsets[x]
past_var_feat.append(np.append(typ, [0, convert_time(prev_time, tempo), convert_time(next_time, tempo)]))
continue
if out == 2:
typ = np.eye(3)[out]
out = 0
predictions.append(5)
combo_preds.append(0)
prev_time = onsets[x] - prev_time
next_time = onsets[x + 1] - onsets[x]
past_var_feat.append(np.append(typ, [0, convert_time(prev_time, tempo), convert_time(next_time, tempo)]))
continue
input = []
features = list(past_var_feat)
for i in features:
frame = np.append(const_feat, i)
input.append(frame)
input = torch.from_numpy(np.array(input)).float()
out, combo, h_0, c_0 = self(input.view(-1, 3, 13).to(device), h_0, c_0)
out = torch.argmax(out.view(3), dim=0).cpu()
if convert_time(onsets[x + 1] - onsets[x], tempo) > 2 and out == 1:
out == 0
combo = combo.cpu().item()
if combo > thresh:
combo = 1
else:
combo = 0
combo_preds.append(combo)
typ = np.eye(3)[out]
if out == 2:
predictions.append(4)
else:
predictions.append(out)
prev_time = onsets[x] - prev_time
next_time = onsets[x + 1] - onsets[x]
past_var_feat.append(np.append(typ, [combo, convert_time(prev_time, tempo), convert_time(next_time, tempo)]))
if out == 1:
combo_preds.append(0)
elif out == 2:
combo_preds.append(0)
else:
input = []
features = list(past_var_feat)
for i in features:
frame = np.append(const_feat, i)
input.append(frame)
input = torch.from_numpy(np.array(input)).float()
out, combo, h_0, c_0 = self(input.view(-1, 3, 13).to(device), h_0, c_0)
out = torch.argmax(out.view(3), dim=0).cpu()
if out == 1 or out == 2:
out = 0
combo = combo.cpu().item()
if combo > thresh:
combo = 1
else:
combo = 0
combo_preds.append(combo)
predictions.append(out)
return np.array(predictions), np.array(combo_preds)
def prob_func(combo_len):
return -0.3038 + 3.3241 / combo_len
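# Heuristic grouping of onsets into combos: a new combo is started when the
# beat-normalised gap to the previous onset clearly exceeds the running local
# average (or ~5 beats), and long combos (8+ notes) are broken at random with
# probability 1 - prob_func(combo_len), which grows as the combo gets longer.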
def cluster_onsets(onsets, tempo):
random.seed(onsets[0])
n_combo = np.zeros_like(onsets)
n_combo[0] = 1
combo_len = 1
prev_onset = onsets[0]
local_avg = 0
curr_tempo = -1
for i, onset in enumerate(onsets[1:-1]):
if curr_tempo + 1 < tempo.shape[0]:
if onset >= tempo[curr_tempo + 1][0]:
curr_tempo += 1
dist = convert_time(onset - prev_onset, 1 / tempo[curr_tempo][1] * 60 * 1000)
if n_combo[i] == 1:
local_avg = dist
else:
local_avg += dist
local_avg /= 2
if dist > (local_avg + 0.1) or dist > 4.95:
n_combo[i + 1] = 1
combo_len = 0
elif round(combo_len / 2) >= 4:
if random.random() > prob_func(combo_len):
n_combo[i + 1] = 1
combo_len = 0
combo_len += 1
prev_onset = onset
return n_combo
| StarcoderdataPython |
1642530 | <reponame>amcclead7336/Enterprise_Data_Science_Final
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RunDefinition(Model):
"""A class to manage Run Definition. Run Definition encompass all the
properties including Run Configuration required to create a Run.
:param configuration: Fully specified configuration information for the
run. Even when that information
is contained in configuration files within the project folder, the client
collapses
it all and inlines it into the run definition when submitting a run.
:type configuration: ~_restclient.models.RunConfiguration
:param snapshot_id: Snapshots are user project folders that have been
uploaded to the cloud for subsequent
execution. This field is required when executing against cloud-based
compute targets
unless the run submission was against the API endpoint that takes a zipped
project folder
inline with the request.
:type snapshot_id: str
:param snapshots: The code snapshots that have been uploaded to the cloud
for subsequent execution.
At least one snapshot is required when executing against cloud-based
compute targets
unless the run submission was against the API endpoint that takes a zipped
project folder
inline with the request.
:type snapshots: list[~_restclient.models.Snapshot]
:param parent_run_id: Specifies that the run history entry for this
execution should be scoped within
an existing run as a child. Defaults to null, meaning the run has no
parent.
This is intended for first-party service integration, not third-party API
users.
:type parent_run_id: str
:param run_type: Specifies the runsource property for this run. The
default value is "experiment" if not specified.
:type run_type: str
:param description: Description provided by the user for the run.
:type description: str
:param properties: Any Properties users like to add to the run. This is an
immutable property.
:type properties: dict[str, str]
:param tags: Tags may contain free form text provided by users.
:type tags: dict[str, str]
"""
_attribute_map = {
'configuration': {'key': 'configuration', 'type': 'RunConfiguration'},
'snapshot_id': {'key': 'snapshotId', 'type': 'str'},
'snapshots': {'key': 'snapshots', 'type': '[Snapshot]'},
'parent_run_id': {'key': 'parentRunId', 'type': 'str'},
'run_type': {'key': 'runType', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, configuration=None, snapshot_id=None, snapshots=None, parent_run_id=None, run_type=None, description=None, properties=None, tags=None):
super(RunDefinition, self).__init__()
self.configuration = configuration
self.snapshot_id = snapshot_id
self.snapshots = snapshots
self.parent_run_id = parent_run_id
self.run_type = run_type
self.description = description
self.properties = properties
self.tags = tags
| StarcoderdataPython |
28306 | #
# Copyright © 2021 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. No copyright is claimed
# in the United States under Title 17, U.S. Code. All Other Rights Reserved.
#
# SPDX-License-Identifier: NASA-1.3
#
"""Generate a grid of pointings on the sky."""
import astropy.units as u
from astropy.table import QTable
import numpy as np
from ligo.skymap.tool import ArgumentParser, FileType
from .. import skygrid
def parser():
p = ArgumentParser(prog='dorado-scheduling-skygrid')
p.add_argument('--area', default='50 deg2', type=u.Quantity,
help='Average area per tile')
p.add_argument('--method', default='healpix', help='Tiling algorithm',
choices=[key.replace('_', '-') for key in skygrid.__all__])
p.add_argument('-o', '--output', metavar='OUTPUT.ecsv', default='-',
type=FileType('w'), help='Output filename')
return p
def main(args=None):
args = parser().parse_args(args)
method = getattr(skygrid, args.method.replace('-', '_'))
coords = method(args.area)
table = QTable({'field_id': np.arange(len(coords)), 'center': coords})
table.write(args.output, format='ascii.ecsv')
if __name__ == '__main__':
main()
| StarcoderdataPython |
1701076 | <reponame>ardila/python-docs-samples
#!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Apache Beam streaming pipeline example.
It reads JSON encoded messages from Pub/Sub, transforms the message data and
writes the results to BigQuery.
"""
import argparse
import json
import logging
import time
# Unused dependency used to replicate bug.
import matplotlib
import numpy as np
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import apache_beam.transforms.window as window
def useless_numpy_function(x):
return str(np.array(x))
def run(args, output_text):
"""Build and run the pipeline."""
options = PipelineOptions(args, save_main_session=True)
with beam.Pipeline(options=options) as pipeline:
        # Create a small collection and process it.
_ = (
pipeline
| "Create tiny collection" >> beam.Create(["a", "b", "c"])
| "Useless Numpy Function" >> beam.Map(useless_numpy_function)
| "Write output" >> beam.io.Write(beam.io.WriteToText(output_text))
)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_text", help="Path to output location (should be in a bucket)"
)
known_args, pipeline_args = parser.parse_known_args()
run(pipeline_args, known_args.output_text)
| StarcoderdataPython |
3229192 | import uuid
from datetime import datetime
from sentry_sdk._types import MYPY
from sentry_sdk.utils import format_timestamp
if MYPY:
from typing import Optional
from typing import Union
from typing import Any
from typing import Dict
from sentry_sdk._types import SessionStatus
def _minute_trunc(ts):
# type: (datetime) -> datetime
return ts.replace(second=0, microsecond=0)
def _make_uuid(
val, # type: Union[str, uuid.UUID]
):
# type: (...) -> uuid.UUID
if isinstance(val, uuid.UUID):
return val
return uuid.UUID(val)
class Session(object):
def __init__(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
if sid is None:
sid = uuid.uuid4()
if started is None:
started = datetime.utcnow()
if status is None:
status = "ok"
self.status = status
self.did = None # type: Optional[str]
self.started = started
self.release = None # type: Optional[str]
self.environment = None # type: Optional[str]
self.duration = None # type: Optional[float]
self.user_agent = None # type: Optional[str]
self.ip_address = None # type: Optional[str]
self.errors = 0
self.update(
sid=sid,
did=did,
timestamp=timestamp,
duration=duration,
release=release,
environment=environment,
user_agent=user_agent,
ip_address=ip_address,
errors=errors,
user=user,
)
@property
def truncated_started(self):
# type: (...) -> datetime
return _minute_trunc(self.started)
def update(
self,
sid=None, # type: Optional[Union[str, uuid.UUID]]
did=None, # type: Optional[str]
timestamp=None, # type: Optional[datetime]
started=None, # type: Optional[datetime]
duration=None, # type: Optional[float]
status=None, # type: Optional[SessionStatus]
release=None, # type: Optional[str]
environment=None, # type: Optional[str]
user_agent=None, # type: Optional[str]
ip_address=None, # type: Optional[str]
errors=None, # type: Optional[int]
user=None, # type: Optional[Any]
):
# type: (...) -> None
# If a user is supplied we pull some data form it
if user:
if ip_address is None:
ip_address = user.get("ip_address")
if did is None:
did = user.get("id") or user.get("email") or user.get("username")
if sid is not None:
self.sid = _make_uuid(sid)
if did is not None:
self.did = str(did)
if timestamp is None:
timestamp = datetime.utcnow()
self.timestamp = timestamp
if started is not None:
self.started = started
if duration is not None:
self.duration = duration
if release is not None:
self.release = release
if environment is not None:
self.environment = environment
if ip_address is not None:
self.ip_address = ip_address
if user_agent is not None:
self.user_agent = user_agent
if errors is not None:
self.errors = errors
if status is not None:
self.status = status
def close(
self, status=None # type: Optional[SessionStatus]
):
# type: (...) -> Any
if status is None and self.status == "ok":
status = "exited"
if status is not None:
self.update(status=status)
def get_json_attrs(
self, with_user_info=True # type: Optional[bool]
):
# type: (...) -> Any
attrs = {}
if self.release is not None:
attrs["release"] = self.release
if self.environment is not None:
attrs["environment"] = self.environment
if with_user_info:
if self.ip_address is not None:
attrs["ip_address"] = self.ip_address
if self.user_agent is not None:
attrs["user_agent"] = self.user_agent
return attrs
def to_json(self):
# type: (...) -> Any
rv = {
"sid": str(self.sid),
"init": True,
"started": format_timestamp(self.started),
"timestamp": format_timestamp(self.timestamp),
"status": self.status,
} # type: Dict[str, Any]
if self.errors:
rv["errors"] = self.errors
if self.did is not None:
rv["did"] = self.did
if self.duration is not None:
rv["duration"] = self.duration
attrs = self.get_json_attrs()
if attrs:
rv["attrs"] = attrs
return rv
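# Minimal illustrative usage sketch (not part of the SDK): start a session,
# record an error, close it and serialise it for a session envelope. The
# release/environment values below are made up for the example.
if __name__ == "__main__":
    demo_session = Session(release="demo-app@1.0.0", environment="production")
    demo_session.update(errors=1)
    demo_session.close()
    print(demo_session.to_json())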
| StarcoderdataPython |
3322090 | <filename>plots/midterm/activity.py<gh_stars>1-10
import time
import copy
import os
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
import flowrect
from flowrect.simulations.util import calculate_age, calculate_mt, eta_SRM
from flowrect.simulations import particle_population
from flowrect.simulations import flow_rectification
from flowrect.simulations import quasi_renewal
save = False
save_path = ""
save_name = "activity.pdf"
def moving_average(x, w):
return np.convolve(x, np.ones(w), "valid") / w
dt = 1e-2
N = 25000
I_ext = 2.5
# Take similar as in article
time_end = 40
params = dict(
time_end=time_end,
dt=dt,
Lambda=[1.0, 5.5],
Gamma=[-4.0, -1.0],
# Lambda=np.array([28.0, 8.0, 1.0]),
# Gamma=np.array([-3.5, 3.0, -1.0]),
c=1,
lambda_kappa=2,
I_ext=I_ext,
I_ext_time=20,
interaction=0,
)
print(f"QR approximation")
QR_params = copy.copy(params)
QR_params["dt"] = 1e-2
t = time.time()
ts_QR, A_QR, cutoff = quasi_renewal(**QR_params)
print(f"{time.time() - t:.2f}s")
print(f"Particle simulation")
t = time.time()
ts, M, spikes, A, X = particle_population(**params, N=N, Gamma_ext=True)
m_t = calculate_mt(M, spikes)
A_av = moving_average(A, 50)
# m_ts = np.zeros(m_t.T.shape)
# w = 50
# m_ts[: -w + 1, 0] = moving_average(m_t.T[:, 0], w)
# m_ts[: -w + 1, 1] = moving_average(m_t.T[:, 1], w)
# m_ts[-w + 1 :, :] = m_ts[-w, :]
print(f"{time.time() - t:.2f}")
print(f"Flow rectification approximation")
t = time.time()
ts, a_grid, rho_t, m_t_exact, x_t, en_cons, A_t = flow_rectification(a_cutoff=10, **params)
print(f"{time.time() - t:.2f}s")
I_ext_vec = np.concatenate((np.zeros(int(len(ts) / 2)), I_ext * np.ones(int(len(ts) / 2))))
from_t = int(5 / dt)
fig = plt.figure(figsize=(8, 8))
gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])
ax1 = plt.subplot(gs[0])
# fig.suptitle(r"Activity response to a step input ($\Delta t=10^{-2}$)")
(A_1,) = ax1.plot(ts[from_t:], A[from_t:], "--k", linewidth=0.5, label=f"Particle ({N=})")
(A_2,) = ax1.plot(ts[from_t : len(A_av)], A_av[from_t:], "--r", label="P. rolling av.")
(A_3,) = ax1.plot(ts[from_t:], A_t[from_t:], "-.g", linewidth=1.5, label="PDE")
(A_4,) = ax1.plot(ts_QR[from_t:], A_QR[from_t:], "-b", linewidth=1.5, label="QR")
ax1.set_ylim(0, 1.5)
ax1.set_ylabel(r"$A(t)$ (Hz)")
ax1.legend(handles=[A_1, A_2, A_3, A_4])
ax2 = plt.subplot(gs[1], sharex=ax1)
ax2.plot(ts[from_t:], I_ext_vec[from_t:], "-k")
ax2.set_xlabel(r"$t$ (s)")
ax2.set_xlim(5, time_end)
ax2.set_ylabel(r"$I_0$ (A)")
if save:
fig.savefig(os.path.join(save_path, save_name), transparent=True)
plt.show() | StarcoderdataPython |
# file: insertSort_p3.py
# Example of an insertion sort program
# that uses a hard-coded array instead of user input
def insertSort(array):
    # In-place insertion sort; because the shift condition is `tmp > array[j - 1]`,
    # larger values move to the front and the result is in descending order.
    length = len(array)
    i = 0
    while i < length - 1:
        j = i + 1
        tmp = array[j]
        while j > 0 and tmp > array[j - 1]:
            array[j] = array[j - 1]
            j = j - 1
        array[j] = tmp
        i = i + 1
    return array

userInput = [6, 5, 3, 2, 1, 9, 23, 11]
print("Array: ", userInput)
result = insertSort(userInput)
print("Array after sorting: ", result)
| StarcoderdataPython |
5661 | <reponame>scwolof/doepy<gh_stars>1-10
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.integrate import odeint
from ..continuous_time import MSFB2014
"""
<NAME>, <NAME>, <NAME> and <NAME> (2014)
"Active fault diagnosis for nonlinear systems with probabilistic uncertainties"
IFAC Proceedings (2014): 7079-7084
"""
class Model (MSFB2014.Model):
def __init__ (self, name):
super().__init__(name)
def __call__ (self, x, u, p):
f = lambda x,t: self._ode_func(x,u,p)
t = np.linspace(0, self.dt, 51)
X = odeint(f, x, t)
return X[-1]
class M1 (Model):
"""
Nominal scenario (no fault)
"""
def __init__ (self):
super().__init__('M1')
self._ode_func = MSFB2014.M1()
self.p0 = self._ode_func.p0
class M2 (Model):
"""
Multiplicative actuator fault in inlet pump
"""
def __init__ (self):
super().__init__('M2')
self._ode_func = MSFB2014.M2()
self.p0 = self._ode_func.p0
class M3 (Model):
"""
Circular leak in tank
"""
def __init__ (self):
super().__init__('M3')
self._ode_func = MSFB2014.M3()
self.p0 = self._ode_func.p0
class DataGen (M2):
def __init__ (self):
super().__init__()
self.true_param = np.array([ 0.97, 0.82, 0.96, 0.67 ])
def __call__ (self, x, u):
return super().__call__(x, u, self.true_param)
def get ():
return DataGen(), [M1(), M2(), M3()] | StarcoderdataPython |
179136 | n = int(input('Digite um número para ver sua tabuada: '))
print('____________ \n'
'{} x 1 = {} \n'
'{} x 2 = {} \n'
'{} x 3 = {} \n'
'{} x 4 = {} \n'
'{} x 5 = {} \n'
'{} x 6 = {} \n'
'{} x 7 = {} \n'
'{} x 8 = {} \n'
'{} x 9 = {} \n'
'{} x 10 = {} \n'
'____________'.format(n, n, n, 2*n, n, 3*n, n, 4*n, n, 5*n, n, 6*n, n, 7*n, n, 8*n, n, 9*n, n, 10*n)) | StarcoderdataPython |
150932 | <reponame>kanesoban/pulmonary_fibrosys<gh_stars>0
import numpy as np
import tensorflow as tf
def laplace_log_likelihood(y_true, y_pred):
uncertainty_clipped = tf.maximum(y_pred[:, 1:2] * 1000.0, 70)
prediction = y_pred[:, :1]
delta = tf.minimum(tf.abs(y_true - prediction), 1000.0)
metric = -np.sqrt(2.0) * delta / uncertainty_clipped - tf.math.log(np.sqrt(2.0) * uncertainty_clipped)
return tf.reduce_mean(metric)
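# Illustrative usage sketch (model and shapes are hypothetical): the metric expects
# y_pred to carry two columns per example - column 0 the prediction, column 1 an
# uncertainty that is rescaled by 1000 and clipped to at least 70 inside the metric.
#   model.compile(optimizer="adam", loss="mae", metrics=[laplace_log_likelihood])
# or evaluated eagerly on toy tensors:
#   y_true = tf.constant([[2500.0]])
#   y_pred = tf.constant([[2400.0, 0.2]])   # prediction 2400, uncertainty 0.2 * 1000 = 200
#   laplace_log_likelihood(y_true, y_pred)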
class LaplaceLogLikelihood(tf.keras.metrics.Metric):
    def __init__(self, name='laplace_log_likelihood', **kwargs):
super(LaplaceLogLikelihood, self).__init__(name=name, **kwargs)
self.y_true = []
self.y_pred = []
def reset_states(self):
self.y_true = []
self.y_pred = []
def update_state(self, y_true, y_pred, sample_weight=None):
self.y_true.append(y_true)
self.y_pred.append(y_pred)
def result(self):
y_true = tf.concat(self.y_true, axis=1)
y_pred = tf.concat(self.y_pred, axis=1)
        # Streaming variant: uses a fixed clipped uncertainty (sigma = 100) rather
        # than a model-predicted one.
        uncertainty_clipped = tf.cast(tf.constant(100), tf.float32)
        delta = tf.minimum(tf.abs(y_true - y_pred), 1000.0)
        metric = -np.sqrt(2.0) * delta / uncertainty_clipped - tf.math.log(np.sqrt(2.0) * uncertainty_clipped)
return tf.reduce_mean(metric)
| StarcoderdataPython |
4833413 | from .cky import CKY
from .deptree import DepTree
from .linearchain import LinearChain
from .semimarkov import SemiMarkov
from .semirings import LogSemiring, MaxSemiring, StdSemiring, SampledSemiring
import torch
from hypothesis import given, settings
from hypothesis.strategies import integers, data, sampled_from
smint = integers(min_value=2, max_value=4)
tint = integers(min_value=1, max_value=2)
lint = integers(min_value=2, max_value=10)
@given(smint, smint, smint)
def test_simple(batch, N, C):
vals = torch.ones(batch, N, C, C)
semiring = StdSemiring
alpha = LinearChain(semiring).sum(vals)
assert (alpha == pow(C, N + 1)).all()
LinearChain(SampledSemiring).sum(vals)
@given(data())
@settings(max_examples=50, deadline=None)
def test_generic(data):
model = data.draw(sampled_from([LinearChain, SemiMarkov, DepTree, CKY]))
semiring = data.draw(sampled_from([LogSemiring, MaxSemiring]))
struct = model(semiring)
vals, _ = model._rand()
alpha = struct.sum(vals)
count = struct.enumerate(vals)
print(alpha, count)
assert torch.isclose(count[0], alpha[0])
vals, _ = model._rand()
struct = model(MaxSemiring)
score = struct.sum(vals)
marginals = struct.marginals(vals)
assert torch.isclose(score, struct.score(vals, marginals)).all()
@given(data(), integers(min_value=1, max_value=10))
@settings(max_examples=50, deadline=None)
def test_generic_lengths(data, seed):
model = data.draw(sampled_from([LinearChain, SemiMarkov, DepTree, CKY]))
struct = model()
torch.manual_seed(seed)
vals, (batch, N) = struct._rand()
lengths = torch.tensor(
[data.draw(integers(min_value=2, max_value=N)) for b in range(batch - 1)] + [N]
)
m = model(MaxSemiring).marginals(vals, lengths=lengths)
maxes = struct.score(vals, m)
part = model().sum(vals, lengths=lengths)
assert (maxes <= part).all()
m_part = model(MaxSemiring).sum(vals, lengths=lengths)
assert (torch.isclose(maxes, m_part)).all(), maxes - m_part
# m2 = deptree(vals, lengths=lengths)
# assert (m2 < part).all()
seqs, extra = struct.from_parts(m)
# assert (seqs.shape == (batch, N))
# assert seqs.max().item() <= N
full = struct.to_parts(seqs, extra, lengths=lengths)
if isinstance(full, tuple):
for i in range(len(full)):
if i == 1:
p = m[i].sum(1).sum(1)
else:
p = m[i]
assert (full[i] == p.type_as(full[i])).all(), "%s %s %s" % (
i,
full[i].nonzero(),
p.nonzero(),
)
else:
assert (full == m.type_as(full)).all(), "%s %s %s" % (
full.shape,
m.shape,
(full - m.type_as(full)).nonzero(),
)
@given(data(), integers(min_value=1, max_value=10))
def test_params(data, seed):
model = data.draw(sampled_from([LinearChain, SemiMarkov, DepTree, CKY]))
struct = model()
torch.manual_seed(seed)
vals, (batch, N) = struct._rand()
if isinstance(vals, tuple):
vals = (v.requires_grad_(True) for v in vals)
else:
vals.requires_grad_(True)
# torch.autograd.set_detect_anomaly(True)
semiring = StdSemiring
alpha = model(semiring).sum(vals)
alpha.sum().backward()
def test_hmm():
C, V, batch, N = 5, 20, 2, 5
transition = torch.rand(C, C)
emission = torch.rand(V, C)
init = torch.rand(C)
observations = torch.randint(0, V, (batch, N))
out = LinearChain.hmm(transition, emission, init, observations)
LinearChain().sum(out)
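# Illustrative sketch (not a test): the API exercised above can be used directly;
# the potential shape follows test_simple, the values here are arbitrary.
#   potentials = torch.ones(2, 4, 3, 3)                      # (batch, N, C, C)
#   log_partition = LinearChain(LogSemiring).sum(potentials)
#   best_score = LinearChain(MaxSemiring).sum(potentials)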
| StarcoderdataPython |
1605440 | #coding: utf-8
import os
__all__ = [
"UTILS_DIR", "MODULE_DIR", "REPO_DIR", "DATA_DIR", "SAMPLE_LIST_PATH",
]
UTILS_DIR = os.path.dirname(os.path.abspath(__file__)) #: path/to/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments/teilab/utils
MODULE_DIR = os.path.dirname(UTILS_DIR) #: path/to/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments/teilab
REPO_DIR = os.path.dirname(MODULE_DIR) #: path/to/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments
DATA_DIR = os.path.join(MODULE_DIR, "data") #: path/to/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments/teilab/data
SAMPLE_LIST_PATH = os.path.join(DATA_DIR, "sample_list.txt") #: path/to/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments/teilab/data/sample_list.txt | StarcoderdataPython |
78515 | <gh_stars>0
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p)
#
# Copyright (c) 2020, Technical University of Darmstadt, Germany
#
# This software may be modified and distributed under the terms of a BSD-style license.
# See the LICENSE file in the base directory for details.
import argparse
import logging
import sys
import threading
import traceback
import warnings
from PySide2.QtCore import Qt
from PySide2.QtGui import QPalette, QColor
from PySide2.QtWidgets import QApplication, QMessageBox, QToolTip
from matplotlib import font_manager
import extrap
from extrap.fileio.cube_file_reader2 import read_cube_file
from extrap.fileio.experiment_io import read_experiment
from extrap.fileio.extrap3_experiment_reader import read_extrap3_experiment
from extrap.fileio.json_file_reader import read_json_file
from extrap.fileio.talpas_file_reader import read_talpas_file
from extrap.fileio.text_file_reader import read_text_file
from extrap.gui.MainWidget import MainWidget
from extrap.util.exceptions import RecoverableError, CancelProcessError
TRACEBACK = logging.DEBUG - 1
logging.addLevelName(TRACEBACK, 'TRACEBACK')
def main(*, args=None, test=False):
_update_mac_app_info()
# preload fonts for matplotlib
font_preloader = _preload_common_fonts()
arguments = parse_arguments(args)
# configure logging
log_level = min(logging.getLevelName(arguments.log_level), logging.INFO)
if arguments.log_file:
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(message)s", level=log_level,
filename=arguments.log_file)
else:
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(message)s", level=log_level)
logging.getLogger().handlers[0].setLevel(logging.getLevelName(arguments.log_level))
app = QApplication(sys.argv) if not test else QApplication.instance()
apply_style(app)
window = MainWidget()
_init_warning_system(window, test)
window.show()
try:
load_from_command(arguments, window)
except CancelProcessError:
pass
if not test:
app.exec_()
font_preloader.join()
else:
font_preloader.join()
return window, app
def parse_arguments(args=None):
parser = argparse.ArgumentParser(description=extrap.__description__)
parser.add_argument("--log", action="store", dest="log_level", type=str.upper, default='CRITICAL',
choices=['TRACEBACK', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help="set program's log level (default: CRITICAL)")
parser.add_argument("--logfile", action="store", dest="log_file",
help="set path of log file")
parser.add_argument("--version", action="version", version=extrap.__title__ + " " + extrap.__version__)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--cube", action="store_true", default=False, dest="cube", help="load data from cube files")
group.add_argument("--text", action="store_true", default=False, dest="text", help="load data from text files")
group.add_argument("--talpas", action="store_true", default=False, dest="talpas",
help="load data from talpas data format")
group.add_argument("--json", action="store_true", default=False, dest="json",
help="load data from json or jsonlines file")
group.add_argument("--extra-p-3", action="store_true", default=False, dest="extrap3",
help="load data from Extra-P 3 experiment")
parser.add_argument("path", metavar="FILEPATH", type=str, action="store", nargs='?',
help="specify a file path for Extra-P to work with")
parser.add_argument("--scaling", action="store", dest="scaling_type", default="weak", type=str.lower,
choices=["weak", "strong"],
help="set weak or strong scaling when loading data from cube files [weak (default), strong]")
arguments = parser.parse_args(args)
return arguments
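# Illustrative invocations (paths are hypothetical, the flags are the ones defined
# above): main(args=["--text", "measurements.txt"]) or
# main(args=["--cube", "--scaling", "strong", "profile_dir"]).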
def load_from_command(arguments, window):
if arguments.path:
if arguments.text:
window.import_file(read_text_file, file_name=arguments.path)
elif arguments.json:
window.import_file(read_json_file, file_name=arguments.path)
elif arguments.talpas:
window.import_file(read_talpas_file, file_name=arguments.path)
elif arguments.cube:
window.import_file(lambda x, y: read_cube_file(x, arguments.scaling_type, y), file_name=arguments.path)
elif arguments.extrap3:
window.import_file(read_extrap3_experiment, model=False, file_name=arguments.path)
else:
window.import_file(read_experiment, model=False, file_name=arguments.path)
def _init_warning_system(window, test=False):
open_message_boxes = []
current_warnings = set()
# save old handlers
_old_warnings_handler = warnings.showwarning
_old_exception_handler = sys.excepthook
def activate_box(box):
box.raise_()
box.activateWindow()
def display_messages(event):
for w in open_message_boxes:
w.raise_()
w.activateWindow()
if sys.platform.startswith('darwin'):
window.activate_event_handlers.append(display_messages)
def _warnings_handler(message: Warning, category, filename, lineno, file=None, line=None):
nonlocal current_warnings
message_str = str(message)
if message_str not in current_warnings:
warn_box = QMessageBox(QMessageBox.Warning, 'Warning', message_str, QMessageBox.Ok, window)
warn_box.setModal(False)
warn_box.setAttribute(Qt.WA_DeleteOnClose)
warn_box.destroyed.connect(
lambda x: (current_warnings.remove(message_str), open_message_boxes.remove(warn_box)))
if not test:
warn_box.show()
activate_box(warn_box)
open_message_boxes.append(warn_box)
current_warnings.add(message_str)
_old_warnings_handler(message, category, filename, lineno, file, line)
logging.warning(message_str)
logging.log(TRACEBACK, ''.join(traceback.format_stack()))
QApplication.processEvents()
def _exception_handler(type, value, traceback_):
traceback_text = ''.join(traceback.extract_tb(traceback_).format())
if issubclass(type, CancelProcessError):
logging.log(TRACEBACK, str(value))
logging.log(TRACEBACK, traceback_text)
return
parent, modal = _parent(window)
msg_box = QMessageBox(QMessageBox.Critical, 'Error', str(value), QMessageBox.Ok, parent)
print()
if hasattr(value, 'NAME'):
msg_box.setWindowTitle(getattr(value, 'NAME'))
msg_box.setDetailedText(traceback_text)
open_message_boxes.append(msg_box)
logging.error(str(value))
logging.log(TRACEBACK, traceback_text)
if test:
return _old_exception_handler(type, value, traceback_)
_old_exception_handler(type, value, traceback_)
if issubclass(type, RecoverableError):
msg_box.open()
activate_box(msg_box)
else:
activate_box(msg_box)
msg_box.exec_() # ensures waiting
exit(1)
warnings.showwarning = _warnings_handler
sys.excepthook = _exception_handler
warnings.simplefilter('always', UserWarning)
def apply_style(app):
app.setStyle('Fusion')
palette = QPalette()
palette.setColor(QPalette.Window, QColor(190, 190, 190))
palette.setColor(QPalette.WindowText, Qt.black)
palette.setColor(QPalette.Base, QColor(220, 220, 220))
palette.setColor(QPalette.AlternateBase, QColor(10, 10, 10))
palette.setColor(QPalette.Text, Qt.black)
palette.setColor(QPalette.Button, QColor(220, 220, 220))
palette.setColor(QPalette.ButtonText, Qt.black)
palette.setColor(QPalette.Highlight, QColor(31, 119, 180))
palette.setColor(QPalette.HighlightedText, Qt.white)
palette.setColor(QPalette.ToolTipBase, QColor(230, 230, 230))
palette.setColor(QPalette.ToolTipText, Qt.black)
palette.setColor(QPalette.Disabled, QPalette.Text, QColor(80, 80, 80))
palette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(80, 80, 80))
palette.setColor(QPalette.Disabled, QPalette.Button, QColor(150, 150, 150))
app.setPalette(palette)
QToolTip.setPalette(palette)
def _preload_common_fonts():
common_fonts = [
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=10.0'),
'STIXGeneral', 'STIXGeneral:italic', 'STIXGeneral:weight=bold',
'STIXNonUnicode', 'STIXNonUnicode:italic', 'STIXNonUnicode:weight=bold',
'STIXSizeOneSym', 'STIXSizeTwoSym', 'STIXSizeThreeSym', 'STIXSizeFourSym', 'STIXSizeFiveSym',
'cmsy10', 'cmr10', 'cmtt10', 'cmmi10', 'cmb10', 'cmss10', 'cmex10',
'DejaVu Sans', 'DejaVu Sans:italic', 'DejaVu Sans:weight=bold', 'DejaVu Sans Mono', 'DejaVu Sans Display',
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=12.0'),
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=6.0')
]
def _thread(fonts):
for f in fonts:
font_manager.findfont(f)
thread = threading.Thread(target=_thread, args=(common_fonts,))
thread.start()
return thread
def _parent(window):
if not sys.platform.startswith('darwin'):
return window, False
modal = QApplication.activeModalWidget()
parent = modal if modal else window
return parent, bool(modal)
def _update_mac_app_info():
if sys.platform.startswith('darwin'):
try:
from Foundation import NSBundle # noqa
bundle = NSBundle.mainBundle()
if bundle:
app_info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
if app_info:
app_info['CFBundleName'] = extrap.__title__
from AppKit import NSWindow
NSWindow.setAllowsAutomaticWindowTabbing_(False)
except ImportError:
pass
if __name__ == "__main__":
main()
| StarcoderdataPython |
1627589 | from seleniumwire.thirdparty.mitmproxy.addons import core
from seleniumwire.thirdparty.mitmproxy.addons import streambodies
from seleniumwire.thirdparty.mitmproxy.addons import upstream_auth
def default_addons():
return [
core.Core(),
streambodies.StreamBodies(),
upstream_auth.UpstreamAuth(),
]
| StarcoderdataPython |
178441 | <reponame>ardihikaru/mlsp<gh_stars>0
# Source: https://github.com/ninpnin/isomap/blob/master/isomap.py
import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from scipy import spatial
from scipy.spatial import distance_matrix
import matplotlib.pyplot as plt
import pandas
import math
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, default="toydata.csv", help='path of data file')
parser.add_argument('--k', type=int, default=11, help='number of nearest neighbors for the k-means')
parser.add_argument('--out_dim', type=int, default=2, help='dimensionality of the output')
parser.add_argument('--color_column', type=int, default=0, help='which column in the data represents the color')
parser.add_argument('--max_iterations', type=int, default=300,
help='how many iterations will be performed for the gradient descent')
    parser.add_argument('--neighborhood_radius', type=float, default=0.15,
                        help='neighborhood radius used when computing precision and recall of the embedding')
args = parser.parse_args()
# read data from csv, convert to data matrix
data = pandas.read_csv(args.filename).values
print(data)
    # extract the color column, then drop the label/color column(s) from the data
    z = data[:, args.color_column].astype(float)
    for i in range(0, args.color_column + 1):
        data = np.delete(data, 0, 1)
    print("Data after dropping the label column(s):")
    print(data)
    data = data.astype(float)
    print(data)
print("Z")
print(z)
df = pandas.DataFrame(data)
# calculate pairwise distances
dist_matrix = pandas.DataFrame(distance_matrix(df.values, df.values)).values
# find k closest neighbors for each data point
D0 = np.zeros(dist_matrix.shape)
for index in range(0, dist_matrix.shape[0]):
vector = dist_matrix[:, index]
k_smallest = np.argpartition(vector, args.k)[:args.k + 1]
for k_index in k_smallest:
D0[k_index, index] = dist_matrix[k_index, index]
D0[index, k_index] = dist_matrix[k_index, index]
# find distances in graph of k neighbors (dijkstra)
print("D0")
print(D0)
shortest_paths = sparse.csgraph.shortest_path(D0, 'D', False)
# minimize error in distances in a lower dimensional space
X = gradient_descent(shortest_paths, args.out_dim, args.max_iterations)
rc = recall(dist_matrix, d_matrix(X), args.neighborhood_radius)
pr = precision(dist_matrix, d_matrix(X), args.neighborhood_radius)
print("precision:")
print(pr)
print("recall:")
print(rc)
np.savetxt('isomap.csv', X, delimiter=';', fmt='%1.3f')
plot_data(X, z)
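# Typical invocation (isomap.py is the file name from the source link above; the
# remaining values are simply the argparse defaults):
#   python isomap.py --filename toydata.csv --k 11 --out_dim 2 --color_column 0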
def gradient_descent(D0, dim, iterations):
data_length = D0.shape[0]
# distance matrix in lower dimensional space
D = np.random.rand(data_length, data_length)
# data matrix in lower dimensional space
X = np.random.rand(data_length, dim)
for iteration in range(0, iterations):
# update distance matrix
df = pandas.DataFrame(X)
D = pandas.DataFrame(distance_matrix(df.values, df.values)).values
# calculate gradient
d_d = (D0 - D) / D
np.fill_diagonal(d_d, 0)
d_d_rowsum = d_d @ np.ones((data_length, dim))
gradient = (d_d @ X - d_d_rowsum * X) * 2
# normalize gradient to unit length
magnitude = math.sqrt(np.sum(gradient ** 2))
gradient = gradient # / magnitude
# update X
X = X - (gradient * 0.001)
if iteration % 10 == 0:
print("Iteration: " + str(iteration) + " / " + str(iterations))
print("Precision: " + str(precision(D0, D, 0.15)))
print("Recall: " + str(recall(D0, D, 0.15)))
print("Updated X: ")
print(X)
print("Pairwise distances in reduced dimension")
print(D)
return X
def plot_data(data_matrix, color):
vector1 = data_matrix[:, 0]
vector2 = data_matrix[:, 1]
cm = plt.cm.get_cmap('RdYlBu')
sc = plt.scatter(vector1, vector2, c=color, s=7, cmap=cm)
plt.colorbar(sc)
plt.show()
def d_matrix(X):
df = pandas.DataFrame(X)
D = pandas.DataFrame(distance_matrix(df.values, df.values)).values
return D
def intersect(b1, b2):
return [val for val in b1 if val in b2]
def precision(o, re, radius):
    original = np.copy(o)
    reduction = np.copy(re)
reduction = np.copy(re)
# 1 true, 0 false
original[original == 0] = -1
original[original > radius] = 0
original[original > 0] = 1
original[original == -1] = 1
# 2 true, -2 false
reduction[reduction > radius] = -2
reduction[reduction >= 0] = 2
combined = original + reduction
# if both conditions true 1, otherwise 0
combined[combined < 0] = 0
combined[combined % 2 == 0] = 0
combined[combined > 0] = 1
n = original.shape[0]
pr = 0
for i in range(0, n):
c = np.argwhere(combined[i, :] == 1).tolist()
o = np.argwhere(original[i, :] == 1).tolist()
inter = len(c)
pr += inter / len(o)
return pr / n
def recall(o, re, radius):
original = np.copy(o)
reduction = np.copy(re)
# 1 true, 0 false
original[original == 0] = -1
original[original > radius] = 0
original[original > 0] = 1
original[original == -1] = 1
# 2 true, -2 false
reduction[reduction > radius] = -2
reduction[reduction >= 0] = 2
combined = original + reduction
# if both conditions true 1, otherwise 0
combined[combined < 0] = 0
combined[combined % 2 == 0] = 0
combined[combined > 0] = 1
n = original.shape[0]
rc = 0
for i in range(0, n):
c = np.argwhere(combined[i, :] == 1).tolist()
o = np.argwhere(reduction[i, :] == 2).tolist()
inter = len(c)
rc += inter / len(o)
return rc / n
if __name__ == '__main__':
main() | StarcoderdataPython |
1741296 | <reponame>DO-Ui/grabble-bot
def RemoveFromList(thelist, val):
return [value for value in thelist if value != val]
def GetDic():
try:
dicopen = open("wordlist.txt", "r")
dicraw = dicopen.read()
dicopen.close()
diclist = dicraw.split("\n")
diclist = RemoveFromList(diclist, '')
return diclist
except FileNotFoundError:
print("No Dictionary!")
return
def Word2Vect(word):
l = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
v = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
w = word.lower()
wl = list(w)
for i in range(0, len(wl)):
if wl[i] in l:
ind = l.index(wl[i])
v[ind] += 1
return v
def Vect2Int(vect):
    # Pack the 26 letter counts into a single integer, 4 bits per letter, so two
    # words map to the same integer exactly when they are anagrams (assuming no
    # letter occurs 16 or more times in one word).
    pv = 0
    f = 0
    for i in range(0, len(vect)):
        wip = (vect[i] * (2 ** pv))
        f += wip
        pv += 4
    return f
def Ints2Dic(dic):
d = {}
for i in range(0, len(dic)):
v = Word2Vect(dic[i])
Int = Vect2Int(v)
if Int in d:
tat = d.get(Int)
tat.append(dic[i])
d[Int] = tat
elif Int not in d:
d[Int] = [dic[i]]
return d
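# Illustrative usage sketch (not part of the original bot): anagrams share the same
# packed integer, so Ints2Dic groups them under a single key. The word "silent" and
# the presence of its anagrams are assumptions about the contents of wordlist.txt.
if __name__ == "__main__":
    words = GetDic()
    if words:
        anagram_index = Ints2Dic(words)
        key = Vect2Int(Word2Vect("silent"))
        print(anagram_index.get(key, []))  # e.g. ['enlist', 'listen', 'silent'] if present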
| StarcoderdataPython |
1768768 | <reponame>TingwenH/Project
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Row(Component):
"""A Row component.
Row is one of the core layout components in Bootstrap. Build up your layout
as a series of rows of columns. Row has arguments for controlling the
vertical and horizontal alignment of its children, as well as the spacing
between columns.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- align (a value equal to: 'start', 'center', 'end', 'stretch', 'baseline'; optional):
Set vertical alignment of columns in this row. Options are
'start', 'center', 'end', 'stretch' and 'baseline'.
- className (string; optional):
**DEPRECATED** Use `class_name` instead. Often used with CSS to
style elements with common properties.
- class_name (string; optional):
Often used with CSS to style elements with common properties.
- justify (a value equal to: 'start', 'center', 'end', 'around', 'between', 'evenly'; optional):
Set horizontal spacing and alignment of columns in this row.
        Options are 'start', 'center', 'end', 'around', 'between' and 'evenly'.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- style (dict; optional):
Defines CSS styles which will override styles previously set."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, class_name=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, align=Component.UNDEFINED, justify=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'align', 'className', 'class_name', 'justify', 'key', 'loading_state', 'style']
self._type = 'Row'
self._namespace = 'dash_bootstrap_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'align', 'className', 'class_name', 'justify', 'key', 'loading_state', 'style']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Row, self).__init__(children=children, **args)
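# Illustrative usage sketch (hypothetical layout; `dbc` is the conventional import
# alias for this package and Col is its companion column component):
#   import dash_bootstrap_components as dbc
#   layout = dbc.Row(
#       [dbc.Col("left column"), dbc.Col("right column")],
#       align="center",
#       justify="between",
#   )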
| StarcoderdataPython |