id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
87623 | #!/usr/bin/env python
#
# A lightweight Telegram Bot running on Flask
#
# Copyright 2020 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Flask application"""
from flask import Flask
from redis import Redis
from rq import Queue
from telegram_bot import BotDispatcher
class BotApp(Flask):
"""Flask application class extended with application-specific attributes"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Application-specific fields and methods
self.bot_dispatcher: BotDispatcher
self.redis: Redis
self.task_queue: Queue
| StarcoderdataPython |
3341792 | <reponame>unparalleled-js/py42
from requests import Session
from py42._internal.initialization import SDKDependencies
from py42._internal.session_factory import AuthHandlerFactory
from py42._internal.session_factory import SessionFactory
from py42._internal.session_factory import SessionModifierFactory
def from_local_account(host_address, username, password):
"""Creates a :class:`~py42.sdk.SDKClient` object for accessing the Code42 REST APIs using the
supplied credentials. Currently, only accounts created within the Code42 console or using the
APIs (including py42) are supported. Username/passwords that are based on Active Directory,
Okta, or other Identity providers cannot be used with this method.
Args:
host_address (str): The domain name of the Code42 instance being authenticated to, e.g.
console.us.code42.com
username (str): The username of the authenticating account.
password (str): The <PASSWORD>.
Returns:
:class:`py42.sdk.SDKClient`
"""
return SDKClient.from_local_account(host_address, username, password)
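# A minimal usage sketch (the host address and credentials below are
# hypothetical placeholders; the attribute accesses mirror the properties
# documented on SDKClient further down):
#
#     import py42.sdk
#
#     sdk = py42.sdk.from_local_account(
#         "console.us.code42.com", "<EMAIL>", "<PASSWORD>"
#     )
#     user_client = sdk.users        # py42.clients.users.UserClient
#     archive_module = sdk.archive   # py42.modules.archive.ArchiveModule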
class SDKClient(object):
def __init__(self, sdk_dependencies):
self._sdk_dependencies = sdk_dependencies
@classmethod
def from_local_account(cls, host_address, username, password):
"""Creates a :class:`~py42.sdk.SDKClient` object for accessing the Code42 REST APIs using
the supplied credentials. Currently, only accounts created within the Code42 console or
using the APIs (including py42) are supported. Username/passwords that are based on Active
Directory, Okta, or other Identity providers cannot be used with this method.
Args:
host_address (str): The domain name of the Code42 instance being authenticated to, e.g.
console.us.code42.com
username (str): The username of the authenticating account.
            password (str): The <PASSWORD>.
Returns:
:class:`py42.sdk.SDKClient`
"""
session_impl = Session
session_factory = SessionFactory(
session_impl, SessionModifierFactory(), AuthHandlerFactory()
)
basic_auth_session = session_factory.create_basic_auth_session(
host_address, username, password
)
sdk_dependencies = SDKDependencies(
host_address, session_factory, basic_auth_session
)
return cls(sdk_dependencies)
@property
def serveradmin(self):
"""A collection of methods for getting server information for on-premise environments
and tenant information for cloud environments.
Returns:
:class:`py42.clients.administration.AdministrationClient`
"""
return self._sdk_dependencies.administration_client
@property
def archive(self):
"""A collection of methods for accessing Code42 storage archives. Useful for doing
web-restores or finding a file on an archive.
Returns:
:class:`py42.modules.archive.ArchiveModule`
"""
return self._sdk_dependencies.archive_module
@property
def users(self):
"""A collection of methods for retrieving or updating data about users in the Code42
environment.
Returns:
:class:`py42.clients.users.UserClient`
"""
return self._sdk_dependencies.user_client
@property
def devices(self):
"""A collection of methods for retrieving or updating data about devices in the Code42
environment.
Returns:
:class:`py42.clients.devices.DeviceClient`
"""
return self._sdk_dependencies.device_client
@property
def orgs(self):
"""A collection of methods for retrieving or updating data about organizations in the
Code42 environment.
Returns:
:class:`py42.clients.orgs.OrgClient`
"""
return self._sdk_dependencies.org_client
@property
def legalhold(self):
"""A collection of methods for retrieving and updating legal-hold matters, policies, and
custodians.
Returns:
:class:`py42.clients.legalhold.LegalHoldClient`
"""
return self._sdk_dependencies.legal_hold_client
@property
def usercontext(self):
"""A collection of methods related to getting information about the currently logged in
user, such as the tenant ID.
Returns:
:class:`py42.usercontext.UserContext`
"""
return self._sdk_dependencies.user_context
@property
def securitydata(self):
"""A collection of methods and properties for getting security data such as:
* File events
* Alerts
* Security plan information
Returns:
:class:`py42.modules.securitydata.SecurityModule`
"""
return self._sdk_dependencies.security_module
@property
def detectionlists(self):
"""A collection of properties each containing methods for managing specific detection
lists, such as departing employees.
Returns:
:class:`py42.modules.detectionlists.DetectionListsModule`
"""
return self._sdk_dependencies.detection_lists_module
@property
def alerts(self):
"""A collection of methods related to retrieving and updating alerts rules.
Returns:
:class:`py42.modules.alertrules.AlertRulesModule`
"""
return self._sdk_dependencies.alerts_module
| StarcoderdataPython |
3396210 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import math
import random
import os
import re
import threading
import string
import pickle
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils, bleu
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
def main(args, init_distributed=False):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if init_distributed:
args.distributed_rank = distributed_utils.distributed_init(args)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
print(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(','):
task.load_dataset(valid_sub_split, combine=False, epoch=0)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print(model)
print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
print('| num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# Build trainer
trainer = Trainer(args, task, model, criterion)
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
if epoch_itr.epoch > 70:
print('no training')
return
if getattr(checkpoint_utils.save_checkpoint, 'best', None):
del checkpoint_utils.save_checkpoint.best
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
not_best_checkpoint = 0
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(',')
while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
# train for one epoch
train(args, trainer, task, epoch_itr)
if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
if args.patience > 0:
prev_best = getattr(checkpoint_utils.save_checkpoint, 'best', valid_losses[0])
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
if is_better(prev_best, valid_losses[0]):
not_best_checkpoint += 1
print("| Not the best ckpt... not best:", not_best_checkpoint)
if not_best_checkpoint >= args.patience:
print("| Ran out of patience. Early stop...")
break
else:
not_best_checkpoint = 0
else:
valid_losses = [None]
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
reload_dataset = ':' in getattr(args, 'data', '')
# sharded data: get train iterator for next epoch
epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Update parameters every N batches
update_freq = args.update_freq[epoch_itr.epoch - 1] \
if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.epoch >= args.curriculum),
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch, no_progress_bar='simple',
)
extra_meters = collections.defaultdict(lambda: AverageMeter())
valid_subsets = args.valid_subset.split(',')
max_update = args.max_update or math.inf
for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
log_output = trainer.train_step(samples)
if log_output is None:
continue
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue # these are already logged above
if 'loss' in k or k == 'accuracy':
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].avg
progress.log(stats, tag='train', step=stats['num_updates'])
# ignore the first mini-batch in words-per-second calculation
num_updates = trainer.get_num_updates()
if (
not args.disable_validation
and args.save_interval_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates > 0
):
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats, tag='train', step=stats['num_updates'])
# reset training meters
for k in [
'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats['loss'] = trainer.get_meter('train_loss')
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss')
stats['nll_loss'] = nll_loss
else:
nll_loss = trainer.get_meter('train_loss')
stats['ppl'] = utils.get_perplexity(nll_loss.avg)
stats['wps'] = trainer.get_meter('wps')
stats['ups'] = trainer.get_meter('ups')
stats['wpb'] = trainer.get_meter('wpb')
stats['bsz'] = trainer.get_meter('bsz')
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = trainer.get_meter('gnorm')
stats['clip'] = trainer.get_meter('clip')
stats['oom'] = trainer.get_meter('oom')
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = trainer.get_meter('loss_scale')
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = trainer.get_meter('train_wall')
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens_valid,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
# reset validation loss meters
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer, args, extra_meters)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats, tag=subset, step=trainer.get_num_updates())
if args.distributed_rank == 0:
ent_file = open(os.path.join(args.save_dir, 'entropy.txt'),"a+")
ent_file.write(str({'loss':stats['loss'].avg, 'nll_loss':stats['nll_loss'].avg, 'entropy': stats['entropy']}) + '\n')
ent_file.close()
valid_losses.append(
stats[args.best_checkpoint_metric].avg
if args.best_checkpoint_metric == 'loss'
else stats[args.best_checkpoint_metric]
)
return valid_losses
def get_valid_stats(trainer, args, extra_meters=None):
stats = collections.OrderedDict()
stats['loss'] = trainer.get_meter('valid_loss')
if trainer.get_meter('valid_nll_loss').count > 0:
nll_loss = trainer.get_meter('valid_nll_loss')
stats['nll_loss'] = nll_loss
else:
nll_loss = stats['loss']
stats['ppl'] = utils.get_perplexity(nll_loss.avg)
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
current_metric = None
if args.best_checkpoint_metric == 'loss':
current_metric = stats['loss'].avg
elif args.best_checkpoint_metric in extra_meters:
current_metric = extra_meters[args.best_checkpoint_metric].avg
elif args.best_checkpoint_metric in stats:
current_metric = stats[args.best_checkpoint_metric]
else:
raise ValueError("best_checkpoint_metric not found in logs")
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
current_metric,
)
return stats
def distributed_main(i, args, start_rank=0):
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = start_rank + i
main(args, init_distributed=True)
def run_generation(ckpt, results, ents):
gen_parser = options.get_generation_parser()
args = options.parse_args_and_arch(gen_parser, input_args = [ data_set,
'--gen-subset', 'valid', '--path', ckpt, '--beam', '10','--max-tokens', '4000', '--sacrebleu',
'--remove-bpe', '--log-format', 'none'])
use_cuda = torch.cuda.is_available() and not args.cpu
# if use_cuda:
# lock.acquire()
# torch.cuda.set_device(device_id)
# lock.release()
utils.import_user_module(args)
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
generator = task.build_generator(args)
# Generate and compute BLEU score
if args.sacrebleu:
scorer = bleu.SacrebleuScorer()
else:
scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
num_sentences = 0
has_target = True
entropies = []
token_counts = []
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
if 'avg_ent' in sample:
entropies.append(sample['avg_ent'][0])
token_counts.append(sample['avg_ent'][1])
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
if hasattr(scorer, 'add_string'):
scorer.add_string(target_str, hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
num_sentences += sample['nsentences']
results[ckpt] = scorer.score()
ents[ckpt] = sum(entropies)/sum(token_counts)
def train_main(alpha, beta, save_path):
parser = options.get_training_parser()
input_args = [data_set,
'--share-decoder-input-output-embed',
'--arch','transformer_iwslt_de_en', '--max-tokens', '4000', '--lr', '5e-4',
'--save-interval', '2', '--max-epoch', '85', '--patience', '5',
'--optimizer', 'adam', '--adam-betas', '(0.9, 0.98)',
'--clip-norm', '0.0', '--weight-decay', '0.0001', '--dropout', '0.3',
'--lr-scheduler', 'inverse_sqrt', '--warmup-updates', '4000',
'--keep-last-epochs', '4', '--criterion', 'jensen_cross_entropy', '--alpha', str(alpha),
'--beta', str(beta), '--use-uniform', '--fp16', '--save-dir', save_path]
args = options.parse_args_and_arch(parser, input_args=input_args)
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args.device_id, args)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_rank = None # set based on device id
if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, ),
nprocs=args.distributed_world_size,
)
else:
# single GPU training
main(args)
ckpts = os.listdir(args.save_dir)
try:
ckpts.remove('checkpoint_last.pt')
except ValueError:
print("no checkpoint_last.pt in folder", args.save_dir)
f = open(os.path.join(args.save_dir,"final_entropies.txt"), "a+")
results = {}
entropies = {}
for ckpt in ckpts:
if '.pt' in ckpt:
path = os.path.join(args.save_dir, ckpt)
f.write(path + '\n')
run_generation(path, results, entropies)
f.write('{entropy: '+ str(entropies[path]) + ', bleu: '+ str(results[path]) +'}\n')
f.close()
return results
def objective(alpha, beta):
print("alpha", alpha, 'beta', beta)
save_path = os.path.join(models_dir, str(round(alpha,3)).replace('.', '') + '_' + str(round(beta,3)).replace('.', ''))
if not os.path.exists(save_path):
os.mkdir(save_path)
val_scores = train_main(alpha, beta, save_path)
print("Best BLEU on validation set:", max(val_scores.values()))
return max(val_scores.values())
def optimize():
global ITERATIONS
ITERATIONS = 1
MAX_EVALS = 10
from bayes_opt import BayesianOptimization
# Bounded region of parameter space
pbounds = {'alpha': (0.001, 0.999), 'beta': (0.001, 1.5)}
optimizer = BayesianOptimization(
f=objective,
pbounds=pbounds,
random_state=1,
)
try:
from bayes_opt.util import load_logs
load_logs(optimizer, logs=["logs.json"]);
print("Rerunning from {} trials".format(
len(optimizer.res)))
except:
print("Starting from scratch: new trials.")
from bayes_opt.observer import JSONLogger
from bayes_opt.event import Events
logger = JSONLogger(path="logs.json")
    optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
# Results will be saved in ./logs.json
optimizer.maximize(
init_points=20,#max(0, 5 - len(optimizer.res)),
n_iter=MAX_EVALS,
)
print(optimizer.max)
global ITERATIONS
data_set ='data-bin/wmt17_en_zh'
models_dir = 'ckpts/'
if __name__ == '__main__':
    optimize()
| StarcoderdataPython |
3300133 | # Copyright 2013-2017 The Salish Sea MEOPAR Contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FVCOM-Cmd -- FVCOM command processor
"""
import sys
from setuptools import find_packages, setup
from fvcom_cmd import __pkg_metadata__
python_classifiers = [
'Programming Language :: Python :: {0}'.format(py_version)
for py_version in ['2', '2.7', '3', '3.4', '3.5']
]
other_classifiers = [
'Development Status :: ' + __pkg_metadata__.DEV_STATUS,
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
]
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = ''
install_requires = [
# see environment-dev.yaml for conda environment dev installation
# see requirements.txt for package versions used during recent development
'arrow',
'attrs',
'cliff',
'python-hglib',
'PyYAML',
]
if sys.version_info[0] == 2:
install_requires.append('pathlib2')
setup(
name=__pkg_metadata__.PROJECT,
version=__pkg_metadata__.VERSION,
description=__pkg_metadata__.DESCRIPTION,
long_description=long_description,
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
#url='http://nemo-cmd.readthedocs.io/en/latest/', #TODO
license='Apache License, Version 2.0',
classifiers=python_classifiers + other_classifiers,
platforms=['MacOS X', 'Linux'],
install_requires=install_requires,
packages=find_packages(),
entry_points={
# The fvc command:
'console_scripts': ['fvc = fvcom_cmd.main:main'],
# Sub-command plug-ins:
'fvcom.app': [
'combine = fvcom_cmd.combine:Combine',
'deflate = fvcom_cmd.deflate:Deflate',
'gather = fvcom_cmd.gather:Gather',
'prepare = fvcom_cmd.prepare:Prepare',
'run = fvcom_cmd.run:Run',
],
},
)
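# Note: after installation the single ``fvc`` console script defined above
# exposes the sub-commands registered under the ``fvcom.app`` namespace
# (combine, deflate, gather, prepare, run) via cliff's plugin mechanism;
# running ``fvc --help`` should list them (invocation shown for illustration,
# not taken from this repository's docs).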
| StarcoderdataPython |
79988 | # -*- coding: utf-8 -*-
from pathlib import Path
import requests
from packratt.cache import CacheEntry
class UrlCacheEntry(CacheEntry):
def __init__(self, url, sha_hash, filename):
self.url = url
self.sha_hash = sha_hash
self.filename = filename
def download(self, destination: Path) -> bool:
CHUNK_SIZE = 2**20
filename = destination / self.filename
with requests.Session() as session:
with session.get(self.url, stream=True) as response:
with open(filename, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return True
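    # Usage sketch (the URL, hash, and paths are hypothetical placeholders):
    #
    #     entry = UrlCacheEntry("https://example.com/data.tar.gz",
    #                           "0123abcd...", "data.tar.gz")
    #     entry.download(Path("/tmp/cache"))   # streams the file in 1 MiB chunks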
@property
def type(self):
return "url"
def __eq__(self, other):
return (self.type == other.type and
self.url == other.url and
self.sha_hash == other.sha_hash and
self.filename == other.filename)
def __hash__(self):
        return hash((self.url, self.sha_hash, self.filename))
| StarcoderdataPython |
1649426 | # -*- coding:utf-8 -*-
# author: hpf
# create time: 2020/10/22 9:38
# file: 111_二叉树的最小深度.py
# IDE: PyCharm
# Problem description:
# Given a binary tree, find its minimum depth.
#
# The minimum depth is the number of nodes along the shortest path from the
# root node down to the nearest leaf node.
#
# Note: a leaf is a node that has no children.
#
# Example:
#
# Given the binary tree [3,9,20,null,null,15,7],
#
#     3
#    / \
#   9  20
#      / \
#     15  7
# return its minimum depth, which is 2.
# Solution 1: BFS
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution1:
def minDepth(self, root: TreeNode) -> int:
if not root:
return 0
from collections import deque
q = deque()
        # The root itself counts as one level, so depth is initialized to 1
q.append(root)
depth = 1
while(q):
size = len(q)
            # Expand outward from every node currently in the queue
for _ in range(size):
node = q.popleft()
                # Check whether we have reached a leaf (the end point)
if not node.left and not node.right:
return depth
                # Add node's children to the queue
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
            # One level finished: increment the depth here
depth += 1
return depth
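# A quick sanity check for Solution1 using the example tree
# [3,9,20,null,null,15,7] from the problem statement (hypothetical driver
# code, not part of the original file):
#
#     root = TreeNode(3)
#     root.left = TreeNode(9)
#     root.right = TreeNode(20)
#     root.right.left = TreeNode(15)
#     root.right.right = TreeNode(7)
#     assert Solution1().minDepth(root) == 2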
# Solution 2: DFS
# The DFS (depth-first search) approach works as follows:
#
# If the root is empty, return 0;
# If the root is not empty, check its left and right children:
#   If both children are empty, return 1;
#   If exactly one child is empty, return the minimum depth of the non-empty child;
#   If both children are non-empty, return the smaller of the two depths.
class Solution:
def minDepth(self, root: TreeNode) -> int:
        # The root is empty
if not root:
return 0
        # The root exists but has no children: return 1
if not root.left and not root.right:
return 1
depth = 1
        # No left child: add the minimum depth of the right subtree
if not root.left:
depth += self.minDepth(root.right)
        # No right child: add the minimum depth of the left subtree
elif not root.right:
depth += self.minDepth(root.left)
        # Both children exist: add the smaller of the two depths
else:
left_depth = self.minDepth(root.left)
right_depth = self.minDepth(root.right)
depth += min(left_depth, right_depth)
        return depth
| StarcoderdataPython |
1610788 | <reponame>StevenZ315/Optimization-Algorithms
"""
The heuristic_algorithm module implements a variety of heuristic algorithms.
"""
from ._genetic_algorithm import GeneticAlgorithm
from ._pso import PSO
from ._local_search import HillClimbing, Annealing
__all__ = ['GeneticAlgorithm',
'PSO',
'HillClimbing',
'Annealing']
| StarcoderdataPython |
1740336 | <reponame>raulgranja/Python-Course
def area(l, c):
print(f'A área de um terreno {l} x {c} é de {l * c:.2f} m².')
# main
print('Controle de Terrenos')
print('--------------------')
l = float(input('LARGURA (m): '))
c = float(input('COMPRIMENTO (m): '))
area(l, c)
| StarcoderdataPython |
4805860 | import RPi.GPIO as GPIO
import time
import threading
interruptPin_A = 20;
interruptPin_B = 21;
GPIO.setmode(GPIO.BCM)
GPIO.setup(interruptPin_A, GPIO.IN)
GPIO.setup(interruptPin_B, GPIO.IN)
absoluteSteps = 0;
direction = 0;
global prev_A, prev_B, A, B
A = prev_A = 0;
B = prev_B = 0;
def interruptA(channel):
global A, B, prev_A, prev_B, direction, absoluteSteps;
prev_A = 0 + A
if (channel != 0):
A = 1;
else:
A = 0;
if (A == prev_A):
return;
if (B == 1):
if (A == 1):
direction = 1;
else:
direction = -1;
else:
if (A == 1):
direction = -1;
else:
direction = 1;
absoluteSteps += direction;
def interruptB(channel):
global A;
global B;
global absoluteSteps;
global direction;
prev_B = 0 + B;
if (channel != 0):
B = 1;
else:
B = 0;
if (B == prev_B):
return;
if (A == 1):
if (B == 1):
direction = -1;
else:
direction = 1;
else:
if (B == 1):
direction = 1;
else:
direction = -1;
absoluteSteps += direction;
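# How the quadrature decoding above works: channels A and B are nominally 90
# degrees out of phase, so whenever one channel changes level, the current
# level of the other channel tells us the direction of rotation; each detected
# edge then adds +1 or -1 to absoluteSteps.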
freq = 0
def opticalEncoderThread():
global absoluteSteps;
global delta;
t = time.time()
while (True):
interruptB(GPIO.input(interruptPin_A))
interruptA(GPIO.input(interruptPin_B))
t2 = time.time()
delta = (t2-t);
t = t2;
def main():
global absoluteSteps;
# GPIO.add_event_detect(interruptPin_A, GPIO.BOTH, callback=interruptA) # add rising edge detection on a channel
#GPIO.add_event_detect(interruptPin_B, GPIO.BOTH, callback=interruptB) # add rising edge detection on a channel
t = threading.Thread(target=opticalEncoderThread)
t.start()
while (True):
t = absoluteSteps/32*360
print(absoluteSteps, delta*1000000)
time.sleep(0.1)
main()
| StarcoderdataPython |
82094 | <filename>images/get_images.py
import os
import urllib, urlparse
import simplejson as json
# query for images
url = 'http://www.panoramio.com/map/get_panoramas.php?order=popularity&\
set=public&from=0&to=20&minx=-77.037564&miny=38.896662&\
maxx=-17.035564&maxy=18.898662&size=medium'
c = urllib.urlopen(url)
# get the urls of individual images from JSON
j = json.loads(c.read())
imurls = []
for im in j['photos']:
imurls.append(im['photo_file_url'])
# download images
for url in imurls:
image = urllib.URLopener()
image.retrieve(url, os.path.basename(urlparse.urlparse(url).path))
print 'downloading:', url
| StarcoderdataPython |
1712677 | <reponame>manliu1225/Facebook_crawler
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Helper functions for proxies.
.. seealso:: :ref:`proxy-known-issues`
"""
import functools
import sys
from zope.proxy import PyProxyBase
from zope.security._compat import PURE_PYTHON
from zope.security._compat import _BUILTINS
from zope.security.interfaces import ForbiddenAttribute
def _check_name(meth, wrap_result=True):
name = meth.__name__
def _wrapper(self, *args, **kw):
wrapped = super(PyProxyBase, self).__getattribute__('_wrapped')
checker = super(PyProxyBase, self).__getattribute__('_checker')
checker.check(wrapped, name)
res = meth(self, *args, **kw)
if not wrap_result:
return res
return checker.proxy(res)
return functools.update_wrapper(_wrapper, meth)
def _check_name_inplace(meth):
name = meth.__name__
def _wrapper(self, *args, **kw):
wrapped = super(PyProxyBase, self).__getattribute__('_wrapped')
checker = super(PyProxyBase, self).__getattribute__('_checker')
checker.check(wrapped, name)
w_meth = getattr(wrapped, name, None)
if w_meth is not None:
# The proxy object cannot change; we are modifying in place.
self._wrapped = w_meth(*args, **kw)
return self
x_name = '__%s__' % name[3:-2]
return ProxyPy(getattr(wrapped, x_name)(*args, **kw), checker)
return functools.update_wrapper(_wrapper, meth)
def _fmt_address(obj):
# Try to replicate PyString_FromString("%p", obj), which actually uses
# the platform sprintf(buf, "%p", obj), which we cannot access from Python
# directly (and ctypes seems like overkill).
if sys.platform != 'win32':
return '0x%0x' % id(obj)
if sys.maxsize < 2**32: # pragma: no cover
return '0x%08X' % id(obj)
return '0x%016X' % id(obj) # pragma: no cover
class ProxyPy(PyProxyBase):
"""
The pure-Python reference implementation of a security proxy.
This should normally not be created directly, instead use the
:func:`~.ProxyFactory`.
You can choose to use this implementation instead of the C implementation
by default by setting the ``PURE_PYTHON`` environment variable before
:mod:`zope.security` is imported.
"""
__slots__ = ('_wrapped', '_checker')
def __new__(cls, value, checker):
inst = super(ProxyPy, cls).__new__(cls)
inst._wrapped = value
inst._checker = checker
return inst
def __init__(self, value, checker):
if checker is None:
            raise ValueError('checker may not be None')
self._wrapped = value
self._checker = checker
# Attribute protocol
def __getattribute__(self, name):
if name in ('_wrapped', '_checker'):
# Only allow _wrapped and _checker to be accessed from inside.
if sys._getframe(1).f_locals.get('self') is not self:
raise AttributeError(name)
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
if name == '_wrapped':
return wrapped
checker = super(ProxyPy, self).__getattribute__('_checker')
if name == '_checker':
return checker
if name not in ('__cmp__', '__hash__', '__bool__', '__nonzero__',
'__lt__', '__le__', '__eq__', '__ne__', '__ge__',
'__gt__'):
checker.check_getattr(wrapped, name)
if name in ('__reduce__', '__reduce_ex__'):
# The superclass specifically denies access to __reduce__
# and __reduce__ex__, not letting proxies be pickled. But
# for backwards compatibility, we need to be able to
# pickle proxies. See checker:Global for an example.
val = getattr(wrapped, name)
elif name == '__module__':
# The superclass deals with descriptors found in the type
# of this object just like the Python language spec states, letting
# them have precedence over things found in the instance. This
# normally makes us a better proxy implementation. However, the
# C version of this code in _proxy doesn't take that same care and instead
# uses the generic object attribute access methods directly on
# the wrapped object. This is a behaviour difference; so far, it's
# only been noticed for the __module__ attribute, which checker:Global
# wants to override but couldn't because this object's type's __module__ would
# get in the way. That broke pickling, and checker:Global can't return
# anything more sophisticated than a str (a tuple) because it gets proxied
# and breaks pickling again. Our solution is to match the C version for this
# one attribute.
val = getattr(wrapped, name)
else:
val = super(ProxyPy, self).__getattribute__(name)
return checker.proxy(val)
def __getattr__(self, name):
# We only get here if __getattribute__ has already raised an
# AttributeError (we have to implement this because the super
# class does). We expect that we will also raise that same
# error, one way or another---either it will be forbidden by
# the checker or it won't exist. However, if the underlying
# object is playing games in *its*
# __getattribute__/__getattr__, and we call getattr() on it,
# (maybe there are threads involved), we might actually
# succeed this time.
# The C implementation *does not* do two checks; it only does
# one check, and raises either the ForbiddenAttribute or the
# underlying AttributeError, *without* invoking any defined
# __getattribute__/__getattr__ more than once. So we
# explicitly do the same. The consequence is that we lose a
# good stack trace if the object implemented its own methods
# but we're consistent. We would provide a better error
# message or even subclass of AttributeError, but that's liable to break
# (doc)tests.
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
checker = super(ProxyPy, self).__getattribute__('_checker')
checker.check_getattr(wrapped, name)
raise AttributeError(name)
def __setattr__(self, name, value):
if name in ('_wrapped', '_checker'):
return super(ProxyPy, self).__setattr__(name, value)
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
checker = super(ProxyPy, self).__getattribute__('_checker')
checker.check_setattr(wrapped, name)
setattr(wrapped, name, value)
def __delattr__(self, name):
if name in ('_wrapped', '_checker'):
raise AttributeError()
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
checker = super(ProxyPy, self).__getattribute__('_checker')
checker.check_setattr(wrapped, name)
delattr(wrapped, name)
@_check_name
def __getslice__(self, start, end):
wrapped = object.__getattribute__(self, '_wrapped')
try:
getslice = wrapped.__getslice__
except AttributeError:
return wrapped.__getitem__(slice(start, end))
return getslice(start, end)
@_check_name
def __setslice__(self, start, end, value):
wrapped = object.__getattribute__(self, '_wrapped')
try:
setslice = wrapped.__setslice__
except AttributeError:
return wrapped.__setitem__(slice(start, end), value)
return setslice(start, end, value)
def __cmp__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return cmp(wrapped, other)
def __lt__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped < other
def __le__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped <= other
def __eq__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped == other
def __ne__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped != other
def __ge__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped >= other
def __gt__(self, other):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return wrapped > other
def __hash__(self):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return hash(wrapped)
def __nonzero__(self):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return bool(wrapped)
__bool__ = __nonzero__
def __length_hint__(self):
# no check
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
try:
hint = wrapped.__length_hint__
except AttributeError:
return NotImplemented
else:
return hint()
def __coerce__(self, other):
# For some reason _check_name does not work for coerce()
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
checker = super(ProxyPy, self).__getattribute__('_checker')
checker.check(wrapped, '__coerce__')
return super(ProxyPy, self).__coerce__(other)
def __str__(self):
try:
return _check_name(PyProxyBase.__str__)(self)
# The C implementation catches almost all exceptions; the
# exception is a TypeError that's raised when the repr returns
# the wrong type of object.
except TypeError:
raise
except:
# The C implementation catches all exceptions.
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return '<security proxied %s.%s instance at %s>' %(
wrapped.__class__.__module__, wrapped.__class__.__name__,
_fmt_address(wrapped))
def __repr__(self):
try:
return _check_name(PyProxyBase.__repr__)(self)
# The C implementation catches almost all exceptions; the
# exception is a TypeError that's raised when the repr returns
# the wrong type of object.
except TypeError:
raise
except:
wrapped = super(ProxyPy, self).__getattribute__('_wrapped')
return '<security proxied %s.%s instance at %s>' %(
wrapped.__class__.__module__, wrapped.__class__.__name__,
_fmt_address(wrapped))
for name in ['__call__',
#'__repr__',
#'__str__',
#'__unicode__', # Unchecked in C proxy
'__reduce__',
'__reduce_ex__',
             #'__lt__', # Unchecked in C proxy (rich comparison)
             #'__le__', # Unchecked in C proxy (rich comparison)
             #'__eq__', # Unchecked in C proxy (rich comparison)
             #'__ne__', # Unchecked in C proxy (rich comparison)
             #'__ge__', # Unchecked in C proxy (rich comparison)
             #'__gt__', # Unchecked in C proxy (rich comparison)
             #'__nonzero__', # Unchecked in C proxy (rich comparison)
             #'__bool__', # Unchecked in C proxy (rich comparison)
             #'__hash__', # Unchecked in C proxy (rich comparison)
#'__cmp__', # Unchecked in C proxy
'__getitem__',
'__setitem__',
'__delitem__',
'__iter__',
'__next__',
'next',
'__contains__',
'__neg__',
'__pos__',
'__abs__',
'__invert__',
'__complex__',
'__int__',
'__float__',
'__long__',
'__oct__',
'__hex__',
'__index__',
'__add__',
'__sub__',
'__mul__',
'__div__',
'__truediv__',
'__floordiv__',
'__mod__',
'__divmod__',
'__pow__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rtruediv__',
'__rfloordiv__',
'__rmod__',
'__rdivmod__',
'__rpow__',
'__lshift__',
'__rshift__',
'__and__',
'__xor__',
'__or__',
'__rlshift__',
'__rrshift__',
'__rand__',
'__rxor__',
'__ror__',
]:
meth = getattr(PyProxyBase, name)
setattr(ProxyPy, name, _check_name(meth))
for name in (
'__len__',
):
meth = getattr(PyProxyBase, name)
setattr(ProxyPy, name, _check_name(meth, False))
for name in ['__iadd__',
'__isub__',
'__imul__',
'__idiv__',
'__itruediv__',
'__ifloordiv__',
'__imod__',
'__ilshift__',
'__irshift__',
'__iand__',
'__ixor__',
'__ior__',
'__ipow__',
]:
meth = getattr(PyProxyBase, name)
setattr(ProxyPy, name, _check_name_inplace(meth))
def getCheckerPy(proxy):
return super(ProxyPy, proxy).__getattribute__('_checker')
_builtin_isinstance = sys.modules[_BUILTINS].isinstance
def getObjectPy(proxy):
if not _builtin_isinstance(proxy, ProxyPy):
return proxy
return super(ProxyPy, proxy).__getattribute__('_wrapped')
_c_available = not PURE_PYTHON
if _c_available:
try:
from zope.security._proxy import _Proxy
except (ImportError, AttributeError): # pragma: no cover PyPy / PURE_PYTHON
_c_available = False
getChecker = getCheckerPy
getObject = getObjectPy
Proxy = ProxyPy
if _c_available:
from zope.security._proxy import getChecker
from zope.security._proxy import getObject
Proxy = _Proxy
removeSecurityProxy = getObject
def getTestProxyItems(proxy):
"""Return a sorted sequence of checker names and permissions for testing
"""
checker = getChecker(proxy)
return sorted(checker.get_permissions.items())
def isinstance(object, cls):
"""Test whether an *object* is an instance of a type.
This works even if the object is security proxied.
"""
# The removeSecurityProxy call is OK here because it is *only*
# being used for isinstance
return _builtin_isinstance(removeSecurityProxy(object), cls)
| StarcoderdataPython |
156813 | <filename>App/components/entradas.py<gh_stars>0
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
dato_entrada = [2,3,4,1]
class entradas(QDialog):
def __init__(self):
super(entradas ,self).__init__()
layout = QGridLayout()
self.setLayout(layout)
self.dato_entrada = dato_entrada
        # table
tabla = QTableWidget(1, 4)
newitem = QTableWidgetItem()
l_entrada = QLabel()
l_entrada.setText("entrada")
        # button
ok = QPushButton('ok')
def datas():
for i in range(0, 4):
newitem = tabla.item(0,i)
if (newitem == None):
a = "x"
pass
elif (not newitem.text() == "1" and not newitem.text() == "0"):
a = "x"
pass
else:
a = newitem.text()
pass
dato_entrada[i] = a
def _print():
print(dato_entrada)
tabla.cellChanged.connect(datas)
ok.clicked.connect(_print)
        # display widgets
layout.addWidget(l_entrada,0,0)
layout.addWidget(tabla,1,0)
        # layout.addWidget(ok,2,0)
| StarcoderdataPython |
1624529 | def get_temperature_edit_string(temperature_init, temperature_final, data_fraction, iter_):
if (temperature_init is None) or (temperature_final is None):
return
temperature = temperature_init*(temperature_final/temperature_init)**data_fraction
edit_config_lines = []
temperature_info = []
edit_config_lines.append(
"set-temperature temperature={0}".format(
temperature))
temperature_info.append("temperature={0}".format(
temperature))
return ("""nnet3-copy --edits='{edits}' - - |""".format(
edits=";".join(edit_config_lines)))
| StarcoderdataPython |
184572 | from functools import wraps
import json, click
import os
from os import mkdir
from instacli import BASE_DIR
class Settings():
SETTINGS_DIR = f'{BASE_DIR}/instacli.json'
def __init__(self) -> 'Settings':
"""Class that reppresents an abstraction of the settings
of the `instacli` package.
Tries to load settings from a `instacli.json` file. If no such
file is present or if the loaded data is none, a new instace of
the object will be created and saved in the `instacli.json` file.
Returns:
:class:`Setting`: Settings object instance.
"""
try:
with open(self.SETTINGS_DIR, 'r') as file:
try:
data = json.load(file)
except:
data = None
except FileNotFoundError:
data = None
if not data:
            # Create new settings:
self.driver_path = None
self.driver_visible = False
self.logging = False
self.output_path = None
# Save
des = self._to_dict()
with open(self.SETTINGS_DIR, 'w') as file:
json.dump(des, file)
else:
# Load existing Setting
self.driver_path = data.get('driver_path')
self.driver_visible = data.get('driver_visible')
self.logging = data.get('logging')
self.output_path = data.get('output_path')
def _persistence(func):
"""Function wrapper that saves the changes made to
        the attributes of the `Settings` class into the instacli.json file.
"""
@wraps(func)
def wrapper(self:'Settings', *args, **kwargs):
result = func(self, *args, **kwargs)
# Save
des = self._to_dict()
with open(self.SETTINGS_DIR, 'w') as file:
json.dump(des, file)
return result
return wrapper
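    # Sketch of the decorator's effect: every decorated setter persists the
    # change right after running, e.g.
    #
    #     settings = Settings()
    #     settings.set_driver_path('./chromedriver')   # also rewrites instacli.json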
def _to_dict(self):
return vars(self)
@_persistence
    def set_driver_path(self, path: str) -> None:
        """Resolves the given path to an absolute path and stores it as
        the package's chromedriver path setting.
        Args:
            path (str): Path of the chromedriver executable.
"""
path = os.path.abspath(path)
self.driver_path = path
@_persistence
    def set_output_path(self, path: str) -> None:
        """Resolves the given path to an absolute path and stores it as
        the package's output folder setting.
        Args:
            path (str): Path of the output folder.
"""
path = os.path.abspath(path)
self.output_path = path
@_persistence
def set_driver_visible(self, visible:bool):
self.driver_visible = visible
@_persistence
def set_logging(self, logging:bool):
        self.logging = logging
| StarcoderdataPython |
1641419 | import re
import os
import logging
import requests
import threading
import xml.etree.ElementTree as ET
from .models import Process
from django.conf import settings
from pathlib import Path
import time
# Logging initialization
logger = logging.getLogger(__name__)
class WhereIs(threading.Thread):
def __init__(self, ppn, rcr, num_line,log_file):
threading.Thread.__init__(self)
self.PPN = ppn
self.RCR = rcr
self.num_line = num_line
self.log_file = log_file
def run(self):
url = 'https://www.sudoc.fr/services/where/15/{}.xml'.format(self.PPN)
r = requests.get(url)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self.status = 'Error'
# self.logger.error("{} :: XmlAbes_Init :: HTTP Status: {} || Method: {} || URL: {} || Response: {}".format(ppn, r.status_code, r.request.method, r.url, r.text))
self.log_file.write("{}\t{}\t{}\n".format(self.num_line,self.PPN,"PPN inconnu"))
logger.debug("{} :: PPN inconnu ou service indisponible".format(self.PPN))
else:
self.record = r.content.decode('utf-8')
is_located = self.test_rcr()
if is_located :
self.log_file.write("{}\t{}\t{}\n".format(self.num_line,self.PPN,"Localisé dans le SUDOC"))
logger.debug("{} :: Existe".format(self.PPN))
else :
self.log_file.write("{}\t{}\t{}\n".format(self.num_line,self.PPN,"Non localisé dans le SUDOC"))
logger.debug("{} :: N'Existe pas".format(self.PPN))
def test_rcr(self):
root = ET.fromstring(self.record)
for library in root.findall(".//library"):
rcr = library.attrib['rcr']
if self.RCR == rcr :
return True
return False
def handle_uploaded_file(f,process):
# Initialisation des compteurs
num_ppn_badly_formatted = 0
num_line = 1
logger.debug("lecture du fichier")
log_file = open("{}/static/sudoc/rapports/logs_{}_{}.txt".format(Path(__file__).resolve().parent,process.id,process.process_library.library_rcr), "w")
for line in f :
line = line.rstrip()
if (clean_ppn := re.search("(^|\(PPN\))([0-9]{8}[0-9Xx]{1})(;|$)", line.decode())) is None :
            num_ppn_badly_formatted += 1
logger.debug("{} - N'est pas un PPN valide ".format(line.decode()))
log_file.write("{}\t{}\t{}\n".format(num_line,line.decode(),"PPN mal formé"))
else :
ppn = clean_ppn.group(2)
logger.debug("{} - Est un PPN valide ".format(ppn))
traitement = WhereIs(ppn,process.process_library.library_rcr,num_line,log_file)
traitement.start()
num_line += 1
while threading.activeCount() > 3:
logger.debug("{}\n".format(threading.activeCount()))
logger.debug(threading.enumerate())
time.sleep(1)
logger.debug("JOB TERMINE !!!")
| StarcoderdataPython |
159984 | from flask import Blueprint, make_response, render_template, request, session, abort, send_from_directory
from jinja2.exceptions import TemplateNotFound
from .service_page import render_template_wo_statistics
# Initialize the Game module
game_bp = Blueprint('Game', __name__, template_folder='../games', static_folder='../games', url_prefix='/games')
@game_bp.route('/<game>/<path:path>')
def static_route(game, path):
"""
    Serves static files from the appropriate directory.
    Input:
        game - uuid - game identifier
        path - str - path to the file; it can be nested, e.g. "assets/script.js"
"""
response = make_response(game_bp.send_static_file(f'{game}/{path}'))
if path.split('.')[-1] == 'gz':
response.headers['Content-Encoding'] = 'gzip'
return response
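# For example, a request for /games/<game>/Build/data.unityweb.gz (path shown
# for illustration only) is served from the games/<game>/ directory with a
# "Content-Encoding: gzip" header added because the file name ends in .gz.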
@game_bp.route('/<game>/')
def game_route(game):
"""
    Main endpoint for fetching a game.
    Input:
        game - uuid - game identifier
    args:
        token - str - user token
        *use_statistics - str - flag for using the statistics-collection class
        * - optional
"""
if game.count('.'):
return send_from_directory('./', game)
if not request.args.get('token'):
abort(401)
session['user'] = request.args.get('token')
template = None
    # If the game does not use the statistics-collection class, first render an empty
    # template with a script for exiting the game correctly. Inside that template the
    # game is launched in an iframe, this time without the use_statistics flag.
use_statistics = request.args.get('use_statistics')
if str(use_statistics).lower() == 'false':
url = f'/games/{game}/?token={request.args["token"]}'
resp = make_response(render_template_wo_statistics(url))
# resp.set_cookie('EndGame', '', expires=0, samesite=None, secure=True)
resp.headers.add('Set-Cookie', 'EndGame=false; SameSite=None; Secure=true; Path=/')
return resp
try:
template = render_template(f'{game}/index.html')
except TemplateNotFound:
abort(404)
session['current_game'] = game
resp = make_response(template)
# resp.set_cookie('EndGame', '', expires=0, samesite=None, secure=True)
resp.headers.add('Set-Cookie', 'EndGame=false; SameSite=None; Secure=true; Path=/')
return resp
| StarcoderdataPython |
1707018 | # Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
import logging
import inspect
import sys
#global logger
#logger = function_logger(logging.DEBUG)
# Function for printing errors messages and calling logger ]
def UPS_Messages(MessageCode):
global logger
logger = function_logger(logging.DEBUG)
if (MessageCode == 'Error_VFD_Freq'):
print('VFD frequency set above maximum, shutting down motor')
logger.warn('VFD frequency set above maximum, shutting down motor')
elif MessageCode == 'Error_VFD_Volt':
        print('VFD voltage set above maximum, shutting down motor')
        logger.warn('VFD voltage set above maximum, shutting down motor')
elif MessageCode == 'Error_VFD_Amps':
print('VFD current set above maximum, shutting down motor')
logger.warn('VFD current set above maximum, shutting down motor')
elif MessageCode == 'Error_VFD_Power':
print('VFD power set above maximum, shutting down motor')
logger.warn('VFD power set above maximum, shutting down motor')
elif MessageCode == 'Error_VFD_BusVolt':
print('VFD bus voltage set above maximum, shutting down motor')
logger.warn('VFD bus voltage set above maximum, shutting down motor')
elif MessageCode == 'Error_VFD_Temp':
print('VFD temperature set above maximum, shutting down motor')
logger.warn('VFD temperature set above maximum, shutting down motor')
elif MessageCode == 'Error_Solar_Voltage':
print('Solar voltage set above maximum, shutting down motor and opening solar relay')
logger.warn('Solar voltage set above maximum, shutting down motor and opening solar relay')
elif MessageCode == 'Error_DC_Link_Voltage':
print('DC link voltage set above maximum, shutting down motor and opening solar relay')
logger.warn('DC link voltage set above maximum, shutting down motor and opening solar relay')
elif MessageCode == 'Error_Voltage_Measurement':
print('Error reading voltage measurement')
logger.warn('Error reading voltage measurement')
elif MessageCode == 'Error_Transfer_Switch':
print('Invalid transfer switch command')
logger.warn('Invalid transfer switch command')
elif MessageCode == 'Error_DC_Relay':
print('Invalid DC relay command')
logger.warn('Invalid DC relay command')
elif MessageCode == 'Error_VFD_Power':
print('Invalid power value calculated')
logger.warn('Invalid power value calculated')
elif MessageCode == 'Error_Duty_Cycle':
print('Invalid duty cycle value calculated')
logger.warn('Invalid duty cycle value calculated')
elif MessageCode == 'Error_Solar_Voltage_Relay':
        print('Solar voltage out of acceptable range, cannot turn on solar relay')
        logger.warn('Solar voltage out of acceptable range, cannot turn on solar relay')
elif MessageCode == 'Error Archive':
print('Could not archive database')
logger.warn('Could not archive database')
elif MessageCode == 'Error Archive Delete':
print('Could not update SQL or delete CSV and log file')
logger.warn('Could not update SQL or delete CSV and log file')
elif MessageCode == 'Error SQL Connection':
print('Could not connect to SQL database')
logger.warn('Could not connect to SQL database')
elif MessageCode == 'Error SQL Create':
        print('Could not create SQL database')
        logger.warn('Could not create SQL database')
#print(logger)
#logging.Handler.close(self)
logging.shutdown()
#logger.Handler.close()
#print(logger)
#logger.shutdown()
#handlers = logger.handlers[:]
#for handler in handlers:
# handler.close()
# logger.removeHandler(handler)
#logging.Handler.close()
# Logger function for writing messages to error log file
def function_logger(file_level):
function_name = inspect.stack()[1][3]
logger = logging.getLogger(function_name)
logger.setLevel(logging.DEBUG) #By default, logs all messages
fh = logging.FileHandler("{0}.log".format(function_name))
fh.setLevel(file_level)
fh_format = logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s')
fh.setFormatter(fh_format)
logger.addHandler(fh)
    return logger
| StarcoderdataPython |
1724255 | <reponame>StevenHuang2020/ML
import matplotlib.pyplot as plt
import numpy as np
from distributions import Binomial_distribution, Discrete_uniform_distribution
def plotDistributeBar(ax, data, label='', width=0.3, offset=0, title='Probability Distribution of true'):
ax.bar(np.arange(len(data))+offset,data,width=width,label=label)
fontSize = 12
ax.set_title(title,fontsize=fontSize)
plt.xlabel('Different Teeth Bins',fontsize=fontSize)
plt.ylabel('Probability',fontsize=fontSize)
plt.xticks(np.arange(len(data)))
def plorDataDis(true_data,uniform,bino):
ax = plt.subplot(1,1,1)
offset = 0
width=0.2
plotDistributeBar(ax, true_data, label='True data', offset=offset, width=width)
offset += width
plotDistributeBar(ax, uniform, label='Uniform data', offset=offset, width=width)
offset += width
print(bino)
plotDistributeBar(ax, bino, label='Binomial data', offset=offset, width=width)
plt.legend()
plt.show()
def get_klpq_div(p_probs, q_probs):
    """Forward KL divergence KL(P||Q) = sum_i p_i * log(p_i / q_i)."""
    kl_div = 0.0
    for pi, qi in zip(p_probs, q_probs):
        kl_div += pi*np.log(pi/qi)
    return kl_div
def get_klqp_div(p_probs, q_probs):
    """Reverse KL divergence KL(Q||P) = sum_i q_i * log(q_i / p_i)."""
    kl_div = 0.0
    for pi, qi in zip(p_probs, q_probs):
        kl_div += qi*np.log(qi/pi)
    return kl_div
def plotKLResult(true_data,minP):
ax = plt.subplot(1,1,1)
offset = 0
width=0.2
plotDistributeBar(ax, true_data, label='True data', offset=offset, width=width)
pAll = [0.02,0.1,minP,0.8]
for p in pAll:
offset += width
dis = Binomial_distribution(N=len(true_data),p=p)
plotDistributeBar(ax, dis, label='Binomial '+ str(p), offset=offset, width=width)
plt.legend()
plt.show()
def testDiscretKL():
true_data = [0.02, 0.03, 0.15, 0.14, 0.13, 0.12, 0.09, 0.08, 0.1, 0.08, 0.06]
print('sum=', sum(true_data))
    assert abs(sum(true_data) - 1.0) < 1e-9  # tolerance guards against float round-off
unif_data = Discrete_uniform_distribution(true_data,N=len(true_data))
bino_data = Binomial_distribution(N=len(true_data),p=0.3)
plorDataDis(true_data,unif_data,bino_data)
print('KL(True||Uniform): ', get_klpq_div(true_data,unif_data))
print('KL(True||Binomial): ', get_klpq_div(true_data,bino_data))
p = np.arange(0.02, 1.0, 0.02) #np.linspace(0, 1.0, 50)
klpq = [get_klpq_div(true_data,Binomial_distribution(N=len(true_data),p=i)) for i in p]
klqp = [get_klqp_div(true_data,Binomial_distribution(N=len(true_data),p=i)) for i in p]
print('minimal klpq,', np.argmin(klpq), np.min(klpq))
ax = plt.subplot(1,1,1)
plotDistribute(ax,p,klpq,label='KL(P||Q)')
plotDistribute(ax,p,klqp,label='KL(Q||P)')
plotDistribute(ax,p,np.array(klpq)-np.array(klqp),label='KL(P||Q)-KL(Q||P)')
plt.show()
    plotKLResult(true_data, p[np.argmin(klpq)])  # use the p value that minimises KL(P||Q)
def plotDistribute(ax,x,y,label='', title='Binomial P vs KL'):
ax.plot(x,y,label=label)
fontSize = 12
ax.set_title(title,fontsize=fontSize)
ax.legend()
plt.xlabel('Binomial P',fontsize=fontSize)
plt.ylabel('KL(P||Q) divergence',fontsize=fontSize)
#plt.show()
def main():
testDiscretKL()
if __name__=='__main__':
main()
| StarcoderdataPython |
1690457 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
def read_description(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
return fp.read()
setup(name='calvin',
version='0.4',
url="http://github.com/EricssonResearch/calvin-base",
license="Apache Software License",
author="<NAME>",
author_email="N/A",
tests_require=[
'mock>1.0.1',
'pytest>=1.4.25',
'pytest-twisted'
],
install_requires=[
'colorlog>=2.6.0',
'kademlia>=0.4',
'ply>=3.6',
'Twisted>=15.0.0',
'requests >= 2.6.0',
'infi.traceback>=0.3.11',
'wrapt==1.10.2',
'pyserial>=2.6',
'netifaces>=0.10.4'
],
description="Calvin is a distributed runtime and development framework for an actor based dataflow"
"programming methodology",
long_description=read_description('README.md'),
packages=["calvin"],
include_package_data=True,
platforms='any',
test_suite="calvin.test.test_calvin",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Framework :: Twisted",
"Natural Language :: English",
"Intended Audience :: Developers",
"Topic :: Software Development",
],
extras_require={
'crypto': 'pyOpenSSL==0.15.1'
},
entry_points={
'console_scripts': [
'csruntime=calvin.Tools.csruntime:main',
'cscontrol=calvin.Tools.cscontrol:main',
'csdocs=calvin.Tools.calvindoc:main',
'cscompile=calvin.Tools.cscompiler:main',
'csinstall=calvin.Tools.csinstaller:main',
'csmanage=calvin.Tools.csmanage:main',
'csweb=calvin.Tools.www.csweb:main',
'csviz=calvin.Tools.csviz:main'
]
}
)
| StarcoderdataPython |
1740800 | <reponame>wise-east/LAUG<gh_stars>1-10
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
The ``evaluate`` subcommand can be used to
evaluate a trained model against a dataset
and report any metrics calculated by the model.
"""
import argparse
import json
import logging
from typing import Dict, Any
from allennlp.common import Params
from allennlp.common.util import prepare_environment
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.iterators import DataIterator
from allennlp.models.archival import load_archive
from allennlp.training.util import evaluate
from LAUG.nlu.milu_new import dataset_reader, model,iterator, dataset_reader_frames, model_frames
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
argparser = argparse.ArgumentParser(description="Evaluate the specified model + dataset.")
argparser.add_argument('archive_file', type=str, help='path to an archived trained model')
argparser.add_argument('input_file', type=str, help='path to the file containing the evaluation data')
argparser.add_argument('--output-file', type=str, help='path to output file')
argparser.add_argument('--weights-file',
type=str,
help='a path that overrides which weights file to use')
cuda_device = argparser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument('--cuda-device',
type=int,
default=-1,
help='id of GPU to use (if any)')
argparser.add_argument('-o', '--overrides',
type=str,
default="",
help='a JSON structure used to override the experiment configuration')
argparser.add_argument('--batch-weight-key',
type=str,
default="",
help='If non-empty, name of metric used to weight the loss on a per-batch basis.')
argparser.add_argument('--extend-vocab',
action='store_true',
default=False,
help='if specified, we will use the instances in your new dataset to '
'extend your vocabulary. If pretrained-file was used to initialize '
'embedding layers, you may also need to pass --embedding-sources-mapping.')
argparser.add_argument('--embedding-sources-mapping',
type=str,
default="",
help='a JSON dict defining mapping from embedding module path to embedding'
'pretrained-file used during training. If not passed, and embedding needs to be '
'extended, we will try to use the original file paths used during training. If '
'they are not available we will use random vectors for embedding extension.')
def evaluate_from_args(args: argparse.Namespace) -> Dict[str, Any]:
# Disable some of the more verbose logging statements
logging.getLogger('allennlp.common.params').disabled = True
logging.getLogger('allennlp.nn.initializers').disabled = True
logging.getLogger('allennlp.modules.token_embedders.embedding').setLevel(logging.INFO)
# Load from archive
archive = load_archive(args.archive_file, args.cuda_device, args.overrides, args.weights_file)
config = archive.config
prepare_environment(config)
model = archive.model
model.eval()
# Load the evaluation data
# Try to use the validation dataset reader if there is one - otherwise fall back
# to the default dataset_reader used for both training and validation.
validation_dataset_reader_params = config.pop('validation_dataset_reader', None)
if validation_dataset_reader_params is not None:
dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
else:
dataset_reader = DatasetReader.from_params(config.pop('dataset_reader'))
evaluation_data_path = args.input_file
logger.info("Reading evaluation data from %s", evaluation_data_path)
instances = dataset_reader.read(evaluation_data_path)
embedding_sources: Dict[str, str] = (json.loads(args.embedding_sources_mapping)
if args.embedding_sources_mapping else {})
if args.extend_vocab:
logger.info("Vocabulary is being extended with test instances.")
model.vocab.extend_from_instances(Params({}), instances=instances)
model.extend_embedder_vocab(embedding_sources)
iterator_params = config.pop("validation_iterator", None)
if iterator_params is None:
iterator_params = config.pop("iterator")
iterator = DataIterator.from_params(iterator_params)
iterator.index_with(model.vocab)
metrics = evaluate(model, instances, iterator, args.cuda_device, args.batch_weight_key)
logger.info("Finished evaluating.")
logger.info("Metrics:")
for key, metric in metrics.items():
logger.info("%s: %s", key, metric)
output_file = args.output_file
if output_file:
with open(output_file, "w") as file:
json.dump(metrics, file, indent=4)
return metrics
if __name__ == "__main__":
args = argparser.parse_args()
evaluate_from_args(args) | StarcoderdataPython |
1796215 | <gh_stars>1-10
from .keyp_head import * # noqa F401
| StarcoderdataPython |
3335159 | # Basic Calculator: https://leetcode.com/problems/basic-calculator/
# Given a string s representing a valid expression, implement a basic calculator to evaluate it, and return the result of the evaluation.
# Note: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().
# Initial thought is to use a stack as I have done something similar before with postfix notation with reverse polish notation
# Look for hint as I can't figure out when to actually pop off of stack
# Technique is to reverse the string and parse
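# Worked example of the reversed scan: for "(1+2)" the characters are visited as
# ')', '2', '+', '1', '(' -- the ')' is pushed as a marker, digits and operators stack up,
# and the '(' triggers eval(), which folds the stack back down to the matching ')'.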
class initial():
def calculate(self, s):
stack = []
n, operand = 0, 0
for i in reversed(range(len(s))):
cur = s[i]
# print(cur, cur.isdigit())
if cur.isdigit():
# add digit to the write place
operand = (10**n * int(cur)) + operand
n += 1
elif s[i] != ' ':
if n:
stack.append(operand)
n, operand = 0, 0
if cur == '(':
res = self.eval(stack)
# Remove ')'
stack.pop()
stack.append(res)
else:
# If you are here we have + or -
stack.append(cur)
if n:
# Push whatever is currently half read onto stack
stack.append(operand)
return self.eval(stack)
def eval(self, stack):
# Since the eval has to be valid by rules we pop the first digit to get to the operation
res = stack.pop() if stack else 0
print(stack)
while stack and stack[-1] != ')':
# find the operation
sign = stack.pop()
# get next digit
if sign == '+':
res += stack.pop()
else:
res -= stack.pop()
return res
# Score Card
# Did I need hints? yes
# Did you finish within 30 min? No
# Was the solution optimal? I am unsure
# Were there any bugs? Yeah, I took too long to figure out how to parse a multi-digit number from the string
# 1 1 2 2 = 1.5
a = '1+2'
b = ' 2-1 + 2 '  # evaluates to 3; note: unary minus (e.g. '1 + -2') is not handled by this parser
c = '(1+(4+5+2)-3)+(6+8)'
sol = initial()
print(sol.calculate(a))
print(sol.calculate(b))
print(sol.calculate(c))
| StarcoderdataPython |
1773580 | from django.core.management.base import BaseCommand
from actors.get_actors import actors, import_data
class Command(BaseCommand):
help = 'Import Actors data'
ACTORS = staticmethod(actors)
IMPORT_DATA = staticmethod(import_data)
def handle(self, *args, **options):
i = 1
for actor in self.ACTORS:
self.IMPORT_DATA(i, actor)
i = i + 1
| StarcoderdataPython |
3240702 | <gh_stars>0
from django.db import models
from django.http import HttpResponse
# Create your models here.
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
class Customer(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
cell = models.CharField(max_length=20)
email = models.EmailField()
def __str__(self):
return f"{self.first_name}, {self.last_name}"
| StarcoderdataPython |
4839162 | """api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
"""
Converts a API GW url to localstack
"""
return url.replace("4566", f"4566/restapis/{api_id}").replace(
"dev", "dev/_user_request_"
)
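# Example (sketch): with api_id "abc123",
#   "http://localhost:4566/dev/test" -> "http://localhost:4566/restapis/abc123/dev/_user_request_/test"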
def api_gw_lambda_integrate_deploy(
api_client,
api: dict,
api_resource: dict,
lambda_func: dict,
http_method: str = "GET",
) -> str:
"""
Integrate lambda with api gw method and deploy api.
Return the invokation URL
"""
lambda_integration_arn = (
"arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
f"{lambda_func['FunctionArn']}/invocations"
)
api_client.put_integration(
restApiId=api["id"],
resourceId=api_resource["id"],
httpMethod=http_method,
type="AWS",
integrationHttpMethod="POST",
uri=lambda_integration_arn,
)
api_client.create_deployment(
restApiId=api["id"], stageName="dev",
)
return f"http://localhost:4566/restapis/{api['id']}/dev/_user_request_{api_resource['path']}"
@pytest.fixture
def api_gw_method(request):
"""api gw for testing"""
marker = request.node.get_closest_marker("api_gw_method_args")
put_method_args = marker.args[0]["put_method_args"]
put_method_response_args = marker.args[0]["put_method_response_args"]
api = None
def fin():
"""fixture finalizer"""
if api:
api_client.delete_rest_api(restApiId=api["id"])
# Hook teardown (finalizer) code
request.addfinalizer(fin)
api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
api = api_client.create_rest_api(name="testapi")
root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
api_resource = api_client.create_resource(
restApiId=api["id"], parentId=root_resource_id, pathPart="test"
)
api_client.put_method(
restApiId=api["id"],
resourceId=api_resource["id"],
authorizationType="NONE",
**put_method_args,
)
api_client.put_method_response(
restApiId=api["id"],
resourceId=api_resource["id"],
statusCode="200",
**put_method_response_args,
)
return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "stac_endpoint",
"handler": "code.handler",
"environment": {"CBERS_STAC_BUCKET": "bucket",},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_root(api_gw_method, lambda_function):
"""
test_root_endpoint
"""
# Based on
# https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
req = requests.get(url)
assert req.status_code == 200
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_get(
api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals,too-many-statements
"""
test_item_search_get
"""
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
# ES_ENDPOINT is set by lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
# Empty GET, return all 2 items
original_url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func
)
req = requests.get(original_url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
# Single collection, return single item
url = f"{original_url}?collections=CBERS4-MUX"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["collection"] == "CBERS4-MUX"
# Two collections, return all items
url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
# Paging, no next case
url = f"{original_url}"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
# Paging, next page
url = f"{original_url}?limit=1"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.get(next_href)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# ids
url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# query extension
url = f"{original_url}?"
url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "POST",},
"put_method_response_args": {"httpMethod": "POST",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_post(
api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals
"""
test_item_search_post
"""
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
# ES_ENDPOINT is set by lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func, http_method="POST"
)
# POST with invalid bbox order, check error status code and message
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [160.6, -55.95, -170, -25.89],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 400, req.text
assert "First lon corner is not western" in req.text
# Same as above with fixed bbox
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [-170, -25.89, 160.6, -55.95],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 200, req.text
# Paging, no next case
req = requests.post(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
# Paging, next page
body = {"limit": 1}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.post(
next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# ids
body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| StarcoderdataPython |
1681448 | import csv
import math
import random
import pandas as pd
from sklearn.naive_bayes import GaussianNB ,BernoulliNB
from sklearn import preprocessing,linear_model
import sklearn
import numpy as np
#from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.ensemble import VotingClassifier
import statistics
data =pd.read_csv("Real Dataset.csv")
#print(data.head())
le =preprocessing.LabelEncoder()
cloudlet_ID =le.fit_transform(list(data["cloudlet ID"]))
Datacenter_ID =le.fit_transform(list(data["Data center ID"]))
VM_ID = le.fit_transform(list(data["VM ID"]))
Bwutil =le.fit_transform(list(data["Bwutil"]))
CPUutil =le.fit_transform(list(data["CPUutil"]))
memutil =le.fit_transform(list(data["memutil"]))
Disk_util =le.fit_transform(list(data["Disk util"]))
turn_aroundTime =data["turnAround"]
#Start_Time =le.fit_transform(list(data["Start Time"]))
#Finish_Time =le.fit_transform(list(data["Finish Time"]))
#namespace =le.fit_transform(list(data["namespace"]))
status =le.fit_transform(list(data["STATUS"]))
x=list(zip(cloudlet_ID,Datacenter_ID,VM_ID,Bwutil,CPUutil,memutil,Disk_util))
y=list(status)
x_train,x_test,y_train,y_test =sklearn.model_selection.train_test_split(x,y,test_size = 0.1)
model1 =RandomForestClassifier(n_estimators=10)
model2 =KNeighborsClassifier(n_neighbors=5)
model3=svm.SVC(gamma='auto')
model4=linear_model.LinearRegression()
model5=linear_model.LogisticRegression()
model6=GaussianNB()
model7=DecisionTreeClassifier()
model8=BernoulliNB()
model1.fit(x_train,y_train)
model2.fit(x_train,y_train)
model3.fit(x_train,y_train)
model4.fit(x_train,y_train)
model5.fit(x_train,y_train)
model6.fit(x_train,y_train)
model7.fit(x_train,y_train)
model8.fit(x_train,y_train)
acc1 =model1.score(x_test,y_test)
acc2=model2.score(x_test,y_test)
acc3=model3.score(x_test,y_test)
acc4=model4.score(x_test,y_test)
acc5=model5.score(x_test,y_test)
acc6=model6.score(x_test,y_test)
acc7=model7.score(x_test,y_test)
acc8=model8.score(x_test,y_test)
#final_pred =np.array([])
#for i in range(0,len(x_test)):
#final_pred =np.append(final_pred,statistics.mode([pred1[i],pred2[i],pred3[i]]))
model11 =VotingClassifier(estimators=[('rf',model1),('kn',model2),('svm',model3)],voting ='hard')
#model12=VotingClassifier(estimators=[('rf',model1),('kn',model2),('lr',model4)],voting='hard')
model13=VotingClassifier(estimators=[('rf',model1),('kn',model2),('lr',model5)],voting='hard')
model14=VotingClassifier(estimators=[('rf',model1),('kn',model2),('nb',model6)],voting='hard')
model15=VotingClassifier(estimators=[('rf',model1),('svm',model3),('nb',model6)],voting='hard')
model16=VotingClassifier(estimators=[('rf',model1),('lr',model5),('nb',model6)],voting='hard')
model17=VotingClassifier(estimators=[('svm',model3),('kn',model2),('nb',model6)],voting='hard')
model18=VotingClassifier(estimators=[('lr',model5),('kn',model2),('svm',model3)],voting='hard')
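#Hard voting ('hard') means each ensemble above predicts the label chosen by a majority of its three base classifiers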
#Left model12 due to conversion error and the accuracy is very low
model11.fit(x_train,y_train)
#model12.fit(x_train,y_train)
model13.fit(x_train,y_train)
model14.fit(x_train,y_train)
model15.fit(x_train,y_train)
model16.fit(x_train,y_train)
model17.fit(x_train,y_train)
model18.fit(x_train,y_train)
acc11=model11.score(x_test,y_test)
#acc12=model12.score(x_test,y_test)
acc13=model13.score(x_test,y_test)
acc14=model14.score(x_test,y_test)
acc15=model15.score(x_test,y_test)
acc16=model16.score(x_test,y_test)
acc17=model17.score(x_test,y_test)
acc18=model18.score(x_test,y_test)
print("\n\n\n")
print("Random Forest :",end="")
print(acc1)
print("Kneighbors :",end="")
print(acc2)
print("SVM :",end="")
print(acc3)
print("Linear Regression :",end="")
print(acc4)
print("Logistic Regression :",end="")
print(acc5)
print("Naive Bayes :",end="")
print(acc6)
print("Bernoulli NaiveBayes :",end="")
print(acc8)
print("Decision Tree :",end="")
print(acc7)
print("\n BY USING MAX VOTING")
print("RandomForest,KNeighbors,SVM :",end ="")
print(acc11)
#print("RandomForest,KNeighbors,LinearRegression :",end="")
#print(acc12)
print("RandomForest,KNeighbors,LogisticRegression :",end="")
print(acc13)
print("RandomForest,KNeighbors,NaiveBayes :",end="")
print(acc14)
print("RandomForest,SVM,Naive Bayes :",end="")
print(acc15)
print("RandomForest,LogisticRegression,Naive Bayes :",end="")
print(acc16)
print("SVM,KNeighbors,NaiveBayes :",end="")
print(acc17)
print("LogisticRegression,KNeighbors,SVM :",end="")
print(acc18)
| StarcoderdataPython |
11322 | <reponame>junoteam/TelegramBot<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
from Centos6_Bit64 import *
from SystemUtils import *
# Checking version of OS should happened before menu appears
# Check version of CentOS
SystemUtils.check_centos_version()
# Clear screen before to show menu
os.system('clear')
answer = True
while answer:
print ("""
LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
---------------------------------------------------
1. Check version of your CentOS
2. Check Internet connection
3. Show me my local IP address
4. Open port 80 to Web
5. Show me my localhost name
------- LAMP for CentOS 6.x -----------
6. Install EPEL & IUS repository
7. Install Web Server - Apache
8. Install Database - MySQL
9. Install Language - PHP
10. Install LAMP in "One Click" - CentOS 6.x
11. Exit/Quit
""")
answer = input("Please make your choice: ")
if answer == 1:
os.system('clear')
print ('\nChecking version of the system: ')
SystemUtils.check_centos_version()
elif answer == 2:
os.system('clear')
print ('\nChecking if you connected to the Internet')
SystemUtils.check_internet_connection()
elif answer == 3:
os.system('clear')
print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
elif answer == 4:
os.system('clear')
print('\nChecking firewall')
Centos6Deploy.iptables_port()
elif answer == 5:
print "Checking local hostname..."
SystemUtils.check_host_name()
elif answer == 6:
print ('\nInstalling EPEL and IUS repository to the system...')
Centos6Deploy.add_repository()
elif answer == 7:
print ('\nInstalling Web Server Apache...')
Centos6Deploy.install_apache()
elif answer == 8:
print ('\nInstalling database MySQL...')
Centos6Deploy.install_mysql()
elif answer == 9:
print('\nInstalling PHP...')
Centos6Deploy.install_php()
elif answer == 10:
print ('Install LAMP in "One Click" - CentOS 6.x')
Centos6Deploy.iptables_port()
        Centos6Deploy.add_repository()
        Centos6Deploy.install_apache()  # Apache ('A' in LAMP)
        Centos6Deploy.install_mysql()
Centos6Deploy.install_php()
elif answer == 11:
print("\nGoodbye...\n")
answer = None
else:
        print ('\nNot a valid choice, try again')
answer = True | StarcoderdataPython |
1699963 | <reponame>sdpython/papierstat<filename>_unittests/ut_datasets/test_tweet.py
# -*- coding: utf-8 -*-
"""
@brief test log(time=13s)
"""
import unittest
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from papierstat.datasets import load_tweet_dataset
class TestTweet(ExtTestCase):
def test_tweets(self):
temp = get_temp_folder(__file__, "temp_tweets")
df = load_tweet_dataset(cache=temp)
self.assertEqual(df.shape, (5088, 20))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
41135 | <gh_stars>1-10
import torch
import torch.nn as nn
class LeNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=(5, 5), padding=2, stride=1)
self.pool1 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5, 5), padding=0, stride=1)
self.pool2 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0)
self.linear1 = nn.Linear(in_features=400, out_features=120)
self.linear2 = nn.Linear(in_features=120, out_features=84)
self.output = nn.Linear(in_features=84, out_features=10)
self.activation = nn.Tanh()
for module in self.modules():
print(module)
if isinstance(module, nn.Module):
if (weight := getattr(module, "weight", None)) is not None:
self.dtype = weight.dtype
break
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.pool1(x)
x = self.activation(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.activation(x)
x = x.flatten(start_dim=1)
x = self.linear1(x)
x = self.activation(x)
x = self.linear2(x)
x = self.activation(x)
return self.output(x)
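# Shape check (illustrative): with MNIST-sized input the dimensions work out as
# (N,1,28,28) -> conv1/pool1 -> (N,6,14,14) -> conv2/pool2 -> (N,16,5,5) -> flatten 400 -> logits (N,10),
# e.g. LeNet()(torch.randn(8, 1, 28, 28)).shape == torch.Size([8, 10])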
if __name__ == "__main__":
LeNet() | StarcoderdataPython |
1734258 | from watchdog.observers import Observer
from watchdog.watchmedo import observe_with
from leanpub.shellcommandtrick import ShellCommandTrick
def pandoc_cmd(book):
"""Create the command to convert the files (listed in `book`)
    into a pdf. This is wrapped with echo statements announcing that the build
    has started and is complete."""
with open(book + ".txt") as f:
return ('echo "Starting build of {book}.pdf"'
" && pandoc {files} "
"-f markdown-smart --table-of-contents --top-level-division=chapter -o {book}.pdf"
' && echo " {book}.pdf created."'
).format(book=book,
files=f.read().replace("\n", " "))
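# For illustration, with a Book.txt listing "ch1.md" and "ch2.md" the generated command is roughly:
#   echo "Starting build of Book.pdf" && pandoc ch1.md ch2.md -f markdown-smart \
#       --table-of-contents --top-level-division=chapter -o Book.pdf && echo " Book.pdf created."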
try:
MAKE_BOOK = pandoc_cmd("Book")
except IOError:
print("Can't find Book.txt in directory.")
exit(1)
try:
MAKE_SAMPLE = pandoc_cmd("Sample")
except IOError:
# Sample.txt is optional.
MAKE_SAMPLE = ""
# TODO watch images
PATTERNS = ["*.markdown", "*.md", "Book.txt", "Sample.txt"]
DIRECTORIES = "."
RECURSIVE = False
TIMEOUT = 1.0
def watch():
"""Watch for changes to the markdown files, and build the book and the
sample pdf upon each change."""
handler = ShellCommandTrick(shell_command=MAKE_BOOK + " && " + MAKE_SAMPLE,
patterns=PATTERNS,
terminate_on_event=True)
observer = Observer(timeout=TIMEOUT)
observe_with(observer, handler, DIRECTORIES, RECURSIVE)
| StarcoderdataPython |
148659 | # This caused an error in py2 because cupy expect non-unicode str
# from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from chainer import cuda
class NonbiasWeightDecay(object):
"""Weight decay only for non-bias parameters.
This hook can be used just like chainer.optimizer_hooks.WeightDecay except
that this hook does not apply weight decay to bias parameters.
This hook assumes that all the bias parameters have the name of "b". Any
parameter whose name is "b" is considered as a bias and excluded from
weight decay.
"""
name = 'NonbiasWeightDecay'
call_for_each_param = True
timing = 'pre'
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
if param.name == 'b':
return
p, g = param.array, param.grad
if p is None or g is None:
return
with cuda.get_device_from_array(p) as dev:
if int(dev) == -1:
g += self.rate * p
else:
kernel = cuda.elementwise(
'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
kernel(p, self.rate, g)
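# Example usage (sketch; `model` and the rate are placeholders):
#     optimizer = chainer.optimizers.Adam()
#     optimizer.setup(model)
#     optimizer.add_hook(NonbiasWeightDecay(rate=1e-4))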
| StarcoderdataPython |
import warnings
import numpy as np
from sklearn import model_selection
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn import metrics
from sklearn import model_selection, metrics #Additional sklearn functions
from sklearn.metrics import accuracy_score,f1_score,roc_auc_score,log_loss
from sklearn.metrics import mean_squared_error,median_absolute_error,mean_absolute_error
from sklearn.metrics import classification_report, confusion_matrix,mean_squared_log_error
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import cross_val_score, StratifiedKFold, KFold
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
#####################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
                            adjusted=False):
    """Mean per-class recall; with adjusted=True the score is rescaled so that chance performance maps to 0."""
    C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
def accu(results, y_cv):
return (results==y_cv).astype(int).sum(axis=0)/(y_cv.shape[0])
def rmse(results, y_cv):
return np.sqrt(np.mean((results - y_cv)**2, axis=0))
######## Defining objective functions for HyperOpt here ######################
def gini(truth, predictions):
    g = np.asarray(np.c_[truth, predictions, np.arange(len(truth)) ], dtype=float)
g = g[np.lexsort((g[:,2], -1*g[:,1]))]
gs = g[:,0].cumsum().sum() / g[:,0].sum()
gs -= (len(truth) + 1) / 2.
return gs / len(truth)
def gini_sklearn(truth, predictions):
return gini(truth, predictions) / gini(truth, truth)
def gini_meae(truth, predictions):
score = median_absolute_error(truth, predictions)
return score
def gini_msle(truth, predictions):
score = mean_squared_log_error(truth, predictions)
return score
def gini_mae(truth, predictions):
score = mean_absolute_error(truth, predictions)
return score
def gini_mse(truth, predictions):
score = mean_squared_error(truth, predictions)
return score
def gini_rmse(truth, predictions):
score = np.sqrt(mean_squared_error(truth, predictions))
return score
def gini_accuracy(truth, predictions):
return accuracy_score(truth, predictions)
def gini_bal_accuracy(truth, predictions):
try:
return balanced_accuracy_score(truth, predictions)
except:
return accuracy_score(truth, predictions)
def gini_roc(truth, predictions):
return roc_auc_score(truth, predictions)
def gini_precision(truth, predictions,pos_label=1):
return precision_score(truth, predictions,average=None)[pos_label]
def gini_average_precision(truth, predictions):
return average_precision_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_weighted_precision(truth, predictions):
return precision_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_macro_precision(truth, predictions):
return precision_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_precision(truth, predictions):
return precision_score(truth, predictions.argmax(axis=1),average='micro')
def gini_samples_precision(truth, predictions):
return precision_score(truth, predictions.argmax(axis=1),average='samples')
def gini_f1(truth, predictions,pos_label=1):
return f1_score(truth, predictions,average=None)[pos_label]
def gini_weighted_f1(truth, predictions):
return f1_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_macro_f1(truth, predictions):
return f1_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_f1(truth, predictions):
return f1_score(truth, predictions.argmax(axis=1),average='micro')
def gini_samples_f1(truth, predictions):
return f1_score(truth, predictions.argmax(axis=1),average='samples')
def gini_log_loss(truth, predictions):
return log_loss(truth, predictions,normalize=True)
def gini_recall(truth, predictions,pos_label=1):
return recall_score(truth, predictions,average=None)[pos_label]
def gini_weighted_recall(truth, predictions):
return recall_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_samples_recall(truth, predictions):
return recall_score(truth, predictions.argmax(axis=1),average='samples')
def gini_macro_recall(truth, predictions):
return recall_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_recall(truth, predictions):
return recall_score(truth, predictions.argmax(axis=1),average='micro')
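# Example (sketch): any of the wrappers above can be turned into an sklearn scorer, e.g.
#     gini_scorer = make_scorer(gini_sklearn, greater_is_better=True)
#     # cross_val_score(estimator, X, y, scoring=gini_scorer, cv=5)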
| StarcoderdataPython |
4814833 | import logging
from typing import Dict, List
from django.conf import settings
from apps.authentication.models import OnlineUser as User
from apps.gsuite.mail_syncer.utils import (
get_excess_groups_for_user,
get_excess_users_in_g_suite,
get_g_suite_users_for_group,
get_missing_g_suite_group_names_for_user,
get_missing_ow4_users_for_g_suite,
get_ow4_users_for_group,
insert_ow4_user_into_g_suite_group,
remove_g_suite_user_from_group,
)
logger = logging.getLogger(__name__)
def insert_ow4_users_into_g_suite(
domain: str,
group_name: str,
missing_users: List[Dict[str, str]],
suppress_http_errors: bool = False,
):
"""
Inserts a list of OW4 users into a G Suite group.
:param domain: The domain in which to insert a user into a group.
:param group_name: The name of the group to insert the user into.
:param missing_users: A list of the missing users to be inserted into said group.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
for missing_user in missing_users:
insert_ow4_user_into_g_suite_group(
domain, group_name, missing_user, suppress_http_errors=suppress_http_errors
)
def remove_excess_g_suite_users(
domain: str,
group_name: str,
g_suite_excess_users: List[Dict[str, str]],
suppress_http_errors: bool = False,
):
"""
Removes excess users from a G Suite group.
:param domain: The domain in which to remove a user from a group.
:param group_name: The name of the group to remove the users from.
:param g_suite_excess_users: A list of the excess users to be removed from said group.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
logger.info(
"Cleaning G Suite group '{group}'.".format(group=group_name),
extra={"group": group_name, "excess_users": g_suite_excess_users},
)
for excess_user in g_suite_excess_users:
resp = remove_g_suite_user_from_group(
domain, group_name, excess_user, suppress_http_errors=suppress_http_errors
)
logger.debug(f"Response from cleaning {group_name}: {resp}")
def insert_ow4_user_into_groups(
domain: str, user: User, group_names: List[str], suppress_http_errors: bool = False
):
"""
Inserts a single OW4 user into a G Suite group.
:param domain: The domain in which to insert a user into a group.
:param user: The user to update group memberships for.
:param group_names: A list of group names to insert the user into.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
groups = [f"{group_name}@{domain}" for group_name in group_names]
if groups:
logger.info(
f"Inserting {user} into some new G Suite groups.",
extra={"new_groups": group_names, "user": user},
)
for group in groups:
insert_ow4_user_into_g_suite_group(
domain, group, user, suppress_http_errors=suppress_http_errors
)
def cleanup_groups_for_user(
domain: str, user: User, suppress_http_errors: bool = False
):
"""
Finds excess groups for a OW4 user, and removes the user from said groups.
:param domain: The domain in which to find a users excess group memberships.
:param user: The user to remove excess group memberships for.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
excess_groups = get_excess_groups_for_user(domain, user)
if excess_groups:
logger.debug(
f'Removing "{user}" from some G Suite groups.',
extra={"user": user, "excess_groups": excess_groups},
)
for group in excess_groups:
remove_g_suite_user_from_group(
domain, group, user.online_mail, suppress_http_errors=suppress_http_errors
)
def update_g_suite_user(
domain: str, ow4_user: User, suppress_http_errors: bool = False
):
"""
Finds missing and excess groups and adds and removes the user to/from them, respectively.
:param domain: The domain in which to update a users group memberships.
:param ow4_user: The user to update group memberships for.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
cleanup_groups_for_user(domain, ow4_user, suppress_http_errors=suppress_http_errors)
insert_ow4_user_into_groups(
domain,
ow4_user,
get_missing_g_suite_group_names_for_user(domain, ow4_user),
suppress_http_errors=suppress_http_errors,
)
def update_g_suite_group(
domain: str, group_name: str, suppress_http_errors: bool = False
):
"""
Finds missing and excess users and adds and removes the users to/from them, respectively.
:param domain: The domain in which to find a group's user lists.
:param group_name: The name of the group to get group membership status for.
:param suppress_http_errors: Whether or not to suppress HttpErrors happening during execution.
"""
if group_name.lower() not in settings.OW4_GSUITE_SYNC.get("GROUPS", {}).keys():
logger.debug(
f"Not running group syncer for group {group_name} - group syncing not enabled for this group"
)
return
g_suite_users = get_g_suite_users_for_group(
domain, group_name, suppress_http_errors=suppress_http_errors
)
ow4_users = get_ow4_users_for_group(group_name)
excess_users = get_excess_users_in_g_suite(g_suite_users, ow4_users)
missing_users = get_missing_ow4_users_for_g_suite(g_suite_users, ow4_users)
# @ToDo: Look into bulk updates
insert_ow4_users_into_g_suite(
domain, group_name, missing_users, suppress_http_errors=suppress_http_errors
)
remove_excess_g_suite_users(
domain, group_name, excess_users, suppress_http_errors=suppress_http_errors
)
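# Example usage (sketch; domain and group name are placeholders, typically driven from a
# task or management command):
#     update_g_suite_group("example.org", "committees", suppress_http_errors=True)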
| StarcoderdataPython |
1757004 |
# coding: utf-8
# In[1]:
#!/usr/bin/env python2
# Inspired by the file train_fcn8.py
import os
import argparse
import time
from getpass import getuser
from distutils.dir_util import copy_tree
import pickle
import numpy as np
import random
import theano
import theano.tensor as T
from theano import config
import lasagne
from lasagne.regularization import regularize_network_params
from lasagne.objectives import categorical_crossentropy
import PIL.Image as Image
from matplotlib import pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
#from fcn_1D_general import buildFCN_1D
from metrics import jaccard, accuracy, crossentropy, weighted_crossentropy
# from data_loader.cortical_layers import CorticalLayersDataset
from data_loader.cortical_layers_w_regions_kfold import CorticalLayersDataset
from simple_model_1path import build_simple_model
from profile_functions import profile2indices
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-nf','--n_filters', help='Number of filters', default=64, type=int)
parser.add_argument('-fs','--filter_size', help='Filter size', default=49, type=int)
parser.add_argument('-d','--depth', help='Number of layers', default=6, type=int)
parser.add_argument('-wd','--weight_decay', help='Weight decay', default=0.001, type=float)
parser.add_argument('-sp','--smooth_penalty', help='Smooth penalty', default=0.0, type=float)
parser.add_argument('-ne','--num_epochs', help='Number of epochs', default=500, type=int)
parser.add_argument('-p','--patience', help='Patience', default=50, type=int)
parser.add_argument('-lr','--learning_rate', help='Initial learning rate', default=0.0005, type=float)
parser.add_argument('-sr','--smooth_or_raw', help='Smooth or raw', default="both", type=str)
parser.add_argument('-nl','--number_layers', help='Number of layers of the dataset', default=6, type=int)
parser.add_argument('-kf','--k_fold', help='Number of folds', default=10, type=int)
parser.add_argument('-vf','--val_fold', help='Validation fold', default=0, type=int)
parser.add_argument('-tf','--test_fold', help='Test fold', default=1, type=int)
parser.add_argument('-bs','--batch_size', help='Batch size', default=1000, type=int)
args = parser.parse_args()
print(args)
# In[2]:
_FLOATX = config.floatX
SAVEPATH = '/data1/users/kwagstyl/bigbrain/datasets'
LOADPATH = '/data1/users/kwagstyl/bigbrain/datasets'
WEIGHTS_PATH = LOADPATH
# In[22]:
#Model hyperparameters
n_filters = args.n_filters # 64
filter_size = [args.filter_size] # [25]#[7,15,25,49]
depth = args.depth #8
data_augmentation={} #{'horizontal_flip': True, 'fill_mode':'constant'}
block = 'bn_relu_conv'
#Training loop hyperparameters
weight_decay= args.weight_decay # 0.001
smooth_penalty = args.smooth_penalty #0.005
num_epochs= args.num_epochs # 500
max_patience= args.patience # 50
resume=False
learning_rate_value = args.learning_rate # 0.0005 #learning rate is defined below as a theano variable.
#Hyperparameters for the dataset loader
batch_size=[args.batch_size,args.batch_size,1] # [1000, 1000, 1]
smooth_or_raw = args.smooth_or_raw # 'both'
shuffle_at_each_epoch = True
minibatches_subset = 0
n_layers = args.number_layers # 6
kfold = args.k_fold # 8
val_fold = args.val_fold # 0
test_fold = args.test_fold # 1
# In[4]:
#
# Prepare load/save directories
#
savepath=SAVEPATH
loadpath=LOADPATH
exp_name = 'simple_model'
exp_name += '_lrate=' + str(learning_rate_value)
exp_name += '_fil=' + str(n_filters)
exp_name += '_fsizes=' + str(filter_size)
exp_name += '_depth=' + str(depth)
exp_name += '_data=' + smooth_or_raw
exp_name += '_decay=' + str(weight_decay)
exp_name += '_smooth=' + str(smooth_penalty)
exp_name += '_pat=' + str(max_patience)
exp_name += '_kfold=' + str(kfold)
exp_name += '_val=' + str(val_fold)
exp_name += '_test=' + str(test_fold)
exp_name += '_batch_size=' + str(batch_size[0])
exp_name += ('_noshuffle'+str(minibatches_subset)+'batch') if not shuffle_at_each_epoch else ''
#exp_name += 'test'
dataset = str(n_layers)+'cortical_layers_all'
savepath = os.path.join(savepath, dataset, exp_name)
loadpath = os.path.join(loadpath, dataset, exp_name)
print 'Savepath : '
print savepath
print 'Loadpath : '
print loadpath
if not os.path.exists(savepath):
os.makedirs(savepath)
else:
print('\033[93m The following folder already exists {}. '
'It will be overwritten in a few seconds...\033[0m'.format(
savepath))
print('Saving directory : ' + savepath)
with open(os.path.join(savepath, "config.txt"), "w") as f:
for key, value in locals().items():
f.write('{} = {}\n'.format(key, value))
# In[ ]:
# In[5]:
#
# Define symbolic variables
#
input_var = T.tensor3('input_var') #n_example*nb_in_channels*ray_size
target_var = T.ivector('target_var') #n_example*ray_size
weight_vector = T.fvector('weight_vector')
learn_step= theano.shared(np.array(learning_rate_value, dtype=theano.config.floatX))
# In[21]:
#
# Build dataset iterator
#
if smooth_or_raw =='both':
nb_in_channels = 2
use_threads = False
else:
nb_in_channels = 1
use_threads = True
train_iter = CorticalLayersDataset(
which_set='train',
smooth_or_raw = smooth_or_raw,
batch_size=batch_size[0],
data_augm_kwargs=data_augmentation,
shuffle_at_each_epoch = shuffle_at_each_epoch,
return_one_hot=False,
return_01c=False,
return_list=False,
use_threads=use_threads,
preload=True,
n_layers=n_layers,
kfold=kfold, # if None, kfold = number of regions (so there is one fold per region)
val_fold=val_fold, # it will use the first fold for validation
test_fold=test_fold) # this fold will not be used to train nor to validate
val_iter = CorticalLayersDataset(
which_set='valid',
smooth_or_raw = smooth_or_raw,
batch_size=batch_size[1],
shuffle_at_each_epoch = shuffle_at_each_epoch,
return_one_hot=False,
return_01c=False,
return_list=False,
use_threads=use_threads,
preload=True,
n_layers=n_layers,
kfold=kfold, # if None, kfold = number of regions (so there is one fold per region)
val_fold=val_fold, # it will use the first fold for validation
test_fold=test_fold) # this fold will not be used to train nor to validate
test_iter = None
n_batches_train = train_iter.nbatches
n_batches_val = val_iter.nbatches
n_batches_test = test_iter.nbatches if test_iter is not None else 0
n_classes = train_iter.non_void_nclasses
void_labels = train_iter.void_labels
#
# Build network
#
simple_net_output, net = build_simple_model(input_var,
filter_size = filter_size,
n_filters = n_filters,
depth = depth,
block= block,
nb_in_channels = nb_in_channels,
n_classes = n_classes)
#simple_net_output = last layer of the simple_model net
#
# Define and compile theano functions
#
#get weights
#Class=np.loadtxt('6layers_segmentation/training_cls.txt')
Class=np.loadtxt('/data1/users/kwagstyl/bigbrain/datasets/6layers_segmentation/training_cls.txt')
def compute_class_weights(Class):
#get unique labels and number of pixels of each class
unique, counts = np.unique(Class,return_counts=True)
#calculate freq(c) number of pixels per class divided by the total number of pixels in images where c is present
freq=counts.astype(float)/Class.size
return np.median(freq)/freq
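# e.g. class frequencies [0.5, 0.3, 0.2] have median 0.3, giving weights [0.6, 1.0, 1.5]
# (rarer classes are up-weighted relative to the median-frequency class).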
weights=compute_class_weights(Class)
# penalty to enforce smoothness
def smooth_convolution(prediction, n_classes):
from lasagne.layers import Conv1DLayer as ConvLayer
from lasagne.layers import DimshuffleLayer, ReshapeLayer
prediction = ReshapeLayer(prediction, (-1, 200, n_classes))
# channels first
prediction = DimshuffleLayer(prediction, (0,2,1))
input_size = lasagne.layers.get_output(prediction).shape
# reshape to put each channel in the batch dimensions, to filter each
# channel independently
prediction = ReshapeLayer(prediction, (T.prod(input_size[0:2]),1,input_size[2]))
trans_filter = np.tile(np.array([0,-1.,1.]).astype('float32'), (1,1,1))
convolved = ConvLayer(prediction,
num_filters = 1,
filter_size = 3,
stride=1,
b = None,
nonlinearity=None,
W = trans_filter,
pad='same')
# reshape back
convolved = ReshapeLayer(convolved, input_size)
return convolved
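# The [0, -1, 1] kernel above computes first differences along each profile, so penalising
# the absolute value of `convolved` encourages piecewise-constant (smooth) label probabilities.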
print "Defining and compiling training functions"
convolved = smooth_convolution(simple_net_output[0], n_classes)
prediction, convolved = lasagne.layers.get_output([simple_net_output[0], convolved])
#loss = categorical_crossentropy(prediction, target_var)
#loss = loss.mean()
loss = weighted_crossentropy(prediction, target_var, weight_vector)
loss = loss.mean()
if weight_decay > 0:
weightsl2 = regularize_network_params(
simple_net_output, lasagne.regularization.l2)
loss += weight_decay * weightsl2
if smooth_penalty > 0:
smooth_cost = T.sum(abs(convolved), axis=(1,2))
loss += smooth_penalty * smooth_cost.mean()
train_acc, train_sample_acc = accuracy(prediction, target_var, void_labels)
params = lasagne.layers.get_all_params(simple_net_output, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=learn_step)
train_fn = theano.function([input_var, target_var, weight_vector], [loss, train_acc, train_sample_acc, prediction],
updates=updates)#, profile=True)
print "Done"
# In[11]:
print "Defining and compiling valid functions"
valid_prediction = lasagne.layers.get_output(simple_net_output[0],
deterministic=True)
#valid_loss = categorical_crossentropy(valid_prediction, target_var)
#valid_loss = valid_loss.mean()
valid_loss = weighted_crossentropy(valid_prediction, target_var, weight_vector)
valid_loss = valid_loss.mean()
#valid_loss = crossentropy(valid_prediction, target_var, void_labels)
valid_acc, valid_sample_acc = accuracy(valid_prediction, target_var, void_labels)
valid_jacc = jaccard(valid_prediction, target_var, n_classes)
valid_fn = theano.function([input_var, target_var, weight_vector],
[valid_loss, valid_acc, valid_sample_acc, valid_jacc])#,profile=True)
print "Done"
#whether to plot labels prediction or not during training
#(1 random example of the last minibatch for each epoch)
plot_results_train = False #from the training set
plot_results_valid = False #from the validation set
treshold = 0.7 # for extracting the very incorrect labelled samples
ratios=[0.80,0.85, 0.90] #ratios for the per sample accuracy
# In[ ]:
# Train loop
#
err_train = []
acc_train = []
sample_acc_train_tot = []
worse_indices_train = []
already_seen_idx = []
err_valid = []
acc_valid = []
jacc_valid = []
sample_acc_valid_tot = []
patience = 0
worse_indices_valid =[]
# Training main loop
print "Start training"
for epoch in range(num_epochs):
#learn_step.set_value((learn_step.get_value()*0.99).astype(theano.config.floatX))
# Single epoch training and validation
start_time = time.time()
#Cost train and acc train for this epoch
cost_train_epoch = 0
acc_train_epoch = 0
sample_acc_train_epoch = np.array([0.0 for i in range(len(ratios))])
# worse_indices_train_epoch = []
for i in range(n_batches_train):
# Get minibatch (comment the next line if only 1 minibatch in training)
train_batch = train_iter.next()
X_train_batch, L_train_batch = train_batch['data'], train_batch['labels']
L_train_batch = np.reshape(L_train_batch, np.prod(L_train_batch.shape))
# Training step
cost_train_batch, acc_train_batch, sample_acc_train_batch, pred = train_fn(
X_train_batch, L_train_batch, weights[L_train_batch].astype('float32'))
sample_acc_train_batch_mean = [np.mean([(i>=ratio)
for i in sample_acc_train_batch]) for ratio in ratios]
# worse_indices_train_batch = index_worse_than(sample_acc_train_batch,
# idx_train_batch, treshold=treshold)
#print i, 'training batch cost : ', cost_train_batch, ' batch accuracy : ', acc_train_batch
#Update epoch results
cost_train_epoch += cost_train_batch
acc_train_epoch += acc_train_batch
sample_acc_train_epoch += sample_acc_train_batch_mean
# worse_indices_train_epoch = np.hstack((worse_indices_train_epoch,worse_indices_train_batch))
#Add epoch results
err_train += [cost_train_epoch/n_batches_train]
acc_train += [acc_train_epoch/n_batches_train]
sample_acc_train_tot += [sample_acc_train_epoch/n_batches_train]
# worse_indices_train += [worse_indices_train_epoch]
# Validation
cost_val_epoch = 0
acc_val_epoch = 0
sample_acc_valid_epoch = np.array([0.0 for i in range(len(ratios))])
jacc_val_epoch = np.zeros((2, n_classes))
# worse_indices_val_epoch = []
for i in range(n_batches_val):
# Get minibatch (comment the next line if only 1 minibatch in training)
val_batch = val_iter.next()
X_val_batch, L_val_batch = val_batch['data'], val_batch['labels']
L_val_batch = np.reshape(L_val_batch, np.prod(L_val_batch.shape))
# Validation step
cost_val_batch, acc_val_batch, sample_acc_valid_batch, jacc_val_batch = valid_fn(X_val_batch, L_val_batch, weights[L_val_batch].astype('float32'))
#print i, 'validation batch cost : ', cost_val_batch, ' batch accuracy : ', acc_val_batch
sample_acc_valid_batch_mean = [np.mean([(i>=ratio)
for i in sample_acc_valid_batch]) for ratio in ratios]
#Update epoch results
cost_val_epoch += cost_val_batch
acc_val_epoch += acc_val_batch
sample_acc_valid_epoch += sample_acc_valid_batch_mean
jacc_val_epoch += jacc_val_batch
# worse_indices_val_epoch = np.hstack((worse_indices_val_epoch, worse_indices_val_batch))
#
#Add epoch results
err_valid += [cost_val_epoch/n_batches_val]
acc_valid += [acc_val_epoch/n_batches_val]
sample_acc_valid_tot += [sample_acc_valid_epoch/n_batches_val]
jacc_perclass_valid = jacc_val_epoch[0, :] / jacc_val_epoch[1, :]
jacc_valid += [np.mean(jacc_perclass_valid)]
# worse_indices_valid += [worse_indices_val_epoch]
#Print results (once per epoch)
out_str = "EPOCH %i: Avg cost train %f, acc train %f"+ ", cost val %f, acc val %f, jacc val %f took %f s"
out_str = out_str % (epoch, err_train[epoch],
acc_train[epoch],
err_valid[epoch],
acc_valid[epoch],
jacc_valid[epoch],
time.time()-start_time)
out_str2 = 'Per sample accuracy (ratios ' + str(ratios) + ') '
out_str2 += ' train ' +str(sample_acc_train_tot[epoch])
out_str2 += ' valid ' + str(sample_acc_valid_tot[epoch])
print out_str
print out_str2
# Early stopping and saving stuff
with open(os.path.join(savepath, "fcn1D_output.log"), "a") as f:
f.write(out_str + "\n")
if epoch == 0:
best_jacc_val = jacc_valid[epoch]
elif epoch > 1 and jacc_valid[epoch] > best_jacc_val:
print('saving best (and last) model')
best_jacc_val = jacc_valid[epoch]
patience = 0
np.savez(os.path.join(savepath, 'new_fcn1D_model_best.npz'),
*lasagne.layers.get_all_param_values(simple_net_output))
np.savez(os.path.join(savepath , "fcn1D_errors_best.npz"),
err_train=err_train, acc_train=acc_train,
err_valid=err_valid, acc_valid=acc_valid, jacc_valid=jacc_valid)
np.savez(os.path.join(savepath, 'new_fcn1D_model_last.npz'),
*lasagne.layers.get_all_param_values(simple_net_output))
np.savez(os.path.join(savepath , "fcn1D_errors_last.npz"),
err_train=err_train, acc_train=acc_train,
err_valid=err_valid, acc_valid=acc_valid, jacc_valid=jacc_valid)
else:
patience += 1
print('saving last model')
np.savez(os.path.join(savepath, 'new_fcn1D_model_last.npz'),
*lasagne.layers.get_all_param_values(simple_net_output))
np.savez(os.path.join(savepath , "fcn1D_errors_last.npz"),
err_train=err_train, acc_train=acc_train,
err_valid=err_valid, acc_valid=acc_valid, jacc_valid=jacc_valid)
# Finish training if patience has expired or max nber of epochs reached
if patience == max_patience or epoch == num_epochs-1:
if savepath != loadpath:
print('Copying model and other training files to {}'.format(loadpath))
copy_tree(savepath, loadpath)
break
| StarcoderdataPython |
3268181 | <filename>controllers/project.py
# -*- coding: utf-8 -*-
"""
Project
@author: <NAME> (<EMAIL>)
@date-created: 2010-08-25
Project Management
"""
module = request.controller
response.menu_options = org_menu
#==============================================================================
# @ToDo: Create should be restricted to Admin
def activity_type():
"RESTful CRUD controller"
resource = request.function
tablename = "%s_%s" % (module, resource)
table = db[tablename]
return shn_rest_controller(module, resource, listadd=False)
#==============================================================================
def activity():
"RESTful CRUD controller"
resource = request.function
tablename = "%s_%s" % (module, resource)
table = db[tablename]
def postp(jr, output):
shn_action_buttons(jr)
return output
response.s3.postp = postp
return shn_rest_controller(module, resource) | StarcoderdataPython |
1686825 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from django.test import TestCase
from django_dynamic_fixture import get, new
from readthedocs.builds.constants import (
BRANCH,
LATEST,
STABLE,
TAG,
EXTERNAL,
)
from readthedocs.builds.models import Version
from readthedocs.projects.constants import REPO_TYPE_GIT, REPO_TYPE_HG
from readthedocs.projects.models import Project
class VersionCommitNameTests(TestCase):
def test_branch_name_unicode_non_ascii(self):
unicode_name = b'abc_\xd1\x84_\xe2\x99\x98'.decode('utf-8')
version = new(Version, identifier=unicode_name, type=BRANCH)
self.assertEqual(version.identifier_friendly, unicode_name)
def test_branch_name_made_friendly_when_sha(self):
commit_hash = '3d92b728b7d7b842259ac2020c2fa389f13aff0d'
version = new(
Version, identifier=commit_hash,
slug=STABLE, verbose_name=STABLE, type=TAG,
)
# we shorten commit hashes to keep things readable
self.assertEqual(version.identifier_friendly, '3d92b728')
def test_branch_name(self):
version = new(
Version, identifier='release-2.5.x',
slug='release-2.5.x', verbose_name='release-2.5.x',
type=BRANCH,
)
self.assertEqual(version.commit_name, 'release-2.5.x')
def test_tag_name(self):
version = new(
Version, identifier='10f1b29a2bd2', slug='release-2.5.0',
verbose_name='release-2.5.0', type=TAG,
)
self.assertEqual(version.commit_name, 'release-2.5.0')
def test_branch_with_name_stable(self):
version = new(
Version, identifier='origin/stable', slug=STABLE,
verbose_name='stable', type=BRANCH,
)
self.assertEqual(version.commit_name, 'stable')
def test_stable_version_tag(self):
version = new(
Version,
identifier='3d92b728b7d7b842259ac2020c2fa389f13aff0d',
slug=STABLE, verbose_name=STABLE, type=TAG,
)
self.assertEqual(
version.commit_name,
'3d92b728b7d7b842259ac2020c2fa389f13aff0d',
)
def test_hg_latest_branch(self):
hg_project = get(Project, repo_type=REPO_TYPE_HG)
version = new(
Version, identifier='default', slug=LATEST,
verbose_name=LATEST, type=BRANCH, project=hg_project,
)
self.assertEqual(version.commit_name, 'default')
def test_git_latest_branch(self):
git_project = get(Project, repo_type=REPO_TYPE_GIT)
version = new(
Version, project=git_project,
identifier='origin/master', slug=LATEST,
verbose_name=LATEST, type=BRANCH,
)
self.assertEqual(version.commit_name, 'master')
def test_external_version(self):
identifier = 'ec26de721c3235aad62de7213c562f8c821'
version = new(
Version, identifier=identifier,
slug='11', verbose_name='11',
type=EXTERNAL,
)
self.assertEqual(version.commit_name, identifier)
| StarcoderdataPython |
1717717 | <gh_stars>1-10
from enum import IntEnum
import typer
class LinterLevel(IntEnum):
"""
Linter output severity levels. Rough definitions:
Notice: Likely not a problem, but worth noting.
Caution: May be a problem.
Warning: Likely a problem.
Failure: Should be considered a test failure.
(For repository maintainer use only.)
"""
Empty = 0
Notice = 1
Caution = 2
Warning = 3
Failure = 4
color_map = {
LinterLevel.Empty: typer.colors.WHITE,
LinterLevel.Notice: typer.colors.BLUE,
LinterLevel.Caution: typer.colors.GREEN,
LinterLevel.Warning: typer.colors.YELLOW,
LinterLevel.Failure: typer.colors.RED,
}
class LinterOutput():
@property
def level(self):
return self._level
@level.setter
def level(self, value: LinterLevel):
if not isinstance(value, LinterLevel):
raise TypeError('Severity level must be a valid LinterLevel')
self._level = value
@property
def title(self):
return self._title
@title.setter
def title(self, value: str):
self._title = value
# TODO: message and help_string get their indentation from the
# indentation of the linter itself. It should be made uniform in
# the setters.
@property
def message(self):
return self._message
@message.setter
def message(self, value: str):
self._message = value
@property
def help_string(self):
return self._help_string
@help_string.setter
def help_string(self, value: str):
self._help_string = value
def pretty_str(self, verbose: bool = True):
if self.level is LinterLevel.Empty:
raise ValueError('Cannot pretty print an empty LinterOutput')
out = typer.style('\n+ ', fg=typer.colors.MAGENTA)
out += typer.style('FAULT: ', fg=typer.colors.BRIGHT_MAGENTA)
out += typer.style(self.title, fg=typer.colors.BRIGHT_MAGENTA, bold=True)
out += typer.style('\n SEVERITY: ', fg=typer.colors.BRIGHT_MAGENTA)
out += typer.style(self.level.name, fg=color_map[self.level], bold=True)
out += typer.style('\n DETAILS:\n', fg=typer.colors.BRIGHT_MAGENTA)
out += typer.style(self.message, fg=typer.colors.WHITE)
if verbose:
out += typer.style('\n SUGGESTIONS:\n', fg=typer.colors.BRIGHT_MAGENTA)
out += typer.style(self.help_string, fg=typer.colors.WHITE)
return out
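# A minimal usage sketch (not from the original module); the fault below is invented
# purely to illustrate the API.
if __name__ == "__main__":
    fault = LinterOutput()
    fault.level = LinterLevel.Warning
    fault.title = "Example fault"
    fault.message = "    Something in the repository looks off."
    fault.help_string = "    Consider fixing it before release."
    typer.echo(fault.pretty_str(verbose=True))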
| StarcoderdataPython |
167109 | <gh_stars>0
#!/usr/bin/env initPy
import sys
import myProj.newYears as nye
nCount = int(sys.argv[1]) \
if len(sys.argv) > 1 else 10
nye.countdown(nCount)
| StarcoderdataPython |
98513 | import os
import requests
import json
import discord
from discord.ext import commands
client = commands.Bot(command_prefix=commands.when_mentioned_or("d/","D/"),help_command=None)
#api key references
my_secret = os.environ['TOKEN']
apiSecret = os.environ['apexApi']
#notify me when bot has come online
@client.event
async def on_ready():
print('bot is now online!!!!')
@client.event
async def on_message(message):
msg_content = message.content.lower()
#bot command
atCommandMap = ["!map"]
if any(word in msg_content for word in atCommandMap):
r = requests.get(f"https://api.mozambiquehe.re/maprotation?version=2&auth={apiSecret}")
        if not r.ok:
print("api is not working")
else:
j=r.json()
#battleRoyale Maps
currentMapName = j["battle_royale"]["current"]["map"]
nextMapName = j["battle_royale"]["next"]["map"]
timeRemaining = j["battle_royale"]["current"]["remainingMins"]
await message.channel.send(f"Current Map: {currentMapName}\nNext Map: {nextMapName}\nTime Remaining: {timeRemaining}")
client.run(my_secret)
| StarcoderdataPython |
196758 | # ----------------------------------------------------------------------
# Test core.clickhouse package
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import datetime
# Third-party modules
import pytest
# NOC modules
from noc.core.clickhouse.model import Model, NestedModel
from noc.core.clickhouse.fields import StringField, Int8Field, NestedField, DateField
class Pair(NestedModel):
index = Int8Field()
text = StringField()
class MyModel(Model):
class Meta(object):
db_table = "mymodel"
date = DateField()
text = StringField()
pairs = NestedField(Pair)
@pytest.mark.parametrize(
"data,expected",
[
(
{
"date": datetime.date(year=2019, month=9, day=26),
"text": "Test",
"pairs": [{"index": 1, "text": "First"}, {"index": "2", "text": "Second"}],
},
{
"date": "2019-09-26",
"pairs.index": [1, 2],
"pairs.text": ["First", "Second"],
"text": "Test",
},
)
],
)
def test_to_json(data, expected):
assert MyModel.to_json(**data) == expected
@pytest.mark.xfail
def test_mymodel_to_python():
# Check TSV conversion
today = datetime.date.today()
ch_data = MyModel.to_python([today.isoformat(), "Test", "1:'First',2:'Second'"])
valid_data = {
"date": today,
"text": "Test",
"pairs": [{"index": 1, "text": "First"}, {"index": 2, "text": "Second"}],
}
assert ch_data == valid_data
| StarcoderdataPython |
4808928 | <filename>vision/ssd/config/vgg_ssd_config640.py
import numpy as np
from vision.utils.box_utils import SSDSpec, SSDBoxSizes, generate_ssd_priors
image_size = 640
image_mean = np.array([123, 117, 104]) # RGB layout
image_std = 1.0
iou_threshold = 0.45
center_variance = 0.1
size_variance = 0.2
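# Each SSDSpec entry is (feature_map_size, shrinkage, SSDBoxSizes(min, max), aspect_ratios)
# and is consumed by generate_ssd_priors below to build the prior boxes for a 640x640 input.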
specs = [
SSDSpec(80, 8, SSDBoxSizes(15, 30), [2, 3]),
SSDSpec(40, 16, SSDBoxSizes(30, 60), [2, 3]),
SSDSpec(20, 32, SSDBoxSizes(60, 105), [2, 3]),
SSDSpec(10, 64, SSDBoxSizes(105, 150), [2, 3]),
SSDSpec(8, 80, SSDBoxSizes(150, 200), [2, 3]),
SSDSpec(6, 107, SSDBoxSizes(250, 340), [2, 3])
]
#specs = [
# SSDSpec(38, 16, SSDBoxSizes(15, 30), [1, 2]),
# SSDSpec(19, 32, SSDBoxSizes(30, 60), [1, 2]),
# SSDSpec(10, 64, SSDBoxSizes(60, 105), [1, 2]),
# SSDSpec(5, 100, SSDBoxSizes(105, 150), [1, 2]),
# SSDSpec(3, 150, SSDBoxSizes(150, 195), [1, 2]),
# SSDSpec(1, 300, SSDBoxSizes(195, 240), [1, 2])
#]
#specs = [ # index-2
# SSDSpec(38, 8, SSDBoxSizes(15, 30), [2]),
# SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),
# SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),
# SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),
# SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),
# SSDSpec(1, 300, SSDBoxSizes(264, 315), [2])
#]
# orig
#specs = [
# SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]),
# SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),
# SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),
# SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),
# SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),
# SSDSpec(1, 300, SSDBoxSizes(264, 315), [2])
#]
priors = generate_ssd_priors(specs, image_size)
| StarcoderdataPython |
1792891 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from ovs.log.logHandler import LogHandler
logger = LogHandler('extensions', name='exportfs')
class Nfsexports(object):
"""
Basic management for /etc/exports
"""
def __init__(self):
self._exportsFile = '/etc/exports'
self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs']
self._restart = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra']
self._rpcmountd_stop = ['/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd']
self._rpcmountd_start = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/rpc.mountd', '--manage-gids']
def _slurp(self):
"""
Read from /etc/exports
"""
f = open(self._exportsFile, 'r')
dlist = []
for line in f:
if not re.match('^\s*$', line):
dlist.append(line)
f.close()
dlist = [i.strip() for i in dlist if not i.startswith('#')]
dlist = [re.split('\s+|\(|\)', i) for i in dlist]
keys = ['dir', 'network', 'params']
ldict = [dict(zip(keys, line)) for line in dlist]
return ldict
def add(self, directory, network, params):
"""
Add entry to /etc/exports
@param directory: directory to export
@param network: network range allowed
@param params: params for export (eg, 'ro,async,no_root_squash,no_subtree_check')
"""
l = self._slurp()
for i in l:
if i['dir'] == directory:
logger.info('Directory already exported, to export with different params please first remove')
return
f = open(self._exportsFile, 'a')
f.write('%s %s(%s)\n' % (directory, network, params))
f.close()
def remove(self, directory):
"""
Remove entry from /etc/exports
"""
l = self._slurp()
for i in l:
if i['dir'] == directory:
l.remove(i)
f = open(self._exportsFile, 'w')
for i in l:
f.write("%s %s(%s) \n" % (i['dir'], i['network'], i['params']))
f.close()
return
def list_exported(self):
"""
List the current exported filesystems
"""
exports = {}
output = subprocess.check_output(self._cmd)
for export in re.finditer('(\S+?)[\s\n]+(\S+)\n?', output):
exports[export.group(1)] = export.group(2)
return exports
def unexport(self, directory):
"""
Unexport a filesystem
"""
cmd = list(self._cmd)
exports = self.list_exported()
        if directory not in exports.keys():
logger.info('Directory %s currently not exported' % directory)
return
logger.info('Unexporting {}:{}'.format(exports[directory] if exports[directory] != '<world>' else '*', directory))
cmd.extend(['-u', '{}:{}'.format(exports[directory] if exports[directory] != '<world>' else '*', directory)])
subprocess.call(cmd)
def export(self, directory, network='*'):
"""
Export a filesystem
"""
cmd = list(self._cmd)
exports = self.list_exported()
if directory in exports.keys():
logger.info('Directory already exported with options %s' % exports[directory])
return
logger.info('Exporting {}:{}'.format(network, directory))
cmd.extend(['-v', '{}:{}'.format(network, directory)])
subprocess.call(cmd)
subprocess.call(self._restart)
def trigger_rpc_mountd(self):
subprocess.call(self._rpcmountd_stop)
subprocess.call(self._rpcmountd_start)
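# Illustrative usage sketch (not part of the original module); the directory and
# network below are invented values and the underlying commands require sudo rights.
if __name__ == '__main__':
    exports = Nfsexports()
    exports.add('/mnt/example_share', '10.0.0.0/24', 'rw,async,no_subtree_check')
    exports.export('/mnt/example_share', '10.0.0.0/24')
    print(exports.list_exported())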
| StarcoderdataPython |
1671326 | <reponame>ingmarschuster/JaxRK<gh_stars>0
from ..core.typing import Array
import jax.numpy as np
import numpy.random as random
def inv_blockmatr(P:Array, P_inv:Array, Q:Array, R:Array, S:Array) -> Array:
"""Given P and P^{-1}, compute the inverse of the block-partitioned matrix
P Q
R S
and return it. Based on Woodbury, Sherman & Morrison formula.
Args:
P (Array): Upper left matrix.
P_inv (Array): Inverse of upper left matrix.
Q (Array): Upper right matrix.
R (Array): Lower left matrix.
S (Array): Lower right matrix.
Returns:
Array: Inverse of the block matrix [[P, Q], [R, S]]
"""
S_ = np.linalg.inv(S - R @ P_inv @ Q)
R_ = -S_ @ R @ P_inv
Q_ = -P_inv @ Q @ S_
P_ = P_inv + P_inv @ Q @ S_ @ R @ P_inv
return np.vstack([np.hstack([P_, Q_]), np.hstack([R_, S_])])
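# A small self-check sketch (not part of the original module; the package-relative
# import above means this is illustrative rather than a standalone script): the
# block-wise inverse should match directly inverting the assembled [[P, Q], [R, S]].
if __name__ == "__main__":
    rng = random.RandomState(0)
    P = 3.0 * np.eye(3) + rng.randn(3, 3)
    Q = rng.randn(3, 2)
    R = rng.randn(2, 3)
    S = 3.0 * np.eye(2) + rng.randn(2, 2)
    full = np.vstack([np.hstack([P, Q]), np.hstack([R, S])])
    block_inv = inv_blockmatr(P, np.linalg.inv(P), Q, R, S)
    print(np.allclose(block_inv, np.linalg.inv(full), atol=1e-5))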
# FIXME: implement Cholesky up/downdates: see
# https://en.wikipedia.org/wiki/Cholesky_decomposition#Adding_and_removing_rows_and_columns
# and
# https://math.stackexchange.com/questions/955874/cholesky-factor-when-adding-a-row-and-column-to-already-factorized-matrix | StarcoderdataPython |
1764863 | import unittest
from app.models import User, Post, Comment
class CommentModelTest(unittest.TestCase):
def setUp(self):
self.user_James = User(username='James', password='<PASSWORD>', email='<EMAIL>')
self.new_post = Post(id=1, post_title='Test', post_content='This is a test post', category="interview",
user=self.user_James, likes=0, dislikes=0)
self.new_comment = Comment(id=1, comment='Test comment', user=self.user_James, post=self.new_post)
def tearDown(self):
Post.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment, 'Test comment')
self.assertEquals(self.new_comment.user, self.user_James)
self.assertEquals(self.new_comment.post, self.new_post)
| StarcoderdataPython |
1765590 | # -*- coding: utf-8 -*-
from OOPHerySchool.school import Student,Tesla,SpecialStudent,Teacher
from OOPHerySchool.newschool import Test | StarcoderdataPython |
3334315 | <filename>agents/mem_net.py
'''
Implementation of augmented memory network
'''
import torch
import torch.nn as nn
from torchvision import models
import numpy as np
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Net(nn.Module):
def __init__(self, MemNumSlots, MemFeatSz, model_config):
super(Net, self).__init__()
self.config = model_config
self.batch_size = -1 #will be overwritten later
self.memfeat = MemFeatSz
self.memslots = MemNumSlots
self.focus_beta = self.config['mem_focus_beta'] #focus on content
self.sharp_gamma = 1 #focus on locations
# "ResNet18" defaults to squeezenet for backwards compatibility with older code
if self.config["model_name"] == "SqueezeNet" or self.config["model_name"] == "ResNet18":
self.compressedChannel = 512
self.origsz = 13
self.cutlayer = 12
# Load pretrained squeezenet model
self.model = models.squeezenet1_0(pretrained = self.config['pretrained'])
self.model.classifier[1] = nn.Conv2d(self.compressedChannel,self.config['n_class'], (3, 3), stride=(1, 1), padding=(1, 1))
# freezing weights for feature extraction if desired
for param in self.model.parameters():
param.requires_grad = True
#print(self.model)
# Remove last two layers: adaptive pool + fc layers
self.FeatureExtractor = torch.nn.Sequential(*(list(self.model.features)[:self.cutlayer]))
# freezing weights for feature extraction if desired
if self.config['freeze_feature_extract']:
for param in self.FeatureExtractor.parameters():
param.requires_grad = False
self.block = torch.nn.Sequential(*(list(self.model.features)[self.cutlayer:]),
self.model.classifier, torch.nn.Flatten())
elif self.config["model_name"] == "MobileNet":
self.compressedChannel = 64
self.origsz = 14
self.cutlayer = 8
self.model = models.mobilenet_v2(pretrained = self.config['pretrained'])
self.model.classifier[1] = nn.Linear(1280,self.config['n_class'])
# freezing weights for feature extraction if desired
for param in self.model.parameters():
param.requires_grad = True
#print(self.model)
self.FeatureExtractor = torch.nn.Sequential(*(list(self.model.features)[:self.cutlayer]))
# freezing weights for feature extraction if desired
if self.config['freeze_feature_extract']:
for param in self.FeatureExtractor.parameters():
param.requires_grad = False
self.avgpool = torch.nn.Sequential(nn.AvgPool2d(kernel_size=7, stride=1), nn.Flatten()).cuda()
self.block = torch.nn.Sequential(*(list(self.model.features)[self.cutlayer:]),
self.avgpool,
self.model.classifier)
else:
raise ValueError("Invalid model name. Use 'SqueezeNet' or 'MobileNet'")
#print(self.block)
#self.memory = nn.Linear(self.memfeat, self.memfeat, False)
self.memory = Parameter(torch.randn(MemNumSlots, self.memfeat))
#self.memoryD = Parameter(torch.randn(MemNumSlots, self.memfeat))
if self.config['freeze_memory']:
self.memory.requires_grad = False
#self.memoryD.requires_grad = False
else:
self.memory.requires_grad = True
#self.memoryD.requires_grad = True
#self.relu = nn.ReLU()
#self.sigmoid = nn.Sigmoid()
def forward(self,x):
self.batch_size = x.size(0)
#print("-----------")
#print(x.shape)
extracted = self.FeatureExtractor(x)
#print(extracted.shape)
#x = extracted.view(-1,512,13,1,13).repeat(1,1,1,self.memslots,1)
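        # Reshape the C x H x W feature map into per-position groups of `memfeat`-dim
        # keys, attend over the memory slots with a softmax on content similarity,
        # and reassemble the read vectors back into a feature map of the same shape.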
x = extracted.view(-1, self.compressedChannel, self.origsz, self.origsz) # dim=3
x = x.permute(0, 2, 3, 1)
assert self.compressedChannel % self.memfeat == 0, "Parameter memory_Nfeat must be a factor of " + str(self.compressedChannel)
x = x.view(-1, self.origsz, self.origsz, int(self.compressedChannel/self.memfeat), self.memfeat) # dim=4 (Morgan: I think it's 5-dimensional)
# self.memory = self.sigmoid(self.memory)
att_read = self._similarity(x, self.focus_beta, self.memory)
att_read = self._sharpen(att_read, self.sharp_gamma)
# att_read = self.sigmoid(att_read-1)
# print(att_read[0,0,:])
# read = F.linear(att_read, self.memory)
read = att_read.matmul(self.memory)
read = read.view(-1, self.origsz, self.origsz, self.compressedChannel).permute(0, 3, 1, 2)
read = read.view(-1, self.compressedChannel, self.origsz, self.origsz)
direct = self.block(extracted)
out = self.block(read)
return direct, out, att_read, read, extracted
def forward_woMem(self, x):
self.batch_size = x.size(0)
extracted = self.FeatureExtractor(x)
direct = self.block(extracted)
return direct
def forward_attonly(self, read):
#att_read = att_read.view(-1,13,13,64, self.memslots)
#att_read = F.softmax(att_read,dim=3)
#read = att_read.matmul(self.memory)
#read = F.linear(att_read, self.memory)
read = read.view(-1, self.origsz, self.origsz, self.compressedChannel).permute(0, 3, 1, 2)
#read = read.view(-1, self.compressedChannel,self.origsz,self.origsz)
out = self.block(read)
return out, read
def forward_directonly(self, extracted):
extracted = extracted.view(-1,self.compressedChannel, self.origsz, self.origsz)
out = self.block(extracted)
return out
def _similarity(self, key, focus_beta, memory):
#key = key.view(self.batch_size, 1, -1)
#print(key.shape)
#print(memory.shape)
simmat = key.matmul( memory.t())
#simmat = F.cosine_similarity(memory + 1e-16, key + 1e-16, dim=-1)
w = F.softmax(focus_beta * simmat, dim=4)
return w
def _sharpen(self, wc, sharp_gamma):
w = wc ** sharp_gamma
#print(w.shape)
w = torch.div(w, torch.sum(w, dim=4).unsqueeze(4)+ 1e-16)
return w
def evalModeOn(self):
self.eval()
self.FeatureExtractor.eval()
self.block.eval()
self.memory.requires_grad = False
#self.memory.eval()
def trainModeOn(self):
self.train()
self.FeatureExtractor.train()
self.block.train()
if self.config['freeze_feature_extract']:
for param in self.FeatureExtractor.parameters():
param.requires_grad = False
self.memory.requires_grad = False
def trainMemoryOn(self):
self.train()
self.FeatureExtractor.train()
self.block.train()
for param in self.FeatureExtractor.parameters():
param.requires_grad = False
for param in self.block.parameters():
param.requires_grad = True
self.memory.requires_grad = True
def trainOverallOn(self):
self.train()
self.FeatureExtractor.train()
self.block.train()
if self.config['freeze_feature_extract']:
for param in self.FeatureExtractor.parameters():
param.requires_grad = False
for param in self.block.parameters():
param.requires_grad = True
if self.config['freeze_memory']:
self.memory.requires_grad = False
#self.memoryD.requires_grad = False
else:
self.memory.requires_grad = True
| StarcoderdataPython |
96421 | # Copyright 2017 Brocade Communications Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IANA Private Enterprise Numbers
See:
https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers
"""
# Brocade Private Enterprise Number (PEN)
# see: http://www.iana.org/assignments/enterprise-numbers/enterprise-numbers
# Brocade Communications Systems, Inc.
# <NAME>
# <EMAIL>
BROCADE_PEN = 1588 #todo add other Brocade PEN values?
#todo switch to a dict approach to avoid duplication
#todo add all PEN values?
VALID_PENS = { BROCADE_PEN }
#todo check type on all fns
def assert_valid_pen( pen ):
assert pen in VALID_PENS
| StarcoderdataPython |
3235101 | #!/usr/bin/env python3
from deid.dicom import get_files, replace_identifiers
from deid.data import get_dataset
# This is an example of replacing fields in dicom headers,
# but via a function instead of a preset identifier.
# This will get a set of example cookie dicoms
base = get_dataset("dicom-cookies")
dicom_files = list(get_files(base)) # todo : consider using generator functionality
# This is the function to get identifiers
from deid.dicom import get_identifiers
items = get_identifiers(dicom_files)
# **
# The function performs an action to generate a uid, but you can also use
# it to communicate with databases, APIs, or do something like
# save the original (and newly generated one) in some (IRB approvied) place
# **
################################################################################
# The Deid Recipe
#
# The process of updating header values means writing a series of actions
# in the deid recipe, in this folder the file "deid.dicom" that has the
# following content:
#
# FORMAT dicom
# %header
# REPLACE StudyInstanceUID func:generate_uid
# REPLACE SeriesInstanceUID func:generate_uid
# ADD FrameOfReferenceUID func:generate_uid
#
# In the above we are saying we want to replace the fields above with the
# output from the generate_uid function, which is expected in the item dict
##################################
# Create the DeidRecipe Instance from deid.dicom
from deid.config import DeidRecipe
recipe = DeidRecipe("deid.dicom")
# To see an entire (raw in a dictionary) recipe just look at
recipe.deid
# What is the format?
recipe.get_format()
# dicom
# What actions do we want to do on the header?
recipe.get_actions()
"""
[{'action': 'REPLACE',
'field': 'StudyInstanceUID',
'value': 'func:generate_uid'},
{'action': 'REPLACE',
'field': 'SeriesInstanceUID',
'value': 'func:generate_uid'},
{'action': 'REPLACE',
'field': 'FrameOfReferenceUID',
'value': 'func:generate_uid'}]
"""
# We can filter to an action type (not useful here, we only have one type)
recipe.get_actions(action="REPLACE")
# or we can filter to a field
recipe.get_actions(field="FrameOfReferenceUID")
"""
[{'action': 'REPLACE',
'field': 'FrameOfReferenceUID',
'value': 'func:generate_uid'}]
"""
# and logically, both (not useful here)
recipe.get_actions(field="PatientID", action="REMOVE")
# Here we need to update each item with the function we want to use!
def generate_uid(item, value, field, dicom):
"""This function will generate a dicom uid! You can expect it to be passed
the dictionary of items extracted from the dicom (and your function)
and variables, the original value (func:generate_uid) and the field
object you are applying it to.
"""
import uuid
# a field can either be just the name string, or a DicomElement
if hasattr(field, "name"):
field = field.name
# Your organization should have it's own DICOM ORG ROOT.
# For the purpose of an example, borrowing PYMEDPHYS_ROOT_UID.
#
# When using a UUID to dynamically create a UID (e.g. SOPInstanceUID),
# the root '2.25' can be used instead of an organization's root.
# For more information see DICOM PS3.5 2020b B.2
ORG_ROOT = "1.2.826.0.1.3680043.10.188" # e.g. PYMEDPHYS_ROOT_UID
    prefix = field.lower().replace(" ", "_")
bigint_uid = str(uuid.uuid4().int)
full_uid = ORG_ROOT + "." + bigint_uid
sliced_uid = full_uid[0:64] # A DICOM UID is limited to 64 characters
return prefix + "-" + sliced_uid
# Remember, the action is:
# REPLACE StudyInstanceUID func:generate_uid
# so the key needs to be generate_uid
for item in items:
items[item]["generate_uid"] = generate_uid
# Now let's generate the cleaned files! It will output to a temporary directory
# And then use the deid recipe and updated to create new files
cleaned_files = replace_identifiers(dicom_files=dicom_files, deid=recipe, ids=items)
# Print a cleaned file
print(cleaned_files[0])
| StarcoderdataPython |
184391 | from typing import List, Any, Dict
import numpy as np
from swd.bonuses import BONUSES, ImmediateBonus, SCIENTIFIC_SYMBOLS_RANGE
from swd.cards_board import AGES, CardsBoard
from swd.entity_manager import EntityManager
from swd.game import Game, GameState
from swd.player import Player
class StateFeatures:
@staticmethod
def extract_state_features(state: GameState) -> List[int]:
features = [
state.age,
state.current_player_index
]
features.extend([int(x in state.progress_tokens) for x in EntityManager.progress_token_names()])
# features.extend([int(x in state.discard_pile) for x in range(EntityManager.cards_count())])
# features.append(int(state.is_double_turn))
for player_state in state.players_state:
features.append(player_state.coins)
unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
features.extend([int(x in unbuilt_wonders) for x in range(EntityManager.wonders_count())])
features.extend(list(player_state.bonuses))
features.append(state.military_track_state.conflict_pawn)
features.extend(list(state.military_track_state.military_tokens))
features.append(state.game_status.value)
# features.extend(list(state.cards_board_state.card_places.flat))
indices = np.flip(AGES[state.age] > 0, axis=0)
features.extend(list(np.flip(state.cards_board_state.card_places, axis=0)[indices]))
return features
@staticmethod
def extract_state_features_dict(state: GameState) -> Dict[str, Any]:
features = {
"age": state.age,
"current_player": state.current_player_index,
"tokens": [int(x in state.progress_tokens) for x in EntityManager.progress_token_names()],
"discard_pile": state.discard_pile,
"military_pawn": state.military_track_state.conflict_pawn,
"military_tokens": list(state.military_track_state.military_tokens),
"game_status": state.game_status.value,
"players": []
}
for player_state in state.players_state:
unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
player = {
"coins": player_state.coins,
"unbuilt_wonders": [int(x in unbuilt_wonders) for x in range(EntityManager.wonders_count())],
"bonuses": list(player_state.bonuses)
}
features["players"].append(player)
indices = np.flip(AGES[state.age] > 0, axis=0)
available_cards = CardsBoard.available_cards(state.cards_board_state)
features["cards_board"] = list(np.flip(state.cards_board_state.card_places, axis=0)[indices])
features["available_cards"] = list(map(lambda x: x[0], available_cards))
return features
@staticmethod
def extract_manual_state_features(state: GameState) -> List[int]:
features = []
features.extend([int(x in state.progress_tokens) for x in EntityManager.progress_token_names()])
for i, player_state in enumerate(state.players_state):
features.append(player_state.coins)
features.extend(list(Game.points(state, i)))
unbuilt_wonders = [x[0] for x in player_state.wonders if x[1] is None]
features.append(len(unbuilt_wonders))
if player_state.bonuses[BONUSES.index("theology")] > 0:
features.append(len(unbuilt_wonders))
else:
features.append(len([x for x in unbuilt_wonders
if ImmediateBonus.DOUBLE_TURN in EntityManager.wonder(x).immediate_bonus]))
assets = Player.assets(player_state, Player.resources(state.players_state[1 - i]), None)
features.extend(list(assets.resources))
features.extend(list(assets.resources_cost))
features.append(np.count_nonzero(player_state.bonuses[SCIENTIFIC_SYMBOLS_RANGE]))
features.append(state.military_track_state.conflict_pawn)
available_cards = [x[0] for x in CardsBoard.available_cards(state.cards_board_state)]
features.extend([int(card_id in available_cards) for card_id in range(EntityManager.cards_count())])
return features
| StarcoderdataPython |
1756504 | <gh_stars>1-10
# THIS FILE IS FOR CHECKING IF THE CODE RUNS WHEN THE TEST IMAGE WILL HAVE MORE THAN ONE FACE
from cv2 import cv2
import face_recognition
# 1ST IMAGE
imgJohnny = face_recognition.load_image_file('assets/johnny-depp.jpg')
imgJohnny= cv2.cvtColor(imgJohnny, cv2.COLOR_BGR2RGB)
johnnyLocation = face_recognition.face_locations(imgJohnny)[0]
johnnyEncode = face_recognition.face_encodings(imgJohnny)[0]
# 2ND IMAGE
imgJohnnytest = face_recognition.load_image_file('testAssets/johnny-depp-and-robert.jpg')
imgJohnnytest = cv2.cvtColor(imgJohnnytest, cv2.COLOR_BGR2RGB)
faces = face_recognition.face_locations(imgJohnnytest)
num_faces = len(faces)
for i in range(num_faces):
johnnytestLocation = face_recognition.face_locations(imgJohnnytest)[i]
johnnytestEncode = face_recognition.face_encodings(imgJohnnytest)[i]
result = face_recognition.compare_faces([johnnyEncode],johnnytestEncode)
if(result[0]):
cv2.rectangle(imgJohnny, (johnnyLocation[3], johnnyLocation[0]), (johnnyLocation[1], johnnyLocation[2]), (0, 255, 0), 2)
cv2.rectangle(imgJohnnytest, (johnnytestLocation[3], johnnytestLocation[0]), (johnnytestLocation[1], johnnytestLocation[2]), (0, 255, 0), 2)
else:
cv2.rectangle(imgJohnny, (johnnyLocation[3], johnnyLocation[0]), (johnnyLocation[1], johnnyLocation[2]), (0, 255, 0), 2)
cv2.rectangle(imgJohnnytest, (johnnytestLocation[3], johnnytestLocation[0]), (johnnytestLocation[1], johnnytestLocation[2]), (0, 0, 255), 2)
# SHOWING THE FINAL IMAGE WITH GREEN RECTANGLE
cv2.imshow('<NAME>', imgJohnny)
cv2.imshow('<NAME>', imgJohnnytest)
cv2.waitKey(0)
| StarcoderdataPython |
53241 | <filename>runtests.py
#!/usr/bin/env python
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
exampleproject_dir = os.path.join(os.path.dirname(__file__), 'example')
sys.path.insert(0, exampleproject_dir)
from django.test.utils import get_runner
from django.conf import settings
def runtests(tests=('blog', 'djadmin2',)):
'''
Takes a list as first argument, enumerating the apps and specific testcases
that should be executed. The syntax is the same as for what you would pass
to the ``django-admin.py test`` command.
Examples::
# run the default test suite
runtests()
# only run the tests from application ``blog``
runtests(['blog'])
# only run testcase class ``Admin2Test`` from app ``djadmin2``
runtests(['djadmin2.Admin2Test'])
# run all tests from application ``blog`` and the test named
# ``test_register`` on the ``djadmin2.Admin2Test`` testcase.
runtests(['djadmin2.Admin2Test.test_register', 'blog'])
'''
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(tests)
sys.exit(bool(failures))
if __name__ == '__main__':
if len(sys.argv) > 1:
tests = sys.argv[1:]
runtests(tests)
else:
runtests()
| StarcoderdataPython |
4837252 | from heapq import heappush, heappop
class Solution:
def solve(self, heights, k):
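        # Returns (sorted) indices of buildings strictly taller than every building
        # within the next k positions to their right; a max-heap keyed on negated
        # heights keeps the window maximum while scanning right to left.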
pq = []
ans = []
for i in range(len(heights)-1,-1,-1):
while pq and pq[0][1] > i+k: heappop(pq)
if not pq or heights[i] > -pq[0][0]: ans.append(i)
heappush(pq, [-heights[i],i])
return sorted(ans)
| StarcoderdataPython |
1693158 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import argparse
import align.detect_face
import glob
from pdb import set_trace as bp
from six.moves import xrange
from dataset.dataset_helpers import *
import torch
from torch.utils import data
from torchvision import transforms as T
import torchvision
from PIL import Image
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from models.resnet import *
from models.irse import *
from helpers import *
"""
#################################################################################
#################################################################################
#################################################################################
ARCFACE LOSS MS1-Celeb
#################################################################################
python3 app/export_embeddings_npy.py ./pth/IR_50_MODEL_arcface_ms1celeb_epoch90_lfw9962.pth ./data/golovan_112/ \
--mean_per_class 1 \
--is_aligned 1 \
--with_demo_images 1 \
--image_size 112 \
--image_batch 5 \
--embeddings_name embeddings_arcface_1.npy \
--labels_strings_array labels_strings_arcface_1.npy
"""
class FacesDataset(data.Dataset):
def __init__(self, image_list, label_list, names_list, num_classes, is_aligned, image_size, margin, gpu_memory_fraction, demo_images_path=None):
self.image_list = image_list
self.label_list = label_list
self.names_list = names_list
self.num_classes = num_classes
self.is_aligned = is_aligned
self.demo_images_path = demo_images_path
self.image_size = image_size
self.margin = margin
self.gpu_memory_fraction = gpu_memory_fraction
self.static = 0
def __getitem__(self, index):
img_path = self.image_list[index]
img = Image.open(img_path)
data = img.convert('RGB')
if self.is_aligned==1:
image_data_rgb = np.asarray(data) # (160, 160, 3)
else:
image_data_rgb = load_and_align_data(img_path, self.image_size, self.margin, self.gpu_memory_fraction)
ccropped, flipped = crop_and_flip(image_data_rgb, for_dataloader=True)
# bp()
# print("\n\n")
# print("### image_data_rgb shape: " + str(image_data_rgb.shape))
# print("### CCROPPED shape: " + str(ccropped.shape))
# print("### FLIPPED shape: " + str(flipped.shape))
# print("\n\n")
if self.demo_images_path is not None:
################################################
### SAVE Demo Images
prefix = str(self.static)+ '_' + str(self.names_list[index])
## Save Matplotlib
im_da = np.asarray(image_data_rgb)
plt.imsave(self.demo_images_path + prefix + '.jpg', im_da)
## Save OpenCV
# image_BGR = cv2.cvtColor(image_data_rgb, cv2.COLOR_RGB2BGR)
# cv2.imwrite(self.demo_images_path + prefix + '.png', image_BGR)
self.static += 1
################################################
# data = self.transforms(data)
label = self.label_list[index]
name = self.names_list[index]
return ccropped, flipped, label, name
def __len__(self):
return len(self.image_list)
def main(ARGS):
np.set_printoptions(threshold=sys.maxsize)
out_dir = ARGS.output_dir
if not os.path.isdir(out_dir): # Create the out directory if it doesn't exist
os.makedirs(out_dir)
images_dir=None
if ARGS.with_demo_images==1:
images_dir = os.path.join(os.path.expanduser(out_dir), 'demo_images/')
if not os.path.isdir(images_dir): # Create the out directory if it doesn't exist
os.makedirs(images_dir)
train_set = get_dataset(ARGS.data_dir)
image_list, label_list, names_list = get_image_paths_and_labels(train_set)
faces_dataset = FacesDataset(image_list=image_list,
label_list=label_list,
names_list=names_list,
num_classes=len(train_set),
is_aligned=ARGS.is_aligned,
image_size=ARGS.image_size,
margin=ARGS.margin,
gpu_memory_fraction=ARGS.gpu_memory_fraction,
demo_images_path=images_dir)
loader = torch.utils.data.DataLoader(faces_dataset, batch_size=ARGS.image_batch,
shuffle=False, num_workers=ARGS.num_workers)
# fetch the classes (labels as strings) exactly as it's done in get_dataset
path_exp = os.path.expanduser(ARGS.data_dir)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
# get the label strings
label_strings = [name for name in classes if \
os.path.isdir(os.path.join(path_exp, name))]
####### Model setup
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = IR_50([112, 112])
model.load_state_dict(torch.load(ARGS.model, map_location='cpu'))
model.to(device)
model.eval()
embedding_size = 512
# emb_array = np.zeros((nrof_images, embedding_size))
start_time = time.time()
# ###### IMAGE
# img_path = './data/test_image.png'
# img = Image.open(img_path)
# image_data = img.convert('RGB')
# image_data_rgb = np.asarray(image_data) # shape=(160, 160, 3) color_array=(255, 255, 255)
# ccropped_im, flipped_im = crop_and_flip(image_data_rgb, for_dataloader=False)
# feats_im = extract_norm_features(ccropped_im, flipped_im, model, device, tta = True)
########################################
# nrof_images = len(loader.dataset)
nrof_images = len(image_list)
emb_array = np.zeros((nrof_images, embedding_size))
# lab_array = np.zeros((nrof_images,))
lab_array = np.zeros((0,0))
# nam_array = np.chararray((nrof_images,))
batch_ind = 0
with torch.no_grad():
for i, (ccropped, flipped, label, name) in enumerate(loader):
ccropped, flipped, label = ccropped.to(device), flipped.to(device), label.to(device)
# feats = model(data)
feats = extract_norm_features(ccropped, flipped, model, device, tta = True)
# for j in range(len(ccropped)):
# # bp()
# dist = distance(feats_im.cpu().numpy(), feats[j].view(1,-1).cpu().numpy())
# # dist = distance(feats_im, feats[j])
# print("11111 Distance Eugene with {} is {}:".format(name[j], dist))
emb = feats.cpu().numpy()
lab = label.detach().cpu().numpy()
# nam_array[lab] = name
# lab_array[lab] = lab
for j in range(len(ccropped)):
emb_array[j+batch_ind, :] = emb[j, :]
lab_array = np.append(lab_array,lab)
# print("\n")
# for j in range(len(ccropped)):
# dist = distance(feats_im.cpu().numpy(), np.expand_dims(emb_array[j+batch_ind], axis=0))
# # dist = distance(feats_im, feats[j])
# print("22222 Distance Eugene with {} is {}:".format(name[j], dist))
# print("\n")
batch_ind += len(ccropped)
percent = round(100. * i / len(loader))
print('.completed {}% Run time: {}'.format(percent, timedelta(seconds=int(time.time() - start_time))), end='\r')
print('', end='\r')
print(60*"=")
print("Done with embeddings... Exporting")
if ARGS.mean_per_class==1:
print("Exporting embeddings mean for class")
label_strings = np.array(label_strings)
label_strings_all = label_strings[label_list]
all_results_dict = {}
for j in range(nrof_images):
embedding = emb_array[j,:]
label = label_strings_all[j]
if label in all_results_dict: # if label value in dictionary
arr = all_results_dict.get(label)
arr.append(embedding)
else:
all_results_dict[label] = [embedding]
## Saving mean
nrof_classes = len(classes)
emb_array_out = np.zeros((nrof_classes, embedding_size))
lab_array_out = np.zeros((0,0))
label_strings_out = []
embedding_index = 0
for key, embeddings_arr in all_results_dict.items():
numpy_arr = np.array(embeddings_arr)
mean = np.mean(numpy_arr, axis=0)
emb_array_out[embedding_index] = mean
lab_array_out = np.append(lab_array_out, embedding_index)
embedding_index += 1
label_strings_out.append(key)
# export emedings and labels
np.save(out_dir + ARGS.embeddings_name, emb_array_out)
# np.save(out_dir + ARGS.labels, lab_array_out)
label_strings = np.array(label_strings_out)
np.save(out_dir + ARGS.labels_strings_array, label_strings)
else:
print("Exporting All embeddings")
# export emedings and labels
np.save(out_dir + ARGS.embeddings_name, emb_array)
# np.save(out_dir + ARGS.labels, lab_array)
label_strings = np.array(label_strings)
np.save(out_dir + ARGS.labels_strings_array, label_strings[label_list])
total_time = timedelta(seconds=int(time.time() - start_time))
print(60*"=")
print('All done. Total time: ' + str(total_time))
def load_and_align_data(image_path, image_size, margin, gpu_memory_fraction):
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
print('🎃 Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
print(image_path)
img = misc.imread(os.path.expanduser(image_path))
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
det = np.squeeze(bounding_boxes[0,0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-margin/2, 0)
bb[1] = np.maximum(det[1]-margin/2, 0)
bb[2] = np.minimum(det[2]+margin/2, img_size[1])
bb[3] = np.minimum(det[3]+margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
img = aligned
return img
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='pth model file')
parser.add_argument('data_dir', type=str, help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.')
parser.add_argument('--output_dir', type=str, help='Dir where to save all embeddings and demo images', default='output_arrays/')
parser.add_argument('--mean_per_class', type=int, help='Export mean of all embeddings for each class 0:False 1:True', default=1)
parser.add_argument('--is_aligned', type=int, help='Is the data directory already aligned and cropped? 0:False 1:True', default=1)
parser.add_argument('--with_demo_images', type=int, help='Embedding Images 0:False 1:True', default=1)
parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=112)
parser.add_argument('--margin', type=int, help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--gpu_memory_fraction', type=float, help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--image_batch', type=int, help='Number of images stored in memory at a time. Default 64.', default=64)
parser.add_argument('--num_workers', type=int, help='Number of threads to use for data pipeline.', default=8)
# numpy file Names
parser.add_argument('--embeddings_name', type=str, help='Enter string of which the embeddings numpy array is saved as.', default='embeddings.npy')
parser.add_argument('--labels_strings_array', type=str, help='Enter string of which the labels as strings numpy array is saved as.', default='label_strings.npy')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| StarcoderdataPython |
4801096 | <reponame>tmtsoftware/csw-python<filename>csw/EventTime.py
from datetime import datetime,timezone
from dataclasses import dataclass, asdict
@dataclass
class EventTime:
"""
Creates an EventTime containing seconds since the epoch (1970) and the offset from seconds in nanoseconds
"""
seconds: int
nanos: int
def __str__(self):
secs = self.seconds + self.nanos / 1e9
dt = datetime.fromtimestamp(secs, timezone.utc)
return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
def _asDict(self):
return asdict(self)
@staticmethod
def _fromDict(obj: dict):
return EventTime(**obj)
@staticmethod
def fromSystem():
"""
Returns an EventTime with the current time.
"""
t = datetime.now(timezone.utc).timestamp()
seconds = int(t)
nanos = int((t - seconds) * 1e9)
return EventTime(seconds, nanos)
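# Brief usage sketch (assumed, not part of the original file): capture the current UTC
# time and round-trip it through the dict helpers.
if __name__ == "__main__":
    t = EventTime.fromSystem()
    print(t)
    print(EventTime._fromDict(t._asDict()))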
| StarcoderdataPython |
4817654 | <gh_stars>10-100
import numpy as np
import nibabel as nib
import random
import itertools
def generate_permutation_keys():
"""
This function returns a set of "keys" that represent the 48 unique rotations &
reflections of a 3D matrix.
Each item of the set is a tuple:
((rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)
As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is
rotated 90 degrees around the z-axis, then reversed on the y-axis, and then
transposed.
48 unique rotations & reflections:
https://en.wikipedia.org/wiki/Octahedral_symmetry#The_isometries_of_the_cube
"""
return set(itertools.product(
itertools.combinations_with_replacement(range(2), 2), range(2), range(2), range(2), range(2)))
def random_permutation_key():
"""
Generates and randomly selects a permutation key. See the documentation for the
"generate_permutation_keys" function.
"""
return random.choice(list(generate_permutation_keys()))
def permute_data(data, key):
"""
Permutes the given data according to the specification of the given key. Input data
must be of shape (n_modalities, x, y, z).
    Input key is a tuple: ((rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose)
As an example, ((0, 1), 0, 1, 0, 1) represents a permutation in which the data is
rotated 90 degrees around the z-axis, then reversed on the y-axis, and then
transposed.
"""
data = np.copy(data)
(rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key
if rotate_y != 0:
data = np.rot90(data, rotate_y, axes=(1, 3))
if rotate_z != 0:
data = np.rot90(data, rotate_z, axes=(2, 3))
if flip_x:
data = data[:, ::-1]
if flip_y:
data = data[:, :, ::-1]
if flip_z:
data = data[:, :, :, ::-1]
if transpose:
for i in range(data.shape[0]):
data[i] = data[i].T
return data
def random_permutation_x_y(x_data, y_data):
"""
Performs random permutation on the data.
:param x_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).
:param y_data: numpy array containing the data. Data must be of shape (n_modalities, x, y, z).
:return: the permuted data
"""
key = random_permutation_key()
return permute_data(x_data, key), permute_data(y_data, key)
def reverse_permute_data(data, key):
key = reverse_permutation_key(key)
data = np.copy(data)
(rotate_y, rotate_z), flip_x, flip_y, flip_z, transpose = key
if transpose:
for i in range(data.shape[0]):
data[i] = data[i].T
if flip_z:
data = data[:, :, :, ::-1]
if flip_y:
data = data[:, :, ::-1]
if flip_x:
data = data[:, ::-1]
if rotate_z != 0:
data = np.rot90(data, rotate_z, axes=(2, 3))
if rotate_y != 0:
data = np.rot90(data, rotate_y, axes=(1, 3))
return data
def reverse_permutation_key(key):
rotation = tuple([-rotate for rotate in key[0]])
return rotation, key[1], key[2], key[3], key[4]
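# Quick round-trip sketch (assumed usage, not part of the original module):
# permuting a cubic volume and reversing it with the same key should give back
# the original array.
if __name__ == "__main__":
    key = random_permutation_key()
    volume = np.random.rand(4, 8, 8, 8)  # (n_modalities, x, y, z), cubic in space
    restored = reverse_permute_data(permute_data(volume, key), key)
    print(np.array_equal(volume, restored))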
| StarcoderdataPython |
3323490 | from django.conf.urls import include, url
from fir_irma.settings import settings
api_urlpatterns = [
url(r'^$', 'fir_irma.views.not_found', name='base'),
url(r'^scans$', 'fir_irma.views.irma_scan_new'),
url(r'^scans/(?P<scan_id>[^/]+)/files$', 'fir_irma.views.irma_scan_upload'),
url(r'^scans/(?P<scan_id>[^/]+)/launch$', 'fir_irma.views.irma_scan_launch'),
url(r'^scans/(?P<scan_id>[^/]+)(?P<tail>(?:.*)?)$', 'fir_irma.views.irma_scan_generic'),
url(r'^probes$', 'fir_irma.views.irma_probes'),
url(r'^search/files', 'fir_irma.views.irma_search'),
]
common_urlpatterns = [
url(r'^(?P<sub>selection|upload|search|maintenance|)$', 'fir_irma.views.irma_index', name='index'),
url(r'^scan/(?P<scan_id>[a-zA-Z0-9\-]+)(?:/.*)?$', 'fir_irma.views.irma_redirect_index', name='details'),
url(r'^views/(?P<name>maintenance|selection|search|details|scan|upload)\.html$', 'fir_irma.views.irma_view',
name='view'),
url(r'^js/irma.js$', 'fir_irma.views.irma_app', name='app'),
]
urlpatterns = [
url(r'^api/v1/', include(api_urlpatterns, namespace='api')),
]
if settings.IRMA_HAS_UI:
urlpatterns += [
url(r'^', include(common_urlpatterns, namespace='ui')),
]
| StarcoderdataPython |
1703053 | <filename>bookstore/apps/catalog/admin.py
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from mptt.admin import DraggableMPTTAdmin
from image_cropping import ImageCroppingMixin
from bookstore.apps.catalog.models import (
BookImages,
AuthorImages,
Category,
PublishingCompany,
Author,
Book,
BookReview,
)
from bookstore.apps.catalog.forms import (
BookImagesForm,
AuthorImagesForm,
AuthorForm,
BookForm,
)
class BookImagesAdmin(ImageCroppingMixin, admin.ModelAdmin):
model = BookImages
form = BookImagesForm
fieldsets = (
(
None,
{"fields": ("book", "image", "list_page_cropping", "detail_page_cropping")},
),
)
class AuthorImagesAdmin(ImageCroppingMixin, admin.ModelAdmin):
model = AuthorImages
form = AuthorImagesForm
fieldsets = (
(
None,
{
"fields": (
"author",
"image",
"list_page_cropping",
"detail_page_cropping",
)
},
),
)
class BookImagesInline(ImageCroppingMixin, admin.TabularInline):
model = BookImages
class CategoryAdmin(DraggableMPTTAdmin, admin.ModelAdmin):
model = Category
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"), "parent")}),)
class PublishingCompanyAdmin(admin.ModelAdmin):
model = PublishingCompany
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"),)}),)
class AuthorAdmin(admin.ModelAdmin):
model = Author
form = AuthorForm
list_per_page = 20
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"), "about_of")}),)
class BookAdmin(admin.ModelAdmin):
model = Book
form = BookForm
# inlines = [BookImagesInline]
list_per_page = 15
save_on_top = True
save_as = True
ordering = ("title", "-created")
search_fields = ("title", "original_title", "isbn")
list_display = (
"title",
"isbn",
"get_authors",
"get_categories",
"dimensions_of_the_book",
"price",
"quantity",
)
list_display_links = ["title"]
list_filter = (
"is_active",
"is_featured",
"availability_of_stock",
"hardback",
"author",
)
date_hierarchy = "created"
radio_fields = {
"availability_of_stock": admin.HORIZONTAL,
"show_real_price": admin.HORIZONTAL,
}
filter_horizontal = ["author", "category"]
prepopulated_fields = {"slug": ("title",)}
fieldsets = (
("Product info", {"fields": (("visible_where", "is_active", "is_featured"),)}),
(
"Book info",
{
"fields": (
("title", "slug"),
("isbn", "publishing_company"),
"synopsis",
("language", "num_of_pages", "hardback"),
("author", "category"),
)
},
),
(
"Dimensions of the book",
{"fields": (("length", "height", "width"), "weight")},
),
(
"Inventory info",
{
"fields": (
"availability_of_stock",
("quantity", "notify_when_stock_is_exhausted"),
(
"inventory_maintenance_unit",
"quantity_out_of_stock",
"maximum_quantity_in_the_shopping_cart",
),
)
},
),
(
"Prices",
{
"fields": (
"show_real_price",
"price",
"cost_price",
"special_price",
("special_price_from_date", "special_price_to_date"),
)
},
),
)
def get_authors(self, obj):
return ",\n".join([a.name for a in obj.author.all()])
get_authors.short_description = _("Author(s)")
def get_categories(self, obj):
return ",\n".join([c.name for c in obj.category.all()])
get_categories.short_description = _("Categories")
class BookReviewAdmin(admin.ModelAdmin):
model = BookReview
ordering = ("-created",)
search_fields = ("user", "book__title")
list_display = ("book", "get_short_comment", "user", "number_of_stars")
list_display_links = ["book"]
list_filter = ("number_of_stars",)
date_hierarchy = "created"
list_per_page = 15
admin.site.register(Author, AuthorAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(PublishingCompany, PublishingCompanyAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(BookImages, BookImagesAdmin)
admin.site.register(AuthorImages, AuthorImagesAdmin)
admin.site.register(BookReview, BookReviewAdmin)
| StarcoderdataPython |
3268042 | <reponame>RandallLDavis/SQLAlchemy--Challenge<gh_stars>0
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
app.config["JSON_SORT_KEYS"] = False
#home base routes
@app.route("/")
def index():
return (
f"Available Routes:<br>"
f"-------------------------<br>"
f"Precipitation for last year: /api/v1.0/precipitation<br/>"
f"List of all stations: /api/v1.0/stations<br/>"
f"Date and temperature observations from the last year: /api/v1.0/tobs<br/>"
f"Min, Avg, Max Temp given a start date up to most current date in db: /api/v1.0/2012-05-15<br/>"
f"Min, Avg, Max Temp given a start and end date: /api/v1.0/2015-09-12/2015-09-13<br/>"
)
# date routes
@app.route("/api/v1.0/precipitation")
def precipitation():
    precip_stats = session.query(Measurement.date, Measurement.prcp)\
        .order_by(Measurement.date).all()
    precip_data = []
    for r in precip_stats:
        precip_dict = {}
        precip_dict['date'] = r.date
        precip_dict['prcp'] = r.prcp
        precip_data.append(precip_dict)
    return jsonify(precip_data)
# station route
@app.route("/api/v1.0/stations")
def stations():
    station_data = session.query(Station.name, Station.station)\
        .group_by(Station.name).all()
    stations_data = []
    for r in station_data:
        stations_dict = {}
        stations_dict['name'] = r.name
        stations_dict['station'] = r.station
        stations_data.append(stations_dict)
    return jsonify(stations_data)
# temperature observations route
@app.route("/api/v1.0/tobs")
def tobs():
    tempobs = session.query(Measurement.date, Measurement.tobs)\
        .order_by(Measurement.date).all()
    tobs_data = []
    for r in tempobs:
        tobs_dict = {}
        tobs_dict['date'] = r.date
        tobs_dict['tobs'] = r.tobs
        tobs_data.append(tobs_dict)
    return jsonify(tobs_data)
# temperature stats with only start date route
@app.route("/api/v1.0/<start>")
def temp_stats_start(start):
    temperature_stats = session.query(func.min(Measurement.tobs).label('min'),
                                      func.avg(Measurement.tobs).label('avg'),
                                      func.max(Measurement.tobs).label('max'))\
        .filter(Measurement.date >= start).all()
    start_stats_data = []
    for r in temperature_stats:
        start_stats_dict = {}
        start_stats_dict['Start Date'] = start
        start_stats_dict['Min Temp'] = r.min
        start_stats_dict['Avg Temp'] = r.avg
        start_stats_dict['Max Temp'] = r.max
        start_stats_data.append(start_stats_dict)
    return jsonify(start_stats_data)
# temperature stats with both start and end dates route
@app.route("/api/v1.0/<start>/<end>")
def temp_stats_start_end(start, end):
    temp_stats = session.query(func.min(Measurement.tobs).label('min'),
                               func.avg(Measurement.tobs).label('avg'),
                               func.max(Measurement.tobs).label('max'))\
        .filter(Measurement.date >= start)\
        .filter(Measurement.date <= end).all()
    start_end_stats_data = []
    for r in temp_stats:
        start_end_stats_dict = {}
        start_end_stats_dict['Start Date'] = start
        start_end_stats_dict['End Date'] = end
        start_end_stats_dict['Min Temp'] = r.min
        start_end_stats_dict['Avg Temp'] = r.avg
        start_end_stats_dict['Max Temp'] = r.max
        start_end_stats_data.append(start_end_stats_dict)
    return jsonify(start_end_stats_data)
if __name__ == '__main__':
app.run(debug=True)
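# Example requests once the app is running locally (default Flask port assumed):
#   http://127.0.0.1:5000/api/v1.0/precipitation
#   http://127.0.0.1:5000/api/v1.0/2012-05-15
#   http://127.0.0.1:5000/api/v1.0/2015-09-12/2015-09-13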
| StarcoderdataPython |
3298796 | <reponame>SDM-TIB/korona-graph-partitioning<filename>1_data_processing/4. author similarity.py<gh_stars>0
from __future__ import division
from openpyxl import Workbook
from array import array
#from openpyxl.utils import coordinate_from_string, column_index_from_string
from openpyxl.utils import column_index_from_string
from openpyxl.utils.cell import coordinate_from_string
from openpyxl.utils import get_column_letter
import urllib2,re
from bs4 import BeautifulSoup
import urllib,sys
from urllib2 import Request, urlopen, URLError
import openpyxl
import os,glob
import os.path as path
reload(sys)
sys.setdefaultencoding('UTF-8')
inputpath = path.abspath(path.join(__file__ ,"../")) + "/nt-files/" #we have to iterate through the entire DBLP database
authorfile= os.getcwd() + "/output/author-list.txt"
author_Matrix_File = os.getcwd() + "/output/Auth_matrix.txt"
x = glob.glob("%s*.nt" %inputpath)
f_author_file = open(authorfile, 'r')
author_dict = {}
author_list = []
author_count = 0
for row in f_author_file:
author_dict[row.rstrip("\n")] = author_count
author_list.append([row.rstrip("\n"),0])
author_count+=1
f_author_file.close()
# Here we calculate the similarity of two authors which builds up the similarity matrix of authors.
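# The score is counterBoth / counterEither: conference appearances the two authors share,
# divided by the appearances accumulated across either author (1.0 when comparing an author with themselves).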
def SimilarityOfAuthors(auth1,auth2):
if (auth1 == auth2):
return float(1.0)
#Things will change from here
global author_list
global author_dict
counterBoth = 0
counterEither = 0
visited_Conf = []
key1 = author_dict.get(auth1)
key2 = author_dict.get(auth2)
No_entries1 = author_list[key1][1]
No_entries2 = author_list[key2][1]
for i in range(No_entries1):
Conf_x = author_list[key1][i+2][0]
visited_Conf.append(Conf_x)
Counter_x = author_list[key1][i+2][1]
Conf_x_present = 0
for j in range(No_entries2):
Conf_y = author_list[key2][j+2][0]
Counter_y = author_list[key2][j+2][1]
if Conf_x == Conf_y:
Conf_x_present = 1
if Counter_y> Counter_x:
counterBoth = counterBoth + Counter_x
counterEither = counterEither + Counter_x
else:
counterBoth = counterBoth + Counter_y
counterEither = counterEither + Counter_y
if Conf_x_present == 0:
counterEither = counterEither + Counter_x
for i in range(No_entries2):
Conf_y = author_list[key2][i+2][0]
if Conf_y not in visited_Conf:
counterEither = counterEither + author_list[key2][i+2][1]
if counterEither == 0:
return 0
similarity = float(counterBoth)/counterEither
return similarity
#################******************~~~~~~~~~~~~Processing starts here~~~~~~~~~~~~*******************#######################
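# Walk every DBLP .nt file and, for each "#authoredBy" triple, count how many of the
# author's publications belong to each conference; these counts feed SimilarityOfAuthors above.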
for current_file in x:
nt_file_pointer = open(current_file, 'r')
for row in nt_file_pointer:
if row == "\n":
continue
eachnumber = re.split(r' ', row)
if "#authoredBy" not in eachnumber[1]:
continue
auth = eachnumber[2].strip("<").strip(">")
if auth not in author_dict:
continue
conf = eachnumber[0].strip("<").strip(">").rsplit('/',1)[0]
auth_pointer = author_dict.get(auth)
if author_list[auth_pointer][1] == 0:
author_list[auth_pointer][1] = 1
author_list[auth_pointer].append([conf,1])
else:
counter = author_list[auth_pointer][1]
list_pointer = 0
for i in range(counter):
if conf in author_list[auth_pointer][i+2]:
list_pointer = i+2
break
if list_pointer ==0:
author_list[auth_pointer][1] = counter + 1
author_list[auth_pointer].append([conf,1])
list_pointer = counter + 2
conf_value = author_list[auth_pointer][list_pointer][1]
author_list[auth_pointer][list_pointer] = [conf,conf_value+1]
nt_file_pointer.close()
#Here we save the Author Similarity Matrix File.
f_auth_matrix = open(author_Matrix_File, 'w+')
BPGraph_AuthorSimilarity=[[0 for x in range(author_count)] for y in range(author_count)]
for key1, value1 in sorted(author_dict.iteritems(), key=lambda (k,v): (v,k)):
for key2, value2 in sorted(author_dict.iteritems(), key=lambda (k,v): (v,k)):
if (value2 >= value1):
similarityValue = SimilarityOfAuthors(key1,key2)
BPGraph_AuthorSimilarity[value1][value2] =similarityValue
BPGraph_AuthorSimilarity[value2][value1] = BPGraph_AuthorSimilarity[value1][value2]
#print similarityValue
BPGraph_string = ""
for i in range(author_count):
for j in range(author_count):
BPGraph_string += str(BPGraph_AuthorSimilarity[i][j]) + " "
BPGraph_string = BPGraph_string[:-1]
BPGraph_string += "\n"
BPGraph_string = str(author_count) + "\n" + BPGraph_string
f_auth_matrix.write(BPGraph_string)
del BPGraph_AuthorSimilarity
del BPGraph_string
f_auth_matrix.close()
| StarcoderdataPython |
3277284 | """
Apply a transformation matrix produced by align_epi_anat.py.
Created 11/16/2021 by <NAME>.
<EMAIL>
"""
from os import PathLike
import subprocess
def main(in_image: PathLike, in_matrix: PathLike, out_prefix: PathLike) -> None:
"""
Apply a 1D transformation matrix to an image.
Args:
in_image (PathLike): Path to image to transform.
in_matrix (PathLike): Path to matrix to use.
out_prefix (PathLike): Where to write transformed image.
"""
command = f"""
3dAllineate
-cubic
-1Dmatrix_apply {in_matrix}
-prefix {out_prefix}
{in_image}
""".split()
print(command)
subprocess.run(command, check=True)
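# Example call (hypothetical file names, shown only for illustration):
# main(in_image="epi_run1+orig", in_matrix="epi_al_mat.aff12.1D", out_prefix="epi_run1_aligned")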
| StarcoderdataPython |
189338 | <reponame>vascoalramos/misago-deployment
from django.core.cache import cache
from ..cache.versions import invalidate_cache
from . import MENU_ITEMS_CACHE
def get_menus_cache(cache_versions):
key = get_cache_key(cache_versions)
return cache.get(key)
def set_menus_cache(cache_versions, menus):
key = get_cache_key(cache_versions)
cache.set(key, menus)
def get_cache_key(cache_versions):
return "%s_%s" % (MENU_ITEMS_CACHE, cache_versions[MENU_ITEMS_CACHE])
def clear_menus_cache():
invalidate_cache(MENU_ITEMS_CACHE)
| StarcoderdataPython |
18899 | <filename>CollabMoodle.py
import datetime
from webService import WebService
import Utilidades as ut
import sys
if __name__ == "__main__":
param = ut.mainMoodle(sys.argv[1:])
#param = 'moodle_plugin_sessions.txt', '', '2020-08-01 00:00:00,2020-12-31 00:00:00'
webService = WebService()
report = []
ret = 0
dates = param[2].split(",")
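    # param[0]: path to the file listing session UUIDs; param[2]: comma-separated start/end dates
    # used below to filter and download recordings.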
if param[0] != '' and param[1] == '':
print("Moodle Sesions...")
moodlSession = ut.leerUUID(param[0])
for sesion in moodlSession:
try:
nombre_session, date_session = webService.get_moodle_sesion_name(sesion)
except:
                print('Web service error')
nombre_session = None
if nombre_session == None or nombre_session == ' ':
print("Session name not found!")
else:
print(nombre_session)
try:
lista_grabaciones = webService.get_moodle_lista_grabaciones(nombre_session, dates, date_session)
except:
lista_grabaciones = None
if lista_grabaciones is None:
print("There's no recording for: " + nombre_session)
else:
for grabacion in lista_grabaciones:
try:
ret = ut.downloadrecording(grabacion['recording_id'],grabacion['recording_name'], dates)
except:
ret = 2
try:
if ret == 1:
report.append([grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 2:
report.append(
                                    ['Download error', grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 3:
if [grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']] in report:
print("EXISTE")
else:
report.append(
[grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
except:
print("Nao foi possivel criar o relatorio")
if len(report) > 0:
try:
print(ut.crearReporteMoodle(report, dates))
except:
print("Nao foi possivel criar o relatorio")
else:
        print('No recordings were found')
| StarcoderdataPython |
20813 | from cv2 import fastNlMeansDenoisingColored
from cv2 import cvtColor
from cv2 import bitwise_not,threshold,getRotationMatrix2D
from cv2 import warpAffine,filter2D,imread
from cv2 import THRESH_BINARY,COLOR_BGR2GRAY,THRESH_OTSU
from cv2 import INTER_CUBIC,BORDER_REPLICATE,minAreaRect
from numpy import column_stack,array,where
from matplotlib.pyplot import imshow,xticks,yticks
from pytesseract import image_to_string,pytesseract
from PIL import Image
class ImageProcess:
    '''Remove noise from the image using OpenCV non-local means denoising.'''
def remove_noise(image):
image = fastNlMeansDenoisingColored(image,None,20,10,7,21)
return image
    '''Remove skew: estimate the text angle from a thresholded image,
    then rotate the image by that angle.'''
def remove_skew(image):
in_gray = cvtColor(image, COLOR_BGR2GRAY)
in_gray = bitwise_not(in_gray)
thresh_pic = threshold(in_gray, 0, 255,THRESH_BINARY | THRESH_OTSU)[1]
coords_x_y = column_stack(where(thresh_pic > 0))
angle = minAreaRect(coords_x_y)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
(h, w) = image.shape[:2]
center_of_pic = (w // 2, h // 2)
M = getRotationMatrix2D(center_of_pic, angle, 1.0)
image = warpAffine(image, M, (w, h),flags=INTER_CUBIC, borderMode=BORDER_REPLICATE)
return image
    '''Reduce blur by sharpening the image with a convolution kernel.'''
def shapness_blur(image):
sharpen_kernel = array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
image = filter2D(image, -1, sharpen_kernel)
return image
    '''Extract text from the image with pytesseract, trying two common Tesseract install paths.'''
def to_text(image):
try:
pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
string_from_image = image_to_string(image,lang='eng')
except Exception:
pytesseract.tesseract_cmd = r"C:\Program Files(x86)\Tesseract-OCR\tesseract.exe"
string_from_image = image_to_string(image,lang='eng')
return string_from_image
    # Plot the image in the output, hiding the axis ticks.
def plot_image(image):
imshow(image)
xticks([])
yticks([])
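# Example pipeline (hypothetical image path, shown only for illustration):
# img = imread("scanned_page.png")
# img = ImageProcess.remove_noise(img)
# img = ImageProcess.remove_skew(img)
# img = ImageProcess.shapness_blur(img)
# print(ImageProcess.to_text(img))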
| StarcoderdataPython |
4811968 | ''' This file contains function definitions for language translation '''
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
version = os.environ['version']
authenticator = IAMAuthenticator( apikey )
language_translator = LanguageTranslatorV3(
version=version,
authenticator=authenticator
)
language_translator.set_service_url( url )
def english_to_french(english_text):
'''translates text from English to French'''
if english_text is None :
french_text = None
    elif english_text == '':
french_text = None
else:
french_text = language_translator.translate(
english_text,
model_id='en-fr').get_result()
return french_text
def french_to_english(french_text):
    '''translates text from French to English'''
if french_text is None :
english_text = None
    elif french_text == '':
english_text = None
else:
english_text = language_translator.translate(
french_text,
model_id='fr-en').get_result()
return english_text
# TESTS
#e_input = 'Hello'
#f_out = english_to_french( e_input )
#print( f_out.get('translations')[0].get('translation') )
#f_in = 'Bonjour'
#e_out = french_to_english ( f_in )
#print( e_out.get('translations')[0].get('translation') )
| StarcoderdataPython |
89618 | """Renaming delimiters table to limiters
Revision ID: 17346cf564bc
Revises: <PASSWORD>
Create Date: 2014-03-07 14:45:27.909631
"""
# revision identifiers, used by Alembic.
revision = '17346cf564bc'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.rename_table('delimiters', 'limiters')
def downgrade():
op.rename_table('limiters', 'delimiters')
| StarcoderdataPython |
3341487 | <reponame>mrsixw/git-statistics
from collections import Counter
import dateparser
def _get_branch_id(query_func, branch):
__BRANCH_SQL = """
SELECT branch_id FROM git_branches
WHERE git_branches.branch_name = ?
"""
branch_id = query_func(__BRANCH_SQL, (branch,), one=True)['branch_id']
return branch_id
def _get_commits_for_branch(query_func, branch_id):
__COMMIT_SQL = """
SELECT * FROM git_commit WHERE branch_id = ?
"""
commits = query_func(__COMMIT_SQL, (branch_id,),)
return commits
def generate_branch_insight(query_func, branch = None):
branch_id = _get_branch_id(query_func, branch)
#print branch_id
branch_commits = _get_commits_for_branch(query_func,branch_id)
_FILE_SQL = """
SELECT * from commit_file INNER JOIN file USING (file_id) INNER JOIN git_commit using (commit_hash) WHERE branch_id = ?;
"""
file_change_data = query_func(_FILE_SQL,(branch_id,))
commit_data = {}
commit_data['raw'] = branch_commits
commit_data['total_branch_lines_additions'] = sum([int(x['additions']) for x in file_change_data])
commit_data['total_branch_lines_deletions'] = sum([int(x['deletions']) for x in file_change_data])
commit_data['earliest_brach_commit'] = min([x['commit_date'] for x in branch_commits])
commit_data['recent_branch_commit'] = max([x['commit_date'] for x in branch_commits])
commiters = [x['committer'] for x in branch_commits]
commit_data['files_changed'] = [x['file_path'] for x in file_change_data]
commit_data['top_commiters'] = Counter(commiters).most_common(10)
commit_data['unique_files_changed'] = Counter(commit_data['files_changed'])
commit_data['popular_files_changed'] = Counter(commit_data['files_changed']).most_common(50)
#print len (branch_commits)
return commit_data
def generate_monthly_commit_data(query_func, branch = None):
branch_id = _get_branch_id(query_func, branch)
_COMMIT_SQL = """
SELECT * FROM git_commit INNER JOIN git_branches using (branch_id) WHERE branch_id = ?;
"""
branch_commits = query_func(_COMMIT_SQL, (branch_id,))
commits_per_month = []
for date in [x['commit_date'] for x in branch_commits]:
d = dateparser.parse(date)
commits_per_month.append("%s-%s" % (d.year,str(d.month).zfill(2)))
return Counter(commits_per_month)
def generate_month_change_data(query_func, branch):
branch_id = _get_branch_id(query_func,branch)
#print branch_id
branch_commits = _get_commits_for_branch(query_func,branch_id)
commit_changes = {}
for x in branch_commits:
#print commit_changes
_CHANGE_SQL = """
SELECT * FROM commit_file where commit_hash = ?;
"""
changes = query_func(_CHANGE_SQL,(x['commit_hash'],))
commit_additions = sum([int(y['additions']) for y in changes])
commit_deletions = sum([int(y['deletions']) for y in changes])
d = dateparser.parse(x['commit_date'])
key = "%s-%s" % (d.year, str(d.month).zfill(2))
if commit_changes.has_key(key):
commit_changes[key]['additions'] += commit_additions
commit_changes[key]['deletions'] += commit_deletions
else:
commit_changes[key] = {'additions':commit_additions,
'deletions':commit_deletions}
return commit_changes
def generate_commit_time_of_day(query_func, branch):
branch_id = _get_branch_id(query_func, branch)
branch_commits = _get_commits_for_branch(query_func, branch_id)
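    # Histogram of commits keyed by weekday and two-hour bucket ('0-1' ... '22-23').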
commit_tod = {key:{key2:0 for key2 in ['%s-%s' % (n-1,n) for n in xrange(1,24,2)]} for key in ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']}
#print commit_tod
for x in branch_commits:
date = dateparser.parse(x['commit_date'])
day_name = date.strftime('%A')
hour = date.hour
if hour == 0:
hour_period = "0-1"
elif hour % 2 == 0:
# even number
hour_period = "%s-%s" % (hour, hour +1)
else:
hour_period = "%s-%s" % (hour -1, hour)
#print "%s %s, %s" % (day_name,hour, hour_period)
commit_tod[day_name][hour_period] += 1
return commit_tod
| StarcoderdataPython |
3286088 | <filename>resource/admin_node.py
# Copyright (c) 2010-2011 Lazy 8 Studios, LLC.
# All rights reserved.
import csv
from restish import http, resource, templating
from front import VERSION, gift_types
from front.lib import get_uuid, utils, forms, xjson, urls, gametime, money
from front.data import assets
from front.models import user as user_module
from front.models import invite as invite_module
from front.backend import admin, stats, highlights, gamestate, renderer
from front.resource import decode_json, json_success, json_bad_request
from front.backend.admin import ADMIN_INVITER_EMAIL, ADMIN_INVITER_FIRST_NAME, ADMIN_INVITER_LAST_NAME
# Show recent users on the /admin page who have accessed the site within this number of hours ago.
RECENT_USERS_SINCE_HOURS = 48
# The maximum number of recent users to show, even if there are more that meet the RECENT_USERS_SINCE_HOURS
RECENT_USERS_LIMIT = 200
# The maximum number of recent targets to show on /admin page.
RECENT_TARGETS_LIMIT = 50
# The oldest target which will be shown on a recent targets table. This is used to cut down the overall
# number of target rows that need to be examined, since using LIMIT does not do this, and greatly speeds
# up these queries in production.
OLDEST_RECENT_TARGET_DAYS = 5
# The maximum number of recent transactions to show on /admin page.
RECENT_TRANSACTIONS_LIMIT = 3
class AdminNode(resource.Resource):
@resource.child()
def api(self, request, segments):
"""Handles everything under /admin/api"""
return APINode()
@resource.GET()
@templating.page('admin/index.html')
def index(self, request):
params = {
'current_version': VERSION,
'oldest_recent_target_days': OLDEST_RECENT_TARGET_DAYS
}
return params
@resource.POST()
def lookup(self, request):
ok, fields = forms.fetch(request, ['user_search_term'])
if ok:
user_search_term = fields['user_search_term']
return http.see_other(urls.add_query_param_to_url(urls.admin_search_users_with_term(user_search_term)))
else:
return http.bad_request([('content-type', 'text/html')], "Bad parameters. No search query term provided.")
@resource.child()
def recent_users_and_targets_html(self, request, segments):
return RecentUsersAndTargets()
@resource.child('search_users')
def search_users(self, request, segments):
return AdminSearchUsersNode()
@resource.child('user/{user_id}')
def user(self, request, segments, user_id):
return AdminUserNode(request, get_uuid(user_id))
@resource.child('user/{user_id}/map')
def user_map(self, request, segments, user_id):
return AdminUserMapNode(request, get_uuid(user_id))
@resource.child('target/{target_id}')
def target(self, request, segments, target_id):
return AdminTargetNode(request, get_uuid(target_id))
@resource.child('invoice/{invoice_id}')
def invoice(self, request, segments, invoice_id):
return AdminInvoiceNode(request, get_uuid(invoice_id))
@resource.child('gifts')
def gifts(self, request, segments):
return AdminGiftsNode()
@resource.child('invites')
def invites(self, request, segments):
return AdminInvitesNode()
@resource.child('users')
def users(self, request, segments):
return AdminUsersNode()
@resource.child('targets')
def targets(self, request, segments):
return AdminTargetsNode()
@resource.child('transactions')
def transactions(self, request, segments):
return AdminTransactionsNode()
@resource.child('deferreds')
def deferreds(self, request, segments):
return AdminDeferredsNode()
@resource.child('email_queue')
def email_queue(self, request, segments):
return AdminEmailQueueNode()
@resource.child('stats')
def stats(self, request, segments):
return AdminStatsNode()
@resource.child('stats/attrition')
def attrition(self, request, segments):
return AdminStatsAttritionNode()
@resource.child('query')
def query(self, request, segments):
return AdminQueryNode()
class RecentUsersAndTargets(resource.Resource):
@resource.GET(accept=xjson.mime_type)
def recent_users_and_targets_html(self, request):
recent_users = admin.recent_users(request, limit=RECENT_USERS_LIMIT, last_accessed_hours=RECENT_USERS_SINCE_HOURS)
all_users_count = admin.all_users_count(request)
recent_targets = admin.recent_targets(request, limit=RECENT_TARGETS_LIMIT, oldest_recent_target_days=OLDEST_RECENT_TARGET_DAYS)
recent_transactions = admin.recent_transactions(request, limit=RECENT_TRANSACTIONS_LIMIT)
all_transactions_money = admin.all_transactions_amount(request)
all_transactions_money_display = money.format_money(all_transactions_money)
params = {
'recent_users': recent_users,
'all_users_count': all_users_count,
'all_transactions_money_display': all_transactions_money_display,
'recent_targets': recent_targets,
'recent_transactions': recent_transactions,
'format_field': format_field,
'format_email': format_email,
'format_user_label': format_user_label,
'format_utc': format_utc
}
return json_success({
'recent_users_html': templating.render(request, 'admin/recent_users.html', params),
'recent_targets_html': templating.render(request, 'admin/recent_targets.html', params),
'recent_transactions_html': templating.render(request, 'admin/recent_transactions.html', params)
})
class AdminSearchUsersNode(resource.Resource):
@resource.GET()
@templating.page('admin/search_users.html')
def search_users(self, request):
limit = 500
# If there was a no_user param add an error message
search_term = request.GET.get('search_term', None)
if search_term is None:
return {'error': utils.tr("No search query term provided.")}
search_term = search_term.strip()
found_users = admin.search_for_users(request, search_term, limit=limit)
return {
'found_users': found_users,
'search_term': search_term,
'limit': limit,
'format_email': format_email,
'format_user_label': format_user_label
}
class AdminUserNode(resource.Resource):
def __init__(self, request, user_id):
self.user_id = user_id
@resource.GET()
@templating.page('admin/user.html')
def user(self, request):
user = user_module.user_from_context(request, self.user_id, check_exists=True)
if user is None:
return {'error': utils.tr("This user does not exist.")}
# Ask the user object to load and cache all of the gamestate data most of which is going to be
# used on the admin user page.
user.load_gamestate_row_cache()
# Load all the invitations using the faster recent invite queries rather than iterating through
# all of user.invitations and lazy loading everything.
invite_limit = 300
recent_invites = admin.recent_invites(request, sender_id=user.user_id, limit=invite_limit)
return {
'u': user,
'invite_limit': invite_limit,
'user_invitations': recent_invites,
'format_field': format_field,
'format_email': format_email,
'format_utc': format_utc,
'format_utc_approx': format_utc_approx
}
class AdminUserMapNode(resource.Resource):
def __init__(self, request, user_id):
self.user_id = user_id
@resource.GET()
@templating.page('admin/map.html')
def map(self, request):
user = user_module.user_from_context(request, self.user_id, check_exists=True)
if user is None:
return {'error': utils.tr("This user does not exist.")}
return {
'u': user,
'gamestate': gamestate.gamestate_for_user(user, request),
'assets_json_s':assets.get_asset_json()
}
class AdminTargetNode(resource.Resource):
def __init__(self, request, target_id):
self.target_id = target_id
@resource.GET()
@templating.page('admin/target.html')
def target(self, request):
user = user_module.user_from_target_id(request, self.target_id)
target = user.rovers.find_target_by_id(self.target_id)
if target is None:
return {'error': utils.tr("This target does not exist.")}
if target.is_picture():
renderer_target_struct = renderer.process_target_struct(user, target)
else:
renderer_target_struct = {}
return {
'target': target,
'user': target.user,
'renderer_target_struct': renderer_target_struct,
'format_email': format_email
}
class AdminInvoiceNode(resource.Resource):
def __init__(self, request, invoice_id):
self.invoice_id = invoice_id
@resource.GET()
@templating.page('admin/invoice.html')
def invoice(self, request):
user = user_module.user_from_invoice_id(request, self.invoice_id)
if user is None:
return {'error': utils.tr("This user or invoice does not exist.")}
found = [i for i in user.shop.invoices if i.invoice_id == self.invoice_id]
if len(found) == 0:
return {'error': utils.tr("This invoice does not exist.")}
invoice = found[0]
return {
'u': user,
'invoice': invoice,
'format_email': format_email,
'format_utc': format_utc
}
class AdminGiftsNode(resource.Resource):
@resource.GET()
@templating.page('admin/recent_gifts.html')
def get(self, request):
limit = 500
recent_gifts = admin.recent_gifts(request, limit=limit)
return {
'recent_gifts': recent_gifts,
'page_title': "Recent Gifts",
'limit': limit,
'format_email': format_email,
'format_utc': format_utc
}
@resource.child()
def mine(self, request, segments):
return AdminGiftsMineNode()
@resource.child()
def new(self, request, segments):
return AdminGiftsNewNode()
class AdminGiftsMineNode(resource.Resource):
@resource.GET()
@templating.page('admin/recent_gifts.html')
def get(self, request):
limit = 500
user = user_module.user_from_request(request)
recent_gifts = admin.recent_gifts(request, creator_id=user.user_id, limit=limit)
return {
'recent_gifts': recent_gifts,
'page_title': "My Gifts",
'limit': limit,
'format_email': format_email,
'format_utc': format_utc
}
class AdminGiftsNewNode(resource.Resource):
@resource.GET()
@templating.page('admin/new_gift.html')
def get(self, request):
return {
'all_gift_types': gift_types.ALL
}
@resource.POST()
def post(self, request):
ok, fields = forms.fetch(request, ['generate_number', 'gift_annotation', 'gift_type', 'gift_campaign_name'], blanks=['gift_campaign_name'])
if ok:
generate_number = int(fields['generate_number'])
if generate_number <= 0:
return http.bad_request([('content-type', 'text/html')], "Refusing to generate no gifts.")
gift_annotation = fields['gift_annotation']
if len(gift_annotation) < 5:
return http.bad_request([('content-type', 'text/html')], "Please use an annotation longer than 5 characters.")
gift_type = fields['gift_type']
if gift_type not in gift_types.ALL:
return http.bad_request([('content-type', 'text/html')], "Unknown gift_type %s" % gift_type)
gift_campaign_name = fields['gift_campaign_name'].strip()
if len(gift_campaign_name) == 0:
gift_campaign_name = None
user = user_module.user_from_request(request)
for _ in range(0, generate_number):
admin.create_admin_gift_of_type(request, user, gift_type, gift_annotation, campaign_name=gift_campaign_name)
return http.see_other(urls.admin_gifts_mine())
else:
return http.bad_request([('content-type', 'text/html')], "Bad parameters.")
class AdminInvitesNode(resource.Resource):
@resource.GET()
@templating.page('admin/recent_invites.html')
def get(self, request):
limit = 500
recent_invites = admin.recent_invites(request, limit=limit)
return {
'recent_invites': recent_invites,
'page_title': "Recent Invitations",
'limit': limit,
'format_email': format_email,
'format_utc': format_utc
}
@resource.child()
def system(self, request, segments):
return AdminInvitesSystemNode()
@resource.child()
def new(self, request, segments):
return AdminInvitesNewNode()
class AdminInvitesSystemNode(resource.Resource):
@resource.GET()
@templating.page('admin/recent_invites.html')
def get(self, request):
limit = 500
admin_inviter = admin.get_admin_inviter_user(request)
recent_invites = admin.recent_invites(request, sender_id=admin_inviter.user_id, limit=limit)
return {
'recent_invites': recent_invites,
'page_title': "System (Turing) Invitations",
'limit': limit,
'format_email': format_email,
'format_utc': format_utc
}
class AdminInvitesNewNode(resource.Resource):
NO_GIFT_TYPE = "No Gift"
@resource.GET()
@templating.page('admin/new_invite.html')
def get(self, request):
return {
'all_gift_types': (self.NO_GIFT_TYPE,) + gift_types.ALL,
'inviter_email': ADMIN_INVITER_EMAIL,
'inviter_first_name': ADMIN_INVITER_FIRST_NAME,
'inviter_last_name': ADMIN_INVITER_LAST_NAME
}
@resource.POST()
def post(self, request):
ok, fields = forms.fetch(request, ['gift_type', 'invitation_message', 'inviter_email', 'recipient_emails_and_names',
'invitation_campaign_name'], blanks=['invitation_campaign_name'])
if ok:
user = user_module.user_from_request(request)
# Future proof allowing an admin to pick who the sender is.
inviter_email = fields['inviter_email']
assert inviter_email == ADMIN_INVITER_EMAIL
gift_type = fields['gift_type']
if gift_type == self.NO_GIFT_TYPE:
gift_type = None
gift_annotation = None
elif gift_type not in gift_types.ALL:
return http.bad_request([('content-type', 'text/html')], "Unknown gift_type %s" % gift_type)
else:
# Gift annotation is required if a gift type is selected.
gift_annotation = request.POST['gift_annotation']
if len(gift_annotation) < 5:
return http.bad_request([('content-type', 'text/html')], "Please use an annotation longer than 5 characters.")
invitation_message = fields['invitation_message']
if len(invitation_message) < 5:
return http.bad_request([('content-type', 'text/html')], "Please use an invitation message longer than 5 characters.")
invitation_campaign_name = fields['invitation_campaign_name'].strip()
if len(invitation_campaign_name) == 0:
invitation_campaign_name = None
# The lists of emails and optional first and last names are in a CSV format
recipient_emails_and_names = fields['recipient_emails_and_names']
recipient_emails_and_names_list = recipient_emails_and_names.strip().split('\n')
invitations_params = []
# NOTE: This helper function strips whitespace from each CSV value.
for entry in unicode_csv_reader(recipient_emails_and_names_list, fieldnames=['email','first_name','last_name'], strip=True):
if len(entry) != 3:
return http.bad_request([('content-type', 'text/html')], "Bad invite entry [%s]." % entry)
if entry['email'] is None:
return http.bad_request([('content-type', 'text/html')], "Missing email entry in invite row.")
if entry['first_name'] is None: entry['first_name'] = ""
if entry['last_name'] is None: entry['last_name'] = ""
params, error = invite_module.validate_invite_params(user, entry['email'], entry['first_name'], entry['last_name'],
invitation_message,
attaching_gift=gift_type is not None,
admin_invite=True)
if not params:
return http.bad_request([('content-type', 'text/html')], "Bad invite entry [%s][%s]." % (entry, error))
invitations_params.append(params)
if len(invitations_params) == 0:
return http.bad_request([('content-type', 'text/html')], "Unable to parse any invite entries.")
# Send all of the invitations now that they are parsed.
for invite_params in invitations_params:
admin.send_admin_invite_with_gift_type(request, user, invite_params, gift_type, gift_annotation,
campaign_name=invitation_campaign_name)
return http.see_other(urls.admin_invites_system())
else:
return http.bad_request([('content-type', 'text/html')], "Bad parameters.")
def format_deferred_run_at(deferred_row):
""" Show a user friendly version of the deferred row's run_at field, including showing if it is overdue. """
till_run_at = utils.seconds_between_datetimes(gametime.now(), deferred_row.run_at)
if till_run_at < 0:
return "OVERDUE: " + utils.format_time_approx(abs(till_run_at))
else:
return utils.format_time_approx(till_run_at)
class AdminUsersNode(resource.Resource):
@resource.GET()
@templating.page('admin/users.html')
def get(self, request):
limit = 500
campaign_name = request.GET.get('campaign_name', None)
if campaign_name is not None:
recent_users = admin.recent_users(request, limit=limit, campaign_name=campaign_name)
else:
recent_users = admin.recent_users(request, limit=limit)
all_users_count = admin.all_users_count(request)
return {
'recent_users': recent_users,
'all_users_count': all_users_count,
'show_user_full_name': True,
'limit': limit,
'format_email': format_email,
'format_utc': format_utc,
'format_field': format_field,
'format_user_label': format_user_label
}
class AdminTargetsNode(resource.Resource):
@resource.GET()
@templating.page('admin/targets.html')
def get(self, request):
limit = 200
oldest_recent_target_days = OLDEST_RECENT_TARGET_DAYS
recent_targets = admin.recent_targets(request, limit=limit, oldest_recent_target_days=oldest_recent_target_days)
return {
'recent_targets': recent_targets,
'oldest_recent_target_days': oldest_recent_target_days,
'limit': limit,
'format_email': format_email
}
class AdminTransactionsNode(resource.Resource):
@resource.GET()
@templating.page('admin/transactions.html')
def get(self, request):
limit = 300
recent_transactions = admin.recent_transactions(request, limit=limit)
total_money = sum((t.money for t in recent_transactions), money.from_amount_and_currency(0, 'USD'))
total_money_display = money.format_money(total_money)
all_transactions_money = admin.all_transactions_amount(request)
all_transactions_money_display = money.format_money(all_transactions_money)
return {
'recent_transactions': recent_transactions,
'total_money_display': total_money_display,
'all_transactions_money_display': all_transactions_money_display,
'limit': limit,
'format_utc': format_utc
}
class AdminDeferredsNode(resource.Resource):
@resource.GET()
@templating.page('admin/deferreds.html')
def get(self, request):
pending_deferreds = admin.pending_deferreds(request)
return {
'pending_deferreds': pending_deferreds,
'format_deferred_run_at': format_deferred_run_at,
'format_email': format_email,
'format_utc': format_utc
}
class AdminEmailQueueNode(resource.Resource):
@resource.GET()
@templating.page('admin/email_queue.html')
def get(self, request):
queued_emails = admin.queued_emails(request)
return {
'queued_emails': queued_emails,
'format_utc': format_utc
}
class AdminStatsNode(resource.Resource):
DISPLAY_CHARTS = stats.STATS_PAGE_CHARTS
TEMPLATE = 'admin/stats.html'
@resource.GET()
def get(self, request):
use_debug_data = request.GET.get('debug', None) != None
template_data = {
'all_chart_names': self.DISPLAY_CHARTS,
'url_admin_api_chart_data': urls.admin_api_chart_data(),
'use_debug_data': use_debug_data
}
params = {
'all_chart_names': self.DISPLAY_CHARTS,
'template_data_s': xjson.dumps(template_data)
}
content = templating.render(request, self.TEMPLATE, params)
return http.ok([('content-type', 'text/html')], content)
class AdminStatsAttritionNode(AdminStatsNode):
DISPLAY_CHARTS = stats.ATTRITION_PAGE_CHARTS
TEMPLATE = 'admin/attrition.html'
class AdminQueryNode(resource.Resource):
@resource.GET()
@templating.page('admin/query.html')
def get(self, request):
return {}
@resource.POST()
@templating.page('admin/query_result.html')
def run_query(self, request):
"""
Build a custom SQL query based on the inputs.
An example query of this form:
select * from (select users.email, (select count(*) from messages where messages.user_id = users.user_id
and messages.msg_type="MSG_LASTTHINGa") as msg0, (select count(*) from messages
where messages.user_id = users.user_id and messages.msg_type="MSG_LASTTHINGb") as msg1 from users) as tbl_1
where msg0=0 and msg1=1;
"""
ok, fields = forms.fetch(request, ['msg0_id', 'msg0_sent', 'msg0_locked', 'msg1_id',
'msg1_sent', 'msg1_locked', 'mis0_id', 'mis0_status', 'mis1_id', 'mis1_status'],
blanks=['msg0_id', 'msg0_sent', 'msg0_locked', 'msg1_id', 'msg1_sent', 'msg1_locked',
'mis0_id', 'mis0_status', 'mis1_id', 'mis1_status'])
if not ok:
return {'error': 'Bad parameters.'}
# For each message with a non-empty ID, create a subquery that returns only the count of that message type
# with the given parameters.
# e.g., (select count(*) from messages where messages.user_id=users.user_id and messages.msg_type="MSG_OBELISK01a") as msg0
# We'll simultaneously build a criteria_list that will be used to check the outputs of each subquery.
# e.g., 'msg0=1 and msg1=1'
subquery_list = ''
criteria_list = ''
for i in range(2):
msg_type = fields['msg%d_id' % i]
if msg_type != '':
if subquery_list != '':
subquery_list += ',\n'
criteria_list += ' and '
subquery_list += '(select count(*) from messages where messages.user_id=users.user_id and messages.msg_type="%s"' % (msg_type)
if fields['msg%d_locked' % i] == 'TRUE':
subquery_list += ' and messages.locked=1'
elif fields['msg%d_locked' % i] == 'FALSE':
subquery_list += ' and messages.locked=0'
if fields['msg%d_sent' % i] == 'TRUE':
criteria_list += 'msg%d=1' % (i)
else:
criteria_list += 'msg%d=0' % (i)
subquery_list += ') as msg%d' % (i)
# Append subqueries and subquery criteria for missions.
for i in range(2):
mis_def = fields['mis%d_id' % i]
if mis_def != '':
if subquery_list != '':
subquery_list += ',\n'
criteria_list += ' and '
subquery_list += '(select count(*) from missions where missions.user_id=users.user_id and missions.mission_definition="%s"' % (mis_def)
if fields['mis%d_status' % i] == 'STARTED':
subquery_list += ' and missions.done=0'
criteria_list += 'mis%d=1' % (i)
elif fields['mis%d_status' % i] == 'DONE':
subquery_list += ' and missions.done=1'
criteria_list += 'mis%d=1' % (i)
else:
criteria_list += 'mis%d=0' % (i)
subquery_list += ') as mis%d' % (i)
if subquery_list == '':
return {'error': 'All inputs were left blank.'}
# Put it all together into the final query.
query = 'select * from (select users.email,\n%s\nfrom users) as tbl_1 where %s' % (subquery_list, criteria_list)
return { 'sql_query': query }
## The admin REST API.
class APINode(resource.Resource):
@resource.child()
def chart_data(self, request, segments):
return ChartData()
@resource.child()
def user_increment_invites_left(self, request, segments):
return IncrementInvitesLeft()
@resource.child()
def user_edit_campaign_name(self, request, segments):
return EditCampaignName()
@resource.child()
def reprocess_target(self, request, segments):
return ReprocessTarget()
@resource.child()
def highlight_add(self, request, segments):
return HighlightAdd()
@resource.child()
def highlight_remove(self, request, segments):
return HighlightRemove()
class ChartData(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
body, error = decode_json(request, required={'chart_name': unicode, 'use_debug_data': bool})
if body is None: return error
chart_name = body['chart_name']
use_debug_data = body['use_debug_data']
if chart_name not in stats.ALL_CHARTS:
return json_bad_request(utils.tr('This is an unknown chart name: ' + chart_name))
stat_func = getattr(stats, chart_name + '_stats')
if stat_func is None: raise Exception("No stat function for chart named " + chart_name)
# Call through to the stats module and load the chart data and options for this chart.
chart_type, gtable, options = stat_func(request, use_debug_data=use_debug_data)
return json_success({
'chart_name': chart_name,
'chart_type': chart_type,
'chart_options': options,
# The Google Chart API has a custom JSON serializer so use that serializer, then convert back
# to a Python object so that the whole chart response can be serialized together.
'chart_data': xjson.loads(gtable.ToJSon())
})
class IncrementInvitesLeft(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
body, error = decode_json(request, required={'user_id': unicode})
if body is None: return error
user_id = get_uuid(body['user_id'])
user = user_module.user_from_context(request, user_id, check_exists=True)
if user is None:
return json_bad_request(utils.tr('This user does not exist.'))
user.increment_invites_left()
return json_success({'invites_left': user.invites_left})
class EditCampaignName(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
body, error = decode_json(request, required={'user_id': unicode, 'campaign_name': unicode})
if body is None: return error
user_id = get_uuid(body['user_id'])
campaign_name = body['campaign_name'].strip()
if len(campaign_name) > 1024:
return json_bad_request(utils.tr('Campaign name too long.'))
user = user_module.user_from_context(request, user_id, check_exists=True)
if user is None:
return json_bad_request(utils.tr('This user does not exist.'))
if len(campaign_name) > 0:
user.add_metadata("MET_CAMPAIGN_NAME", campaign_name)
else:
user.clear_metadata("MET_CAMPAIGN_NAME")
return json_success({'campaign_name': campaign_name})
class ReprocessTarget(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
body, error = decode_json(request, required={'target_id': unicode})
if body is None: return error
target_id = get_uuid(body['target_id'])
user = user_module.user_from_target_id(request, target_id)
if user is None:
return json_bad_request(utils.tr('This target does not exist.'))
target = user.rovers.find_target_by_id(target_id)
# Only picture and not-neutered targets can be marked highlighted.
if not target.is_picture():
return json_bad_request(utils.tr('Only picture targets can be reprocessed.'))
if target.is_neutered():
return json_bad_request(utils.tr('Neutered targets can not be reprocessed.'))
target.mark_for_rerender()
return json_success()
class HighlightAdd(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
return process_highlight_request(request, highlights.add_target_highlight)
class HighlightRemove(resource.Resource):
@resource.POST(accept=xjson.mime_type)
def post(self, request):
return process_highlight_request(request, highlights.remove_target_highlight)
def process_highlight_request(request, highlight_action):
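    # Shared handler for HighlightAdd/HighlightRemove: validate the target, then apply highlight_action.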
body, error = decode_json(request, required={'target_id': unicode})
if body is None: return error
target_id = get_uuid(body['target_id'])
user = user_module.user_from_target_id(request, target_id)
if user is None:
return json_bad_request(utils.tr('This target does not exist.'))
target = user.rovers.find_target_by_id(target_id)
# Only picture, non-classified and not-neutered targets can be marked highlighted.
if not target.is_picture():
return json_bad_request(utils.tr('Only picture targets can be highlighted.'))
if target.is_classified():
return json_bad_request(utils.tr('Classified targets can not be highlighted.'))
if target.is_neutered():
return json_bad_request(utils.tr('Neutered targets can not be highlighted.'))
highlight_action(request, target)
return json_success()
def format_utc(utc_dt):
pst = utils.utc_date_in_pst(utc_dt)
return pst
def format_utc_approx(utc_dt):
return utils.format_time_approx(utils.seconds_between_datetimes(utc_dt, gametime.now()))
MAX_LENGTH = 30
def format_user_label(user, max_length=MAX_LENGTH):
str = ''
if user.auth != 'PASS':
str += user.auth + ': '
if user.email:
str += user.email
else:
str += user.first_name + ' ' + user.last_name
return _truncate_string(str)
def format_email(email, max_length=MAX_LENGTH):
"""
Keep email fields to a max width in the admin UI
"""
if email:
return _truncate_string(email, max_length)
return "None"
def format_field(text, max_length=MAX_LENGTH):
"""
Keep description/name fields to a max width in the admin UI
"""
if text is None: return None
return _truncate_string(text, max_length)
def _truncate_string(text, max_length=MAX_LENGTH):
"""
Keep string fields to a max width in the admin UI
>>> _truncate_string('<EMAIL>')
'<EMAIL>'
>>> _truncate_string('<EMAIL>')
'<EMAIL>'
>>> len(_truncate_string('<EMAIL>'))
30
>>> _truncate_string('<EMAIL>')
'<EMAIL>...'
>>> len(_truncate_string('<EMAIL>'))
30
"""
if len(text) > max_length:
return text[:max_length - 3] + '...'
else:
return text
# Modified from http://docs.python.org/2/library/csv.html
def unicode_csv_reader(unicode_csv_data, strip=False, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.DictReader(_utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
formatter = _format_val(strip=strip)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield dict((k, formatter(v)) for k,v in row.iteritems())
def _format_val(strip=False):
def _f(v):
if v is None: return None
if not isinstance(v, basestring): return v
if strip: v = v.strip()
return unicode(v, 'utf-8')
return _f
def _utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
| StarcoderdataPython |
1650875 | import argparse
import sys
import os
import ggutils.s3_access as s3_access
import smalltrain as st
try:
# For the case smalltrain is installed as Python library
print('try to load smalltrain modules from Python library')
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from Python library')
except ModuleNotFoundError:
if os.environ.get('SMALLTRAIN_HOME'):
# For the case the environmental value SMALLTRAIN_HOME is exported
_smalltrain_home_path = os.environ.get('SMALLTRAIN_HOME')
_smalltrain_home_path = os.path.join(_smalltrain_home_path, 'src')
else:
# Try to load smalltrain modules from current directory
_smalltrain_home_path = './'
print('try to load smalltrain modules from the path: {}'.format(_smalltrain_home_path))
sys.path.append(_smalltrain_home_path)
from smalltrain.model.nn_model import NNModel
print('smalltrain modules are ready to be loaded from the path: {}'.format(_smalltrain_home_path))
def get_model_list():
from smalltrain.model.one_dim_cnn_model import OneDimCNNModel
from smalltrain.model.two_dim_cnn_model import TwoDimCNNModel
from smalltrain.model.two_dim_cnn_model_v2 import TwoDimCNNModelV2
model_list = [OneDimCNNModel(), TwoDimCNNModel(), TwoDimCNNModelV2()]
model_id_list = [model.MODEL_ID for model in model_list]
return model_list, model_id_list
MODEL_LIST, MODEL_ID_LIST = get_model_list()
def construct_model(log_dir_path, model_id, hparams, train_data=None, debug_mode=True):
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=log_dir_path, model_id=model_id, hparams=hparams, train_data=train_data, debug_mode=debug_mode)
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
# MODEL_ID_4NN = '4NN_20180808' # 4 nn model 2019/09/10
# MODEL_ID_DNN = 'DNN' # 4 nn model 2019/09/10
# MODEL_ID_1D_CNN = '1D_CNN'
# MODEL_ID_CC = 'CC' # Carbon Copy
# MODEL_ID = MODEL_ID_4NN
class Operation:
"""Operation class as hyper parameter of train or prediction operation
Arguments:
params: A dictionary that maps hyper parameter keys and values
debug_mode: Boolean, if `True` then running with debug mode.
"""
def __init__(self, hparams=None, setting_file_path=None):
self._hparam_ins = st.Hyperparameters(hparams, setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
print('init hparams_dict: {}'.format(self.hparams_dict))
def get_hparams_ins(self):
return self._hparam_ins
def update_params_from_file(self, setting_file_path):
self._hparam_ins.update_hyper_param_from_file(setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
def update_hyper_param_from_json(self, json_obj):
self._hparam_ins.update_hyper_param_from_json(json_obj)
self.hparams_dict = self._hparam_ins.__dict__
def read_hyper_param_from_file(self, setting_file_path):
'''
        This method is kept for backward compatibility with existing code.
:param setting_file_path:
:return:
'''
self.update_params_from_file(setting_file_path=setting_file_path)
self.hparams_dict = self._hparam_ins.__dict__
return self.hparams_dict
def prepare_dirs(self):
'''
Prepare directories used in operation
:return:
'''
log_dir_path = self.hparams_dict['save_root_dir'] + '/logs/' + self.hparams_dict['train_id']
log_dir_path = log_dir_path.replace('//', '/')
os.makedirs(log_dir_path, exist_ok=True)
self.log_dir_path = log_dir_path
# Set value to hyperparameter
self._hparam_ins.set('log_dir_path', log_dir_path)
save_dir_path = self.hparams_dict['save_root_dir'] + '/model/' + self.hparams_dict['train_id'] + '/'
save_dir_path = save_dir_path.replace('//', '/')
os.makedirs(save_dir_path, exist_ok=True)
self.save_dir_path = save_dir_path
self._hparam_ins.set('save_dir_path', save_dir_path)
save_file_name = 'model-{}_lr-{}_bs-{}.ckpt'.format(self.hparams_dict['model_prefix'], self.hparams_dict['learning_rate'],
self.hparams_dict['batch_size'])
save_file_path = save_dir_path + '/' + save_file_name
save_file_path = save_file_path.replace('//', '/')
self.save_file_path = save_file_path
self._hparam_ins.set('save_file_path', save_file_path)
report_dir_path = self.hparams_dict['save_root_dir'] + '/report/' + self.hparams_dict['train_id'] + '/'
report_dir_path = report_dir_path.replace('//', '/')
os.makedirs(report_dir_path, exist_ok=True)
self.report_dir_path = report_dir_path
self._hparam_ins.set('report_dir_path', report_dir_path)
operation_dir_path = os.path.join(self.hparams_dict['save_root_dir'], 'operation')
operation_dir_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'])
operation_file_path = os.path.join(operation_dir_path, self.hparams_dict['train_id'] + '.json')
os.makedirs(operation_dir_path, exist_ok=True)
# self.operation_dir_path = operation_dir_path
# self.operation_file_path = operation_file_path
if self.hparams_dict['cloud_root'] is not None:
print('Upload the hparams to cloud: {}'.format(self.hparams_dict['cloud_root']))
upload_to_cloud(operation_file_path, self.hparams_dict['cloud_root'], self.hparams_dict['save_root_dir'])
print('[Operation]DONE prepare_dirs')
def construct_and_prepare_model(self, hparams=None, train_data=None):
hparams = hparams or self.hparams_dict
model_id = hparams['model_id']
print('construct_and_prepare_model with model_id: {}'.format(model_id))
if model_id in MODEL_ID_LIST:
for _m in MODEL_LIST:
if _m.MODEL_ID == model_id:
model = _m.construct_and_prepare_model(log_dir_path=hparams['log_dir_path'], model_id=model_id,
hparams=hparams, train_data=train_data,
debug_mode=hparams['debug_mode'])
self.model = model
return model
raise TypeError('Invalid model_id:{}'.format(model_id))
def train(self, hparams=None):
hparams = hparams or self.hparams_dict
if self.model is None:
self.construct_and_prepare_model(hparams=hparams)
self.model.train(iter_to=hparams['iter_to'], learning_rate=hparams['learning_rate'],
batch_size=hparams['batch_size'], dropout_ratio=hparams['dropout_ratio'],
l1_norm_reg_ratio=hparams['l1_norm_reg_ratio'], save_file_path=hparams['save_file_path'],
report_dir_path=hparams['report_dir_path'])
print('DONE train data ')
print('====================')
def auto(self, hparams=None, setting_file_path=None):
print('====================')
print('TODO auto operation with hyper parameter: ')
print(self.hparams_dict)
print('====================')
self.prepare_dirs()
print('DONE prepare_dirs')
print('====================')
print('TODO construct_and_prepare_model')
self.construct_and_prepare_model()
print('DONE construct_and_prepare_model')
print('====================')
if (not self.hparams_dict.get('prediction_mode')):
print('TODO train( or test only)')
self.train()
print('DONE train( or test only)')
print('====================')
print('DONE auto operation')
print('====================')
def main(exec_param):
print(exec_param)
operation = Operation(setting_file_path=exec_param['setting_file_path'])
operation.auto()
def _main(exec_param):
print(exec_param)
operation = Operation()
if 'setting_file_path' in exec_param.keys() and exec_param['setting_file_path'] is not None:
operation.update_params_from_file(exec_param['setting_file_path'])
elif 'json_param' in exec_param.keys() and exec_param['json_param'] is not None:
operation.update_hyper_param_from_json(exec_param['json_param'])
exec_param = operation.hparams_dict
print('updated exec_param:{}'.format(exec_param))
# prepare directories
operation.prepare_dirs()
if 'scrpit_test' in exec_param.keys() and exec_param['scrpit_test'] == True:
test_static_methods()
model = operation.construct_and_prepare_model()
model.train(iter_to=1000, learning_rate=exec_param['learning_rate'], batch_size=exec_param['batch_size'], dropout_ratio=exec_param['dropout_ratio'], save_file_path=exec_param['save_file_path'])
exit()
model = None
print('====================')
print('TODO train data ')
if model is None:
model = operation.construct_and_prepare_model()
operation.train()
print('DONE train data ')
print('====================')
from pathlib import Path
def download_to_local(path, work_dir_path='/var/tmp/tsp/'):
ret_path = None
# check path is local
if os.path.exists(path): return path
os.makedirs(work_dir_path, exist_ok=True)
# check if s3 path
s3_bucket_name, s3_key = get_bucket_name(path)
if s3_bucket_name is not None:
ret_path = os.path.join(work_dir_path, s3_key)
os.makedirs(Path(ret_path).parent, exist_ok=True)
s3_access.download(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=work_dir_path, file_path=s3_key)
return ret_path
import multiprocessing
def upload_to_cloud(local_path, cloud_root, local_root, with_multiprocessing=True):
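    # Mirror a file that lives under local_root to the matching key under cloud_root (an s3:// URL),
    # either directly or via the background upload queue when with_multiprocessing is set.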
if local_path is None:
print('No file to upload_to_cloud:local_path:{}'.format(local_path))
return
s3_bucket_name, s3_root_key = get_bucket_name(cloud_root)
if s3_bucket_name is None:
raise ValueError('Invalid cloud_root:{}'.format(cloud_root))
if len(local_path.split(local_root)[0]) > 0:
raise ValueError('Invalid local_path:{} or local_root:{}'.format(local_path, local_root))
local_path_from_local_root = local_path.split(local_root)[1]
# print('local_path_from_local_root:{}'.format(local_path_from_local_root))
s3_key = os.path.join(s3_root_key, local_path_from_local_root)
local_dir = Path(local_path).parent
file_path = Path(local_path).name
if with_multiprocessing:
# p = multiprocessing.Process(target=s3_access.upload, args=(s3_bucket_name, s3_key, local_dir, file_path,))
# p.start()
send_to_s3_uploader(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
else:
s3_access.upload(s3_bucket_name=s3_bucket_name, s3_key=s3_key, local_dir=local_dir, file_path=file_path)
def send_to_s3_uploader(s3_bucket_name, s3_key, local_dir, file_path, queue_file_path='/var/tmp/tsp/queue.txt'):
mode = 'a' if os.path.isfile(queue_file_path) else 'w'
f = open(queue_file_path, mode)
f.write('{}, {}, {}, {}\n'.format(s3_bucket_name, s3_key, local_dir, file_path))
f.close()
def is_s3_path(s3_path):
s3_bucket_name, s3_key = get_bucket_name(s3_path)
return (s3_bucket_name is not None)
def get_bucket_name(s3_path):
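    # Split an 's3://bucket/key' style path into (bucket_name, key); returns (None, None)
    # for local paths or anything that does not parse as an S3 URL.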
if s3_path is None: return None, None
try:
_split = s3_path.split('s3://')
if len(_split[0]) > 0: return None, None
s3_bucket_name = _split[1].split('/')[0]
s3_key = _split[1][1 + len(s3_bucket_name):]
return s3_bucket_name, s3_key
except IndexError as e:
print('Can not read s3_bucket_name or s3_key from s3_path:{}'.format(s3_path))
return None, None
def test_download_to_local():
path = 's3://your-bucket/tsp/sample/sample.json'
download_path = download_to_local(path)
has_downloaded = os.path.isfile(download_path)
print('[test_download_to_local]from:{}, to:{} has_downloaded:{}'.format(path, download_path, has_downloaded))
assert has_downloaded
def test_upload_to_cloud():
# case 1
local_path = '/var/tsp/sample/test/sample_upload.txt'
cloud_root = 's3://your-bucket/tsp/sample/test/'
local_root = '/var/tsp/sample/test/'
upload_to_cloud(local_path, cloud_root, local_root)
def test_static_methods():
test_upload_to_cloud()
exit()
test_download_to_local()
print('Done test_static_methods')
def main_with_train_id(train_id):
print('TODO')
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tsp')
parser.add_argument('--model_prefix', '-mp', type=str, default='nn',
help='The prefix string representing the model')
parser.add_argument('--save_root_dir', '-rd', type=str, default='/var/tensorflow/tsp/',
help='Root dir for Tensorflow FileWriter')
parser.add_argument('--init_model_path', '-imp', type=str, default=None,
help='Model path to restore Tensorflow session')
parser.add_argument('--restore_var_name_list', '-rvnl', type=list, default=None,
help='restore_var_name_list')
parser.add_argument('--untrainable_var_name_list', '-utvnl', type=list, default=None,
help='untrainable_var_name_list')
parser.add_argument('--learning_rate', '-ll', type=float, default=1e-4,
                        help='learning_rate of the optimizer')
# About batch size
parser.add_argument('--batch_size', '-bs', type=int, default=128,
help='batch_size')
# About minibatch operation
parser.add_argument('--evaluate_in_minibatch', '-enmb', type=bool, default=False,
help = 'Bool, Whether to evaluate in minibatch or not (Default: False)')
parser.add_argument('--iter_to', '-itr', type=int, default=10000,
help='iter_to')
parser.add_argument('--dropout_ratio', '-dr', type=float, default=0.5,
help='Dropout ratio')
parser.add_argument('--train_id', '-tid', type=str, default='TEST_YYYYMMDD-HHmmSS',
help='id attached to model and log dir to identify train operation ')
parser.add_argument('--model_id', '-mid', type=str, default=st.Hyperparameters.DEFAULT_DICT['model_id'],
help='id attached to model to identify model constructure ')
parser.add_argument('--model_type', '-mty', type=str, default='REGRESSION',
help='model_type ')
parser.add_argument('--prediction_mode', '-pmd', type=bool, default=None,
help='Whether prediction mode or not')
parser.add_argument('--debug_mode', '-dmd', type=bool, default=None,
help='Whether debug mode or not')
parser.add_argument('--monochrome_mode', '-mmd', type=bool, default=False,
help='Whether monochrome mode or not')
parser.add_argument('--optimizer', '-otm', type=str, default=None,
help='String, optimizer')
parser.add_argument('--input_ts_size', '-its', type=int, default=12,
help='input_ts_size')
parser.add_argument('--input_ts_width', '-itw', type=int, default=None,
                        help='input_ts_width')
parser.add_argument('--input_img_width', '-iiw', type=int, default=32,
help='input_img_width')
parser.add_argument('--input_output_ts_offset', '-iotso', type=int, default=1,
help='input_output_ts_offset')
parser.add_argument('--input_output_ts_offset_range', '-iotsor', type=list, default=None,
help='input_output_ts_offset_range')
parser.add_argument('--input_output_ts_offset_list', '-iotsol', type=list, default=None,
help='input_output_ts_offset_list')
parser.add_argument('--has_to_complement_before', '-htcb', type=bool, default=True,
help='Whether complement the value before ts starts or not(Default:True)')
parser.add_argument('--complement_ts', '-cpts', type=str, default=None,
help='String, Values to complement the missing time series data (Default:None)')
parser.add_argument('--n_layer', '-nly', type=int, default=5,
help='n_layer')
parser.add_argument('--num_add_fc_layers', '-nafl', type=int, default=0,
help='num_add_fc_layers')
parser.add_argument('--fc_node_size_list', '-fnsl', type=list, default=None,
help='fc_node_size_list')
parser.add_argument('--fc_weight_stddev_list', '-fwsl', type=list, default=None,
                        help='List of float, the list of stddevs of the weight variables in each fc layer. Default: all 0.1.')
parser.add_argument('--fc_bias_value_list', '-fbvl', type=list, default=None,
                        help='List of float, the list of initial values of the bias variables in each fc layer. Default: all 0.1')
# about sub model
parser.add_argument('--sub_model_url', '-smu', type=str, default=None,
help='String, The sub model\'s URL (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_allocation', '-sma', type=float, default=0.0,
help='Float, the allocation of value which flows into the sub model (Default: 0.0, no allocation into the sub model)')
parser.add_argument('--sub_model_input_point', '-smip', type=str, default=None,
help='String, The sub model input point (Default: None, Do not use sub model)')
parser.add_argument('--sub_model_output_point', '-smop', type=str, default=None,
help='String, The sub model output point (Default: None, Do not use sub model)')
# about ResNet
parser.add_argument('--has_res_net', '-hrs', type=bool, default=False,
help='Whether the model has ResNet (the layers in the model has short cut) or not.')
parser.add_argument('--num_cnn_layers_in_res_block', '-nclrb', type=int, default=2,
help='Integer, the number of CNN layers in one Residual Block (Default: 2)')
parser.add_argument('--ts_start', '-tss', type=int, default=None,
help='ts_start')
parser.add_argument('--ts_end', '-tse', type=int, default=None,
help='ts_end')
parser.add_argument('--test_ts_index_from', '-tetsif', type=int, default=None,
help='test_ts_index_from')
parser.add_argument('--test_ts_index_to', '-tetsit', type=int, default=None,
help='test_ts_index_to')
parser.add_argument('--max_data_per_ts', '-mdpts', type=int, default=None,
help='max_data_per_ts')
parser.add_argument('--filter_width', '-flw', type=int, default=5,
help='filter_width')
parser.add_argument('--cnn_channel_size', '-ccs', type=int, default=4,
help='cnn_channel_size')
parser.add_argument('--cnn_channel_size_list', '-ccsl', type=list, default=None,
help='cnn_channel_size_list')
parser.add_argument('--pool_size_list', '-psl', type=list, default=None,
help='pool_size_list')
parser.add_argument('--act_func_list', '-actfl', type=list, default=None,
help='act_func_list')
parser.add_argument('--cnn_weight_stddev_list', '-cwsl', type=list, default=None,
                        help='List of float, the list of stddevs of the weight variables in each cnn layer. Default: all 0.1.')
parser.add_argument('--cnn_bias_value_list', '-cbvl', type=list, default=None,
                        help='List of float, the list of initial values of the bias variables in each cnn layer. Default: all 0.1.')
# about data augmentation
parser.add_argument('--flip_randomly_left_right', '-frlr', type=bool, default=True,
                        help='Boolean, if true, the processed images will be randomly flipped left and right during data augmentation (Default: true).')
parser.add_argument('--crop_randomly', '-crr', type=bool, default=True,
help='Boolean, if true, the processed images will be randomly cropped from resized images, the size to resize is set with size_random_crop_from (Default: true).')
parser.add_argument('--size_random_crop_from', '-srcf', type=int, default=None,
                        help='Integer, the size to which the images will be resized and from which the processed images will be randomly cropped (Default: None, set input_img_width * 1.25 if crop_randomly is true)')
parser.add_argument('--angle_rotate_randomly', '-rtrnd', type=float, default=None,
                        help='Float, the angle by which the image will be rotated, randomly chosen between -rt <= x <= +rt (Default: 0)')
parser.add_argument('--rounding_angle', '-rndang', type=int, default=90,
help='Integer, The Angle should be rounded to a multiple of rounding_angle (Default: 90)')
parser.add_argument('--resize_to_crop_with', '-retcw', type=str, default='scaling_or_padding',
                        help='String, how to resize the image before cropping: scaling_or_padding or padding')
# about L1 term loss
parser.add_argument('--add_l1_norm_reg', '-al1nr', type=bool, default=False,
help='Whether add L1 term or not.')
parser.add_argument('--l1_norm_reg_ratio', '-l1nrr', type=float, default=0.01,
help='L1 term ratio (* L1 term)')
# about preactivation regularization
parser.add_argument('--add_preactivation_regularization', '-aprreg', type=bool, default=False,
help='Whether add_preactivation_regularization or not.')
parser.add_argument('--preactivation_regularization_value_ratio', '-prrgvr', type=float, default=0.0,
help='preactivation_regularization_value_ratio')
parser.add_argument('--preactivation_maxout_list', '-prmol', type=list, default=None,
help='preactivation_maxout_list')
# about min-max normalization
parser.add_argument('--has_minmax_norm', '-hmmn', type=bool, default=True,
help='has_minmax_norm')
parser.add_argument('--input_min', '-imin', type=float, default=None,
help='Float, min value of input data. Default: None(will be selected from input test/train data)')
parser.add_argument('--input_max', '-imax', type=float, default=None,
help='Float, max value of input data. Default: None(will be selected from input test/train data)')
# about batch normalization
parser.add_argument('--has_batch_norm', '-hbn', type=bool, default=True,
help='has_batch_norm')
parser.add_argument('--bn_decay', '-bnd', type=float, default=NNModel.DEFAULT_BN_DECAY,
help='batch normalization param decay')
parser.add_argument('--bn_eps', '-bne', type=float, default=NNModel.DEFAULT_BN_ESP,
help='batch normalization param eps')
parser.add_argument('--data_dir_path', '-ddp', type=str, default=None,
help='data_dir_path')
parser.add_argument('--data_set_def_path', '-dsdp', type=str, default=None,
help='data_set_def_path')
parser.add_argument('--input_data_names', '-idn', type=str, default=None,
help='input_data_names')
parser.add_argument('--input_data_names_to_be_extended', '-idnex', type=str, default=None,
help='input_data_names_to_be_extended')
parser.add_argument('--output_data_names', '-odn', type=str, default=None,
help='output_data_names')
parser.add_argument('--output_classes', '-ocs', type=int, default=None,
                        help='Integer, the number of output classes (output class size) used in classification operations. Default: None(will be set from data set or initial model)')
# col name that has time series data
parser.add_argument('--dt_col_name', '-tcn', type=str, default=None,
help='ts_col_name')
parser.add_argument('--dt_col_format', '-tcf', type=str, default='YYYY-mm-DD',
help='ts_col_format')
parser.add_argument('--dt_unit', '-tsu', type=str, default='day',
help='ts_unit')
# datetime col
parser.add_argument('--add_dt_col_name_list', '-adcnl', type=list, default=None,
help='add_dt_col_name_list')
parser.add_argument('--annotation_col_names', '-acn', type=list, default=None,
help='annotation_col_names')
# multi resolution channels
parser.add_argument('--multi_resolution_channels', '-mrc', type=int, default=0,
help='multi resolution channels(default:not add)')
parser.add_argument('--decrease_resolution_ratio', '-rdr', type=int, default=NNModel.DEFAULT_DECREASE_RESOLUTION_RATIO,
help='ratio to decrease to multi resolution channels(default:decrease by {})'.format(NNModel.DEFAULT_DECREASE_RESOLUTION_RATIO))
parser.add_argument('--decrease_resolution_ratio_list', '-rdrl', type=list, default=None,
help='list of ratio to decrease to multi resolution channels. If this set, decrease_resolution_ratio setting will be ignored.')
parser.add_argument('--target_group', '-tgr', type=str, default=None,
help='target_group')
parser.add_argument('--test_only_mode', '-tomd', type=bool, default=None,
help='Whether calc output using test data only(without train) or not')
parser.add_argument('--mask_rate', '-mskr', type=float, default=None,
help='mask_rate')
parser.add_argument('--col_index_to_mask', '-citm', type=list, default=None,
                        help='Column indices to mask. If this is None while mask_rate > 0, none of the columns will be masked.')
parser.add_argument('--skip_invalid_data', '-sivld', type=bool, default=None,
help='skip_invalid_data')
parser.add_argument('--valid_data_range', '-vldr', type=list, default=None,
help='valid_data_range')
parser.add_argument('--plot_x_label', '-pxl', type=str, default=None,
help='plot_x_label')
parser.add_argument('--plot_y_label', '-pyl', type=str, default=None,
help='plot_y_label')
parser.add_argument('--plot_x_data_name_in_annotation', '-plxdnia', type=str, default=None,
help='plot_x_data_name_in_annotation')
parser.add_argument('--plot_group_data_name_in_annotation', '-plgdnia', type=str, default=None,
help='plot_group_data_name_in_annotation')
parser.add_argument('--plot_x_range', '-plxr', type=list, default=None,
help='plot_x_range')
parser.add_argument('--plot_y_range', '-plyr', type=list, default=None,
help='plot_y_range')
parser.add_argument('--plot_title', '-pltt', type=str, default=None,
help='plot_title')
parser.add_argument('--plot_errors', '-pler', type=list, default=None,
help='plot_errors')
parser.add_argument('--plot_animation', '-pla', type=bool, default=None,
help='plot_animation')
parser.add_argument('--calc_cc_errors', '-cce', type=bool, default=None,
help='calc_cc_errors')
parser.add_argument('--op_errors', '-opers', type=list, default=None,
help='op_errors')
parser.add_argument('--rank_boundary_list', '-rbl', type=list, default=None,
help='rank_boundary_list')
parser.add_argument('--cloud_root', '-clr', type=str, default=None,
help='String, cloud_root')
parser.add_argument('--prioritize_cloud', '-prcl', type=bool, default=False,
help='Boolean, prioritize_cloud')
    # frequencies for the tasks during iterations
parser.add_argument('--train_report_frequency', '-trrf', type=int, default=100,
help='train report frequency(default:100)')
parser.add_argument('--test_report_frequency', '-tsrf', type=int, default=100,
help='test report frequency(default:100)')
parser.add_argument('--save_model_frequency', '-smf', type=int, default=100,
help='save model frequency(default:100)')
parser.add_argument('--export_to_onnx', '-eto', type=bool, default=None,
                        help='Boolean, whether to export the trained model to ONNX format or not (Default: false).')
parser.add_argument('--summarize_layer_frequency', '-slf', type=int, default=1000,
                        help='Integer, summarize layer frequency (default:1000)')
    parser.add_argument('--summarize_layer_name_list', '-slnl', type=list, default=None,
help='List of String, summarize_layer_name_list(Default: None)')
parser.add_argument('--use_cache', '-ucch', type=bool, default=False,
help='Boolean, use_cache')
parser.add_argument('--cache_db_host', '-cchdbh', type=str, default='localhost',
help='String, cache_db_host')
parser.add_argument('--cache_data_set_id', '-cdsid', type=str, default=None,
help='String, Data set id. If None, then set with train_id (Default:None)')
parser.add_argument('--refresh_cache_data_set', '-rfds', type=bool, default=False,
help='Boolean, default: false. Whether to refresh train data stored with the key name or not.')
parser.add_argument('--json_param', '-jpr', type=str, default=None,
help='JSON String to set parameters')
parser.add_argument('--setting_file_path', '-sfp', type=str, default=None,
help='String, The setting file path of JSON String to set parameters')
parser.add_argument('--scrpit_test', '-sct', type=bool, default=False,
help='Boolean, scrpit_test')
args = parser.parse_args()
print('args:{}'.format(args))
exec_param = vars(args)
    print('init exec_param:{}'.format(exec_param))
main(exec_param)
| StarcoderdataPython |
194265 | <filename>matrix/common/aws/sqs_handler.py
import json
import boto3
from matrix.common.exceptions import MatrixException
class SQSHandler:
"""
Interface for interacting with SQS.
"""
def __init__(self):
self.sqs = boto3.resource('sqs')
def add_message_to_queue(self, queue_url: str, payload: dict):
response = self.sqs.meta.client.send_message(QueueUrl=queue_url,
MessageBody=json.dumps(payload))
status = response['ResponseMetadata']['HTTPStatusCode']
if status != 200:
raise MatrixException(status=500, title="Internal error",
detail=f"Adding message for {payload} "
f"was unsuccessful to SQS {queue_url} with status {status})")
def receive_messages_from_queue(self, queue_url: str, wait_time=15, num_messages=1):
response = self.sqs.meta.client.receive_message(QueueUrl=queue_url,
MaxNumberOfMessages=num_messages,
WaitTimeSeconds=wait_time)
status = response['ResponseMetadata']['HTTPStatusCode']
if status != 200:
raise MatrixException(status=500, title="Internal error",
detail=f"Retrieving message from {queue_url} "
f"was unsuccessful with status {status})")
return response.get('Messages')
def delete_message_from_queue(self, queue_url: str, receipt_handle: str):
response = self.sqs.meta.client.delete_message(QueueUrl=queue_url,
ReceiptHandle=receipt_handle)
status = response['ResponseMetadata']['HTTPStatusCode']
if status != 200:
raise MatrixException(status=500, title="Internal error",
detail=f"Deleting message with receipt handle {receipt_handle} from {queue_url} "
f"was unsuccessful with status {status})")
| StarcoderdataPython |
3224385 | from Queue import Queue
from domain import DomainUtils
from domain.ErrorTypes import ErrorTypes
from pipeline_generator.preprocessing.task import SpecialCaseHandler
# No need to keep data/state, so I did not make it a class..
# This will be safe for multi-thread use as well~
# Improve this...
def determine_generation_order(dependents_info, requireds_info, waiting_queue, special_edges):
error_code = ErrorTypes.NO_ERROR
if(special_edges is not None):
# Pass waiting queue in case any special cases needs to update it...
SpecialCaseHandler.update_dependents_and_requireds_for_special_cases(dependents_info, requireds_info, special_edges)
generation_order=[]
added_nodes=set()
# At this point, waiting queue has data-source and ModelLoad nodes.
while(not waiting_queue.empty()):
cur_node=waiting_queue.get()
if(cur_node not in added_nodes):
if((cur_node not in requireds_info) or (not bool(requireds_info[cur_node]))):
generation_order.append(cur_node)
added_nodes.add(cur_node)
__safe_delete(requireds_info, cur_node)
if(cur_node in dependents_info):
for dependent in dependents_info[cur_node]:
requireds_info[dependent].remove(cur_node)
waiting_queue.put(dependent)
__safe_delete(dependents_info, cur_node)
if(bool(requireds_info)):
# There must be a cycle if required_info still has elements at this moment
error_code = ErrorTypes.CYCLE_IN_GRAPH_ERROR
if(not bool(generation_order)):
error_code=ErrorTypes.EMPTY_GRAPH_ERROR
return generation_order, error_code
def preprocess_graph(graph):
dependents_info = {}
requireds_info = {}
waiting_queue = Queue()
for edge_id in graph["edges"]:
# Assuming directed edges such that first node is the source and the second node is the target.
node_ids = edge_id.split("-")
source_node_family = graph["nodes"][node_ids[0]]["family"]
__add_dependents_info(node_ids[0], node_ids[1], dependents_info)
__add_requireds_info(node_ids[1], node_ids[0], requireds_info)
# Nodes without incoming edges (requireds) will be processed first...
if(not DomainUtils.requires_incoming_edge(source_node_family)):
waiting_queue.put(node_ids[0])
return dependents_info, requireds_info, waiting_queue
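# Illustrative usage sketch. The node family names below are hypothetical, and
# the source family is assumed not to require an incoming edge; edges are keyed
# as "sourceId-targetId" strings, as expected by preprocess_graph:
#
#   graph = {
#       "nodes": {"1": {"family": "DATA_SOURCE"}, "2": {"family": "JOIN"}},
#       "edges": {"1-2": {}},
#   }
#   dependents_info, requireds_info, waiting_queue = preprocess_graph(graph)
#   order, error_code = determine_generation_order(
#       dependents_info, requireds_info, waiting_queue, special_edges=None)
#   # For this acyclic, non-empty graph: order == ["1", "2"], error_code == ErrorTypes.NO_ERROR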
def __add_dependents_info(current_node_id, dependent_node_id, dependents_info):
if (current_node_id not in dependents_info):
dependents_info[current_node_id] = set()
dependents_info[current_node_id].add(dependent_node_id)
def __add_requireds_info(current_node_id, required_node_id, requireds_info):
if (current_node_id not in requireds_info):
requireds_info[current_node_id] = set()
requireds_info[current_node_id].add(required_node_id)
def __safe_delete(dict, val):
if(val in dict):
        del dict[val]
 | StarcoderdataPython |
1706051 | #!/usr/bin/python
import os
import collections
AAs = set(["alanine", "arginine", "asparagine", "aspartate", "cysteine", "glutamine", "glycine", "leucine", "lysine", \
"methionine", "phenylalanine", "isoleucine", "histidine", "serine", "threonine", "tyrosine", "valine", "tryptophan", \
"glutamate", "proline"])
RECEPTOR_TO_LIGANDS = collections.defaultdict(list)
#This routine is used to write down first non-amino acids and then amino acids
def recordLigands(ligandsAndAffins, outputFile, indx):
aaLigands = []
for ligandAndAffin in ligandsAndAffins:
#First write down non-amino acids
if ligandAndAffin[0] not in AAs:
outputFile.write("\t" + ligandAndAffin[indx])
#Collecting amino acids here for the later recording
else:
aaLigands.append(ligandAndAffin)
for ligAndAff in aaLigands:
outputFile.write("\t" + ligAndAff[indx])
tempSet = set()
for sysObject in os.listdir("."):
if os.path.isdir(sysObject):
os.chdir(sysObject)
for sysObject2 in os.listdir("."):
if os.path.isdir(sysObject2):
receptorName = sysObject2
os.chdir(sysObject2)
for sysObject3 in os.listdir("."):
if os.path.isfile(sysObject3) and len(sysObject3.split("_")) > 1 and sysObject3.split("_")[1] == "log.txt":
print os.getcwd()
ligandName = sysObject3.split("_")[0]
with open (sysObject3) as inputFile:
bindingAffinityLine = inputFile.readlines()[26]
bindingAffinity = bindingAffinityLine.split(" ")[12]
tempSet.add(bindingAffinity)
RECEPTOR_TO_LIGANDS[receptorName].append((ligandName, bindingAffinity))
os.chdir("..")
os.chdir("..")
print tempSet
with open ("AutodockFolders_Summary2", "w") as outputFile:
#Write down headers (ligand names)
for receptor, ligandsAndAffins in RECEPTOR_TO_LIGANDS.items():
outputFile.write("*****************")
recordLigands(ligandsAndAffins, outputFile, 0)
break
#Write down receptors and ligand affinities
receptors = sorted(RECEPTOR_TO_LIGANDS.keys())
for receptor in receptors:
ligandsAndAffins = RECEPTOR_TO_LIGANDS[receptor]
#for receptor, ligandsAndAffins in RECEPTOR_TO_LIGANDS.items():
outputFile.write("\n" + receptor)
recordLigands(ligandsAndAffins, outputFile, 1)
| StarcoderdataPython |
3230721 | <filename>optimization/particle_swarm_plotter.py<gh_stars>1-10
#!/usr/bin/env python
"""Plotter displays positions of the particles during each iteration
as well as the evolution of the best value.
author: jussiks
"""
import numpy
import matplotlib.pyplot as plt
class Plotter:
"""Plotting object for diplaying charts during swarm optimization
algorithm.
"""
def __init__(self, swarm_plot_domain=None, value_plot_domain=None,
iterations_to_plot=100, sleep=0.2, func_str=None):
"""Plotter initialization.
Initializes a scatter plot to display particle positions and a line
chart to display best value.
"""
swarm_fig = plt.figure(1)
value_fig = plt.figure(2)
self.swarm_plot = swarm_fig.add_subplot(111)
self.value_plot = value_fig.add_subplot(111)
self.value_plot.set_xlabel("iterations")
self.value_plot.set_ylabel("f(gbest)")
self.value_plot_domain = value_plot_domain
self.swarm_plot_domain = swarm_plot_domain
self.iterations_to_plot = iterations_to_plot
self.func_str = func_str
self.sleep = sleep
if self.value_plot_domain:
self.value_plot.axis(self.value_plot_domain)
else:
self.value_plot_max_y = - float("inf")
self.value_plot_min_y = float("inf")
def set_up_value_plot(self, label):
"""Sets the label of the value plot."""
self.line, = self.value_plot.plot([], "-", label=label)
handles, labels = self.value_plot.get_legend_handles_labels()
self.value_plot.legend(handles=handles.append(self.line), loc=1)
def update_plots(self, swarm, gbest, iteration):
"""Redraws swarm_plot and value_plot."""
if iteration > self.iterations_to_plot:
return
self.swarm_plot.cla()
if self.swarm_plot_domain:
self.swarm_plot.axis(self.swarm_plot_domain)
else:
self.swarm_plot.axis(
[i for sublist in swarm[0].domain for i in sublist]
)
colormap = plt.get_cmap("gist_rainbow")
colors = len(swarm)
for i in range(len(swarm)):
color = colormap(float(i) / colors)
self.swarm_plot.plot(
swarm[i].variables[0], swarm[i].variables[1],
marker="o", markersize=10, color=color)
self.swarm_plot.quiver(
swarm[i].variables[0] - swarm[i].velocity[0],
swarm[i].variables[1] - swarm[i].velocity[1],
swarm[i].velocity[0],
swarm[i].velocity[1],
angles="xy", scale_units="xy", scale=1, width=0.002,
color=color, headlength=0, headwidth=1)
self.swarm_plot.plot(
gbest.variables[0],
gbest.variables[1],
marker="*", markersize=20, color="black",
label="Global best")
self.swarm_plot.legend(loc=1)
self.swarm_plot.set_xlabel("x[0]")
self.swarm_plot.set_ylabel("x[1]")
if not self.value_plot_domain:
# Adjust the y limits of the value plot
self.value_plot_max_y = max(
self.value_plot_max_y, gbest.best_value + 0.05)
self.value_plot_min_y = min(
self.value_plot_min_y, gbest.best_value - 0.05)
self.value_plot.axis(
[0, self.iterations_to_plot,
self.value_plot_min_y, self.value_plot_max_y]
)
x = numpy.append(self.line.get_xdata(), iteration)
y = numpy.append(self.line.get_ydata(), gbest.best_value)
self.line.set_xdata(x)
self.line.set_ydata(y)
plt.title("Swarm during iteration {0}\n{1}\nGbest {2}\n value {3}".format(
iteration, self.func_str, gbest.best_variables, gbest.best_value))
plt.pause(self.sleep)
def show(self):
plt.show()
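# Illustrative usage sketch. The swarm/gbest objects are hypothetical: the
# optimizer driving this plotter is assumed to supply particles exposing
# .variables, .velocity, .domain, .best_value and .best_variables, as read by
# update_plots():
#
#   plotter = Plotter(iterations_to_plot=50, sleep=0.1,
#                     func_str="f(x) = x[0]**2 + x[1]**2")
#   plotter.set_up_value_plot("particle swarm")
#   for iteration in range(50):
#       swarm, gbest = optimizer_step()   # hypothetical optimizer step
#       plotter.update_plots(swarm, gbest, iteration)
#   plotter.show()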
| StarcoderdataPython |
1747745 | <gh_stars>10-100
from amitools.vamos.error import *
from amitools.vamos.log import log_mem_alloc
from amitools.vamos.label import LabelRange, LabelStruct
from amitools.vamos.astructs import AccessStruct
class Memory:
def __init__(self, addr, size, label, access):
self.addr = addr
self.size = size
self.label = label
self.access = access
def __str__(self):
if self.label != None:
return str(self.label)
else:
return "[@%06x +%06x %06x]" % (self.addr, self.size, self.addr + self.size)
class MemoryChunk:
def __init__(self, addr, size):
self.addr = addr
self.size = size
self.next = None
self.prev = None
def __str__(self):
end = self.addr + self.size
return "[@%06x +%06x %06x]" % (self.addr, self.size, end)
def does_fit(self, size):
"""check if new size would fit into chunk
return < 0 if it does not fit, 0 for exact fit, > 0 n wasted bytes
"""
return self.size - size
class MemoryAlloc:
def __init__(self, mem, addr=0, size=0, label_mgr=None):
"""mem is a interface.
setup allocator starting at addr with size bytes.
if label_mgr is set then labels are created for allocations.
"""
# if no size is specified then take mem total
if size == 0:
size = mem.get_ram_size_kib() * 1024
size -= addr
# if addr == 0 skip 0 to never return NULL pointer!
if addr == 0:
addr = 4
size -= 4
self.mem = mem
self.addr = addr
self.size = size
self.label_mgr = label_mgr
# compat link
self.access = mem
self.addrs = {}
self.mem_objs = {}
# init free list
self.free_bytes = size
self.free_first = MemoryChunk(addr, self.free_bytes)
self.free_entries = 1
@classmethod
def for_machine(cls, machine):
return cls(
machine.get_mem(),
addr=machine.get_ram_begin(),
label_mgr=machine.get_label_mgr(),
)
def get_mem(self):
return self.mem
def get_addr(self):
return self.addr
def get_size(self):
return self.size
def get_label_mgr(self):
return self.label_mgr
def get_free_bytes(self):
return self.free_bytes
def is_all_free(self):
return self.size == self.free_bytes
def _find_best_chunk(self, size):
"""find best chunk that could take the given alloc
return: index of chunk in free list or -1 if none found + bytes left in chunk
"""
chunk = self.free_first
while chunk != None:
left = chunk.does_fit(size)
# exact match
if left == 0:
return (chunk, 0)
# potential candidate: has some bytes left
elif left > 0:
# Don't make such a hassle. Return the first one that fits.
# This function takes too much time.
return (chunk, left)
chunk = chunk.next
# nothing found?
return (None, -1)
def _remove_chunk(self, chunk):
next = chunk.next
prev = chunk.prev
if chunk == self.free_first:
self.free_first = next
if next != None:
next.prev = prev
if prev != None:
prev.next = next
self.free_entries -= 1
def _replace_chunk(self, old_chunk, new_chunk):
next = old_chunk.next
prev = old_chunk.prev
if old_chunk == self.free_first:
self.free_first = new_chunk
if next != None:
next.prev = new_chunk
if prev != None:
prev.next = new_chunk
new_chunk.next = next
new_chunk.prev = prev
def _insert_chunk(self, chunk):
cur = self.free_first
last = None
addr = chunk.addr
while cur != None:
# fits right before
if addr < cur.addr:
break
last = cur
cur = cur.next
        # insert after last but before cur
if last == None:
self.free_first = chunk
else:
last.next = chunk
chunk.prev = last
if cur != None:
chunk.next = cur
cur.prev = chunk
self.free_entries += 1
def _merge_chunk(self, a, b):
# can we merge?
if a.addr + a.size == b.addr:
chunk = MemoryChunk(a.addr, a.size + b.size)
prev = a.prev
if prev != None:
prev.next = chunk
chunk.prev = prev
next = b.next
if next != None:
next.prev = chunk
chunk.next = next
if self.free_first == a:
self.free_first = chunk
self.free_entries -= 1
return chunk
else:
return None
def _stat_info(self):
num_allocs = len(self.addrs)
return "(free %06x #%d) (allocs #%d)" % (
self.free_bytes,
self.free_entries,
num_allocs,
)
def alloc_mem(self, size, except_on_fail=True):
"""allocate memory and return addr or 0 if no more memory"""
# align size to 4 bytes
size = (size + 3) & ~3
# find best free chunk
chunk, left = self._find_best_chunk(size)
# out of memory?
if chunk == None:
if except_on_fail:
self.dump_orphans()
log_mem_alloc.error("[alloc: NO MEMORY for %06x bytes]" % size)
raise VamosInternalError("[alloc: NO MEMORY for %06x bytes]" % size)
return 0
# remove chunk from free list
# is something left?
addr = chunk.addr
if left == 0:
self._remove_chunk(chunk)
else:
left_chunk = MemoryChunk(addr + size, left)
self._replace_chunk(chunk, left_chunk)
# add to valid allocs map
self.addrs[addr] = size
self.free_bytes -= size
# erase memory
self.mem.clear_block(addr, size, 0)
log_mem_alloc.info(
"[alloc @%06x-%06x: %06x bytes] %s",
addr,
addr + size,
size,
self._stat_info(),
)
if addr % 4:
raise VamosInternalError(
"Memory pool is invalid, return address not aligned by a long word"
)
return addr
def free_mem(self, addr, size):
# first check if its a right alloc
if addr not in self.addrs:
raise VamosInternalError("Invalid Free'd Memory at %06x" % addr)
# align size to 4 bytes
size = (size + 3) & ~3
real_size = self.addrs[addr]
assert size == real_size
# remove from valid allocs
del self.addrs[addr]
# create a new free chunk
chunk = MemoryChunk(addr, real_size)
self._insert_chunk(chunk)
# try to merge with prev/next
prev = chunk.prev
if prev != None:
new_chunk = self._merge_chunk(prev, chunk)
if new_chunk != None:
log_mem_alloc.debug(
"merged: %s + this=%s -> %s", prev, chunk, new_chunk
)
chunk = new_chunk
next = chunk.next
if next != None:
new_chunk = self._merge_chunk(chunk, next)
if new_chunk != None:
log_mem_alloc.debug(
"merged: this=%s + %s -> %s", chunk, next, new_chunk
)
# correct free bytes
self.free_bytes += size
num_allocs = len(self.addrs)
log_mem_alloc.info(
"[free @%06x-%06x: %06x bytes] %s",
addr,
addr + size,
size,
self._stat_info(),
)
def get_range_by_addr(self, addr):
if addr in self.addrs:
return self.addrs[addr]
else:
return None
def dump_mem_state(self):
chunk = self.free_first
num = 0
while chunk != None:
log_mem_alloc.debug("dump #%02d: %s" % (num, chunk))
num += 1
chunk = chunk.next
def _dump_orphan(self, addr, size):
log_mem_alloc.warning("orphan: [@%06x +%06x %06x]" % (addr, size, addr + size))
if self.label_mgr is not None:
labels = self.label_mgr.get_intersecting_labels(addr, size)
for l in labels:
log_mem_alloc.warning("-> %s", l)
def dump_orphans(self):
last = self.free_first
# orphan at begin?
if last.addr != self.addr:
addr = self.addr
size = last.addr - addr
self._dump_orphan(addr, size)
# walk along free list
cur = last.next
while cur != None:
addr = last.addr + last.size
size = cur.addr - addr
self._dump_orphan(addr, size)
last = cur
cur = cur.next
# orphan at end?
addr = last.addr + last.size
end = self.addr + self.size
if addr != end:
self._dump_orphan(addr, end - addr)
# ----- convenience functions with label creation -----
def get_memory(self, addr):
if addr in self.mem_objs:
return self.mem_objs[addr]
else:
return None
# memory
def alloc_memory(self, name, size, add_label=True, except_on_failure=True):
addr = self.alloc_mem(size, except_on_failure)
if addr == 0:
return None
if add_label and self.label_mgr is not None:
label = LabelRange(name, addr, size)
self.label_mgr.add_label(label)
else:
label = None
mem = Memory(addr, size, label, self.mem)
log_mem_alloc.info("alloc memory: %s", mem)
self.mem_objs[addr] = mem
return mem
def free_memory(self, mem):
log_mem_alloc.info("free memory: %s", mem)
if mem.label != None:
self.label_mgr.remove_label(mem.label)
self.free_mem(mem.addr, mem.size)
del self.mem_objs[mem.addr]
# struct
def alloc_struct(self, name, struct, size=None, add_label=True):
if size is None:
size = struct.get_size()
addr = self.alloc_mem(size)
if self.label_mgr is not None and add_label:
label = LabelStruct(name, addr, struct)
self.label_mgr.add_label(label)
else:
label = None
access = AccessStruct(self.mem, struct, addr)
mem = Memory(addr, size, label, access)
log_mem_alloc.info("alloc struct: %s", mem)
self.mem_objs[addr] = mem
return mem
def map_struct(self, name, addr, struct):
size = struct.get_size()
access = AccessStruct(self.mem, struct, addr)
if self.label_mgr is not None:
label = self.label_mgr.get_label(addr)
else:
label = None
mem = Memory(addr, size, label, access)
log_mem_alloc.info("map struct: %s", mem)
return mem
def free_struct(self, mem):
log_mem_alloc.info("free struct: %s", mem)
if self.label_mgr is not None:
self.label_mgr.remove_label(mem.label)
self.free_mem(mem.addr, mem.size)
del self.mem_objs[mem.addr]
# cstr
def alloc_cstr(self, name, cstr):
size = len(cstr) + 1
addr = self.alloc_mem(size)
if self.label_mgr is not None:
label = LabelRange(name, addr, size)
self.label_mgr.add_label(label)
else:
label = None
self.mem.w_cstr(addr, cstr)
mem = Memory(addr, size, label, self.mem)
log_mem_alloc.info("alloc c_str: %s", mem)
self.mem_objs[addr] = mem
return mem
def free_cstr(self, mem):
log_mem_alloc.info("free c_str: %s", mem)
if self.label_mgr is not None:
self.label_mgr.remove_label(mem.label)
self.free_mem(mem.addr, mem.size)
del self.mem_objs[mem.addr]
# bstr
def alloc_bstr(self, name, bstr):
size = len(bstr) + 2 # front: count, end: extra zero for safety
addr = self.alloc_mem(size)
if self.label_mgr is not None:
label = LabelRange(name, addr, size)
self.label_mgr.add_label(label)
else:
label = None
self.mem.w_bstr(addr, bstr)
mem = Memory(addr, size, label, self.mem)
log_mem_alloc.info("alloc b_str: %s", mem)
self.mem_objs[addr] = mem
return mem
def free_bstr(self, mem):
log_mem_alloc.info("free b_str: %s", mem)
if self.label_mgr is not None:
self.label_mgr.remove_label(mem.label)
self.free_mem(mem.addr, mem.size)
del self.mem_objs[mem.addr]
def is_valid_address(self, addr):
if addr >= self.addr and addr < self.addr + self.size:
return True
return False
def total(self):
return self.size
def available(self):
free = 0
chunk = self.free_first
while chunk != None:
free += chunk.size
chunk = chunk.next
return free
def largest_chunk(self):
largest = 0
chunk = self.free_first
while chunk != None:
if chunk.size > largest:
largest = chunk.size
chunk = chunk.next
return largest
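# Illustrative usage sketch. "machine" stands for a hypothetical machine object
# providing the memory interface and label manager that for_machine() expects:
#
#   alloc = MemoryAlloc.for_machine(machine)
#   mem = alloc.alloc_memory("scratch_buffer", 256)   # returns a Memory object
#   do_something_with(mem.addr)                       # hypothetical consumer
#   alloc.free_memory(mem)
#   assert alloc.is_all_free()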
| StarcoderdataPython |
126749 | <gh_stars>100-1000
"""
CMT Unit Test framework.
"""
from cmt.test.mayaunittest import TestCase
__all__ = ["TestCase", "run_tests"]
| StarcoderdataPython |
20741 | <reponame>linuxluigi/success-backup-check
import pytest
import success_backup_check
def test_project_defines_author_and_version():
assert hasattr(success_backup_check, '__author__')
assert hasattr(success_backup_check, '__version__')
| StarcoderdataPython |
70091 | # %% [1221. Split a String in Balanced Strings](https://leetcode.com/problems/split-a-string-in-balanced-strings/)
class Solution:
def balancedStringSplit(self, s: str) -> int:
res = cnt = 0
for c in s:
cnt += (c == "L") * 2 - 1
res += not cnt
return res
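# Brief worked example: for s = "RLRRLLRLRL" the running count (+1 for "L",
# -1 for "R") returns to zero after "RL", "RRLL", "RL" and "RL", so the method
# returns 4.
#
#   assert Solution().balancedStringSplit("RLRRLLRLRL") == 4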
| StarcoderdataPython |
3352956 | <gh_stars>1-10
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
from Crypto.Random import random
g_salt1 = b"12345678"
g_salt2 = bytes("12345678", "utf8")
def p_example1_hard_coded1(password, data):
key = PBKDF2(password, b"<PASSWORD>", 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example2_hard_coded2(password, data):
key = PBKDF2(password, bytes("12345678", "utf8"), 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example3_local_variable1(password, data):
salt = b"12345678"
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example4_local_variable2(password, data):
salt = bytes("12345678", "utf8")
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example5_nested_local_variable1(password, data):
salt1 = b"12345678"
salt2 = salt1
salt3 = salt2
key = PBKDF2(password, salt3, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example6_nested_local_variable2(password, data):
salt1 = bytes("12345678", "utf8")
salt2 = salt1
salt3 = salt2
key = PBKDF2(password, salt3, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example_method_call(password, salt, data):
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example_nested_method_call(password, salt, data):
return p_example_method_call(password, salt, data)
def p_example7_direct_method_call1(password, data):
salt = b"12345678"
return p_example_method_call(password, salt, data)
def p_example8_direct_method_call2(password, data):
salt = bytes("12345678", "utf8")
return p_example_method_call(password, salt, data)
def p_example9_nested_method_call1(password, data):
salt = b"<PASSWORD>"
return p_example_nested_method_call(password, salt, data)
def p_example10_nested_method_call2(password, data):
salt = bytes("12345678", "utf8")
return p_example_nested_method_call(password, salt, data)
def p_example11_direct_g_variable_access1(password, data):
key = PBKDF2(password, g_salt1, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example12_direct_g_variable_access2(password, data):
key = PBKDF2(password, g_salt2, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example13_indirect_g_variable_access1(password, data):
salt = g_salt1
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example14_indirect_g_variable_access2(password, data):
salt = g_salt2
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def p_example15_warning_parameter_not_resolvable(password, salt, data):
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
def n_example1_random_salt(password, data):
salt = random.getrandbits(16).to_bytes(16, 'big')
key = PBKDF2(password, salt, 16, count=1000)
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(data)
return cipher_text
| StarcoderdataPython |
39924 | #!/usr/bin/env python
# Copyright (c) 02004, The Long Now Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, with_statement
import sys
import string
import math
import csv
import gd
__author__ = "<NAME> and <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.1"
__copyright__ = "Copyright (c) 02004 The Long Now Foundation"
__license__ = "BSD-style"
__status__ = "Beta"
class SlicedImage:
def __init__(self, width, height):
self.__image = gd.image((width, height))
self.__xOffset = 0
self.__height = height
self.diamondWidth = 7
"""Width of any diamonds rendered."""
self.diamondLeftMargin = 2
"""Width of horizontal margin on the left side a diamond"""
self.diamondVerticalMargin = 0
"""Height of vertical margin above and below a diamond"""
self.diamondColor = (0xff, 0xff, 0xff)
"""Color used to render diamonds."""
return
def addSlice(self, fields):
"""Add a slice to this image"""
# Returns a tuple of adjusted r, g, b integers given tuple of integers.
def computeRgbValues(rgb, saturation, brightness):
return map((lambda x:
adjustColorComponent(x, saturation, brightness)), rgb)
# Returns an integer value for the given component, adjusted for
# brightness and saturation.
def adjustColorComponent(component, saturation, brightness):
answer = component * saturation + brightness * 255.0 * \
(1 - saturation)
return int(round(answer))
width = fields["width"]
height = fields["height"]
saturation = fields["saturation"]
brightness = fields["brightness"]
upperHeight = int(round(height - (height * fields["lowerSize"])))
tl = (self.__xOffset, upperHeight + 1)
br = (self.__xOffset + width, height)
lowerColor = self.__image.colorAllocate(
computeRgbValues(fields["lowerColor"], saturation, brightness))
self.__image.filledRectangle(tl, br, lowerColor)
tl = (self.__xOffset, upperHeight)
br = (self.__xOffset + width, upperHeight + 1)
dividerColor = self.__image.colorAllocate(
computeRgbValues(fields["dividerColor"], saturation, brightness))
self.__image.filledRectangle(tl, br, dividerColor)
tl = (self.__xOffset, 0)
br = (self.__xOffset + width, upperHeight)
upperColor = self.__image.colorAllocate(
computeRgbValues(fields["upperColor"], saturation, brightness))
self.__image.filledRectangle(tl, br, upperColor)
self.__xOffset += width
return
def drawDiamond(self, startX):
"""Draw a diamond whose leftmost point is at startX."""
# allocate diamond color, if we haven't already
if not hasattr(self, '__diamondColorIndex'):
self.__diamondColorIndex = self.__image.colorAllocate(
self.diamondColor)
# calculate the points
bottomPoint = (startX + int(math.ceil(self.diamondWidth/2)),
self.__height-1 - self.diamondVerticalMargin)
rightPoint = (startX + self.diamondWidth-1, int((self.__height-1)/2))
topPoint = (startX + int(math.ceil(self.diamondWidth/2)),
self.diamondVerticalMargin)
leftPoint = (startX + self.diamondLeftMargin, int(self.__height/2))
# draw the diamond
diamond = (bottomPoint, rightPoint, topPoint, leftPoint)
self.__image.filledPolygon(diamond, self.__diamondColorIndex)
return
def drawFilledSlice(self, startX, width, color):
"""Draw a filled slice
startX -- X coordinate where slice should start
width -- width in pixels
color -- color tuple
"""
colorIndex = self.__image.colorAllocate(color)
self.__image.filledRectangle( (startX, 0),
(startX + width - 1, self.__height),
colorIndex )
return
def generate(self, filename):
"""Write out a PNG file for this image"""
# write out the file
file = open(filename, "w")
self.__image.writePng(file)
file.close()
return
# Returns a tuple of red, green, and blue integer values, given a hex string
def parseHexColor(hex):
justHex = hex.split('#')[-1]
return map((lambda x: string.atoi(x, 16)), (justHex[0:2], justHex[2:4], justHex[4:6]))
def calculateTotalSize(inputFileName):
width = 0
height = 0
reader = csv.reader(file(inputFileName))
for row in reader:
fields = mapInputFields(row)
width += fields["width"]
if fields["height"] > height:
height = fields["height"]
return int(width), int(height)
# Given a list of input values, return a dictionary of same. This allows us to
# deal with column positions in one place only.
def mapInputFields(row):
fields = ["height", "width", "lowerSize", "lowerColor", "dividerColor", "upperColor", "saturation", "brightness"]
numericFieldNos = [0, 1, 2, 6, 7]
percentFieldNos = [2, 6, 7]
hexFieldNos = [3, 4, 5]
i = 0
answer = {}
for field in fields:
value = row[i]
if i in numericFieldNos:
value = string.atof(value)
if i in percentFieldNos:
value = value / 100.0
else:
if i in hexFieldNos:
value = parseHexColor(value)
answer[field] = value
i = i + 1
return answer
def makeSlices(width, height, inputFileName, outputFileName):
slices = SlicedImage(width, height);
reader = csv.reader(file(inputFileName))
for row in reader:
fields = mapInputFields(row)
slices.addSlice(fields)
slices.generate(outputFileName)
return
# We write an image per csv file.
def generateImage(inFile, outFile):
print "Reading from %s and writing to %s" % (inFile, outFile)
width, height = calculateTotalSize(inFile)
slices = makeSlices(width, height, inFile, outFile)
if __name__ == "__main__":
if len(sys.argv) == 1:
print "Usage: %s csvfile [pngfile]" % sys.argv[0]
print " If pngfile is not specified, its name is derived from csvfile"
else:
inFile = sys.argv[1]
if len(sys.argv) == 2:
outFile = inFile.split(".")[0] + ".png"
else:
outFile = sys.argv[2]
generateImage(inFile, outFile)
| StarcoderdataPython |
3340020 | #!/usr/bin/env python
from setuptools import setup
import versioneer
setup(name='gglsbl',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Client library for Google Safe Browsing Update API v4",
classifiers=[
"Operating System :: POSIX",
"Environment :: Console",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Internet",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='google safe browsing api client',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/afilipovich/gglsbl',
license='Apache2',
packages=['gglsbl'],
install_requires=['google-api-python-client>=1.4.2,<2'],
scripts=['bin/gglsbl_client.py'],
)
| StarcoderdataPython |
76026 | <filename>trx/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
from . import azav
from . import utils
from . import mask
from . import cell
from . import filters
from . import id9
from . import dataReduction
from . import center
from datastorage import DataStorage, read, save
try:
from . import peaks
except ImportError as err:
print("Can't import submodule peaks, reason was:",err)
__version__ = "0.6.6"
| StarcoderdataPython |
3200349 | <filename>torch_geometric/__init__.py
__version__ = '1.2.2'
__all__ = ['__version__']
| StarcoderdataPython |
3236700 | <filename>test/unit/mysql_log_admin/run_program.py
#!/usr/bin/python
# Classification (U)
"""Program: run_program.py
Description: Unit testing of run_program in mysql_log_admin.py.
Usage:
test/unit/mysql_log_admin/run_program.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mysql_log_admin
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
def fetch_log_pos(server, args_array, opt_arg_list):
"""Method: fetch_log_pos
Description: Stub holder for mysql_log_admin.fetch_log_pos function.
Arguments:
"""
status = True
if server and args_array and opt_arg_list:
status = True
return status
class Server(object):
"""Class: Server
Description: Class stub holder for mysql_class.Server class.
Methods:
__init__
connect
set_srv_binlog_crc
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.extra_def_file = None
self.sql_user = "mysql"
self.host = "hostname"
self.port = 3306
self.name = "Server_Name"
self.conn = "Connection Handler"
self.conn_msg = None
def connect(self, silent):
"""Method: connect
Description: Method stub holder for mysql_class.Server.connect.
Arguments:
(input) silent
"""
status = True
if silent:
status = True
return status
def set_srv_binlog_crc(self):
"""Method: set_srv_binlog_crc
Description: Stub holder for mysql_class.Server.set_srv_binlog_crc.
Arguments:
"""
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_connect_failure
test_connect_successful
test_run_program
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.opt_arg_list = ["--force-read", "--read-from-remote-server"]
self.args_array = {"-c": True, "-d": True, "-L": True}
self.func_dict = {"-L": fetch_log_pos}
self.server = Server()
@mock.patch("mysql_log_admin.mysql_libs.disconnect",
mock.Mock(return_value=True))
@mock.patch("mysql_log_admin.mysql_libs.create_instance")
def test_connect_failure(self, mock_server):
"""Function: test_connect_failure
Description: Test with failed connection.
Arguments:
"""
self.server.conn_msg = "Error connection message"
mock_server.return_value = self.server
with gen_libs.no_std_out():
self.assertFalse(mysql_log_admin.run_program(
self.args_array, self.func_dict, self.opt_arg_list))
@mock.patch("mysql_log_admin.mysql_libs.disconnect",
mock.Mock(return_value=True))
@mock.patch("mysql_log_admin.mysql_libs.create_instance")
def test_connect_successful(self, mock_server):
"""Function: test_connect_successful
Description: Test with successful connection.
Arguments:
"""
mock_server.return_value = self.server
self.assertFalse(mysql_log_admin.run_program(
self.args_array, self.func_dict, self.opt_arg_list))
@mock.patch("mysql_log_admin.mysql_libs.disconnect",
mock.Mock(return_value=True))
@mock.patch("mysql_log_admin.mysql_libs.create_instance")
def test_run_program(self, mock_server):
"""Function: test_run_program
Description: Test with only default arguments passed.
Arguments:
"""
mock_server.return_value = self.server
self.assertFalse(mysql_log_admin.run_program(
self.args_array, self.func_dict, self.opt_arg_list))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1741181 | #!/usr/bin/env python3
r"""
This module provides functions which are useful for running plug-ins.
"""
import sys
import os
import glob
import gen_print as gp
import gen_misc as gm
# Some help text that is common to more than one program.
plug_in_dir_paths_help_text = \
'This is a colon-separated list of plug-in directory paths. If one' +\
' of the entries in the list is a plain directory name (i.e. no' +\
' path info), it will be taken to be a native plug-in. In that case,' +\
' %(prog)s will search for the native plug-in in the "plug-ins"' +\
' subdirectory of each path in the PATH environment variable until it' +\
' is found. Also, integrated plug-ins will automatically be appended' +\
' to your plug_in_dir_paths list. An integrated plug-in is any plug-in' +\
' found using the PATH variable that contains a file named "integrated".'
mch_class_help_text = \
'The class of machine that we are testing (e.g. "op" = "open power",' +\
' "obmc" = "open bmc", etc).'
PATH_LIST = gm.return_path_list()
def get_plug_in_base_paths():
r"""
Get plug-in base paths and return them as a list.
This function searches the PATH_LIST (created from PATH environment variable) for any paths that have a
"plug_ins" subdirectory. All such paths are considered plug_in_base paths.
"""
global PATH_LIST
plug_in_base_path_list = []
for path in PATH_LIST:
candidate_plug_in_base_path = path + "plug_ins/"
if os.path.isdir(candidate_plug_in_base_path):
plug_in_base_path_list.append(candidate_plug_in_base_path)
return plug_in_base_path_list
# Define global plug_in_base_path_list and call get_plug_in_base_paths to set its value.
plug_in_base_path_list = get_plug_in_base_paths()
def find_plug_in_package(plug_in_name):
r"""
Find and return the normalized directory path of the specified plug in. This is done by searching the
global plug_in_base_path_list.
Description of arguments:
plug_in_name The unqualified name of the plug-in package.
"""
global plug_in_base_path_list
for plug_in_base_dir_path in plug_in_base_path_list:
candidate_plug_in_dir_path = os.path.normpath(plug_in_base_dir_path
+ plug_in_name) + \
os.sep
if os.path.isdir(candidate_plug_in_dir_path):
return candidate_plug_in_dir_path
return ""
def validate_plug_in_package(plug_in_dir_path,
mch_class="obmc"):
r"""
Validate the plug in package and return the normalized plug-in directory path.
Description of arguments:
plug_in_dir_path The "relative" or absolute path to a plug in package directory.
mch_class The class of machine that we are testing (e.g. "op" = "open power",
"obmc" = "open bmc", etc).
"""
gp.dprint_executing()
if os.path.isabs(plug_in_dir_path):
# plug_in_dir_path begins with a slash so it is an absolute path.
candidate_plug_in_dir_path = os.path.normpath(plug_in_dir_path) +\
os.sep
if not os.path.isdir(candidate_plug_in_dir_path):
gp.print_error_report("Plug-in directory path \""
+ plug_in_dir_path + "\" does not exist.\n")
exit(1)
else:
# The plug_in_dir_path is actually a simple name (e.g. "OBMC_Sample")...
candidate_plug_in_dir_path = find_plug_in_package(plug_in_dir_path)
if candidate_plug_in_dir_path == "":
global PATH_LIST
gp.print_error_report("Plug-in directory path \""
+ plug_in_dir_path + "\" could not be found"
+ " in any of the following directories:\n"
+ gp.sprint_var(PATH_LIST))
exit(1)
# Make sure that this plug-in supports us...
supports_file_path = candidate_plug_in_dir_path + "supports_" + mch_class
if not os.path.exists(supports_file_path):
gp.print_error_report("The following file path could not be"
+ " found:\n"
+ gp.sprint_varx("supports_file_path",
supports_file_path)
+ "\nThis file is necessary to indicate that"
+ " the given plug-in supports the class of"
+ " machine we are testing, namely \""
+ mch_class + "\".\n")
exit(1)
return candidate_plug_in_dir_path
def return_integrated_plug_ins(mch_class="obmc"):
r"""
Return a list of integrated plug-ins. Integrated plug-ins are plug-ins which are selected without regard
for whether the user has specified them. In other words, they are "integrated" into the program suite.
The programmer designates a plug-in as integrated by putting a file named "integrated" into the plug-in
package directory.
Description of arguments:
mch_class The class of machine that we are testing (e.g. "op" = "open power",
"obmc" = "open bmc", etc).
"""
global plug_in_base_path_list
integrated_plug_ins_list = []
DEBUG_SKIP_INTEGRATED = int(os.getenv('DEBUG_SKIP_INTEGRATED', '0'))
if DEBUG_SKIP_INTEGRATED:
return integrated_plug_ins_list
for plug_in_base_path in plug_in_base_path_list:
# Get a list of all plug-in paths that support our mch_class.
mch_class_candidate_list = glob.glob(plug_in_base_path
+ "*/supports_" + mch_class)
for candidate_path in mch_class_candidate_list:
integrated_plug_in_dir_path = os.path.dirname(candidate_path) +\
os.sep
integrated_file_path = integrated_plug_in_dir_path + "integrated"
if os.path.exists(integrated_file_path):
plug_in_name = \
os.path.basename(os.path.dirname(candidate_path))
if plug_in_name not in integrated_plug_ins_list:
# If this plug-in has not already been added to the list...
integrated_plug_ins_list.append(plug_in_name)
return integrated_plug_ins_list
def return_plug_in_packages_list(plug_in_dir_paths,
mch_class="obmc"):
r"""
Return a list of plug-in packages given the plug_in_dir_paths string. This function calls
validate_plug_in_package so it will fail if plug_in_dir_paths contains any invalid plug-ins.
Description of arguments:
    plug_in_dir_paths               A colon-separated list of plug-in directory paths.
mch_class The class of machine that we are testing (e.g. "op" = "open power",
"obmc" = "open bmc", etc).
"""
if plug_in_dir_paths != "":
plug_in_packages_list = plug_in_dir_paths.split(":")
else:
plug_in_packages_list = []
# Get a list of integrated plug-ins (w/o full path names).
integrated_plug_ins_list = return_integrated_plug_ins(mch_class)
# Put both lists together in plug_in_packages_list with no duplicates. NOTE: This won't catch
# duplicates if the caller specifies the full path name of a native plug-in but that should be rare
# enough.
plug_in_packages_list = plug_in_packages_list + integrated_plug_ins_list
plug_in_packages_list = \
list(set([validate_plug_in_package(path, mch_class)
for path in plug_in_packages_list]))
return plug_in_packages_list
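# Illustrative usage sketch. "OBMC_Sample" and the absolute path below are
# hypothetical plug-in locations; the real values depend on the caller's PATH
# and on which plug-in packages are installed:
#
#   plug_in_packages_list = return_plug_in_packages_list(
#       "OBMC_Sample:/home/user/my_plug_ins/My_Plug_In", mch_class="obmc")
#   for plug_in_dir_path in plug_in_packages_list:
#       print(plug_in_dir_path)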
| StarcoderdataPython |
1605700 | <filename>util/datadog.py
from Utility import resources as ex
from datadog import initialize, api
import time
class DataDog:
@staticmethod
def initialize_data_dog():
"""Initialize The DataDog Class"""
initialize()
@staticmethod
def send_metric(metric_name, value):
"""Send a metric value to DataDog."""
        # Some values at 0 are important (such as active games); this check ensures they are still reported when the value is 0.
metrics_at_zero = ['bias_games', 'guessing_games', 'commands_per_minute', 'n_words_per_minute',
'bot_api_idol_calls', 'bot_api_translation_calls', 'messages_received_per_min',
'errors_per_minute', 'wolfram_per_minute', 'urban_per_minute']
if metric_name in metrics_at_zero and not value:
value = 0
else:
if not value:
return
if ex.test_bot:
metric_name = 'test_bot_' + metric_name
else:
metric_name = 'irene_' + metric_name
api.Metric.send(metric=metric_name, points=[(time.time(), value)])
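# Illustrative usage sketch. Datadog credentials are assumed to be supplied via
# the environment before initialize() is called, and ex.test_bot determines the
# metric-name prefix:
#
#   DataDog.initialize_data_dog()
#   DataDog.send_metric("bias_games", 0)       # in metrics_at_zero, so still reported at 0
#   DataDog.send_metric("custom_counter", 0)   # other falsy values are silently skipped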
| StarcoderdataPython |
1634184 | import asyncio
from copy import deepcopy
from dataclasses import dataclass
from importlib.resources import path
from subprocess import Popen
from typing import List, Optional
import google.protobuf
from multiaddr import Multiaddr
import hivemind.hivemind_cli as cli
import hivemind.p2p.p2p_daemon_bindings.p2pclient as p2pclient
from hivemind.p2p.p2p_daemon_bindings.datastructures import PeerID, StreamInfo
from hivemind.proto import p2pd_pb2
from hivemind.utils import MSGPackSerializer
from hivemind.utils.logging import get_logger
from hivemind.utils.networking import find_open_port
logger = get_logger(__name__)
P2PD_FILENAME = 'p2pd'
NUM_RETRIES = 3
RETRY_DELAY = 0.4
class P2PInterruptedError(Exception):
pass
@dataclass(frozen=False)
class P2PContext(object):
id: str
port: int
handle_name: str
peer_id: PeerID = None
peer_addr: Multiaddr = None
class P2P:
"""
Forks a child process and executes p2pd command with given arguments.
Can be used for peer to peer communication and procedure calls.
Sends SIGKILL to the child in destructor.
"""
HEADER_LEN = 8
BYTEORDER = 'big'
PB_HEADER_LEN = 1
RESULT_MESSAGE = b'\x00'
ERROR_MESSAGE = b'\x01'
DHT_MODE_MAPPING = {
'dht': {'dht': 1},
'dht_server': {'dhtServer': 1},
'dht_client': {'dhtClient': 1},
}
FORCE_REACHABILITY_MAPPING = {
'public': {'forceReachabilityPublic': 1},
'private': {'forceReachabilityPrivate': 1},
}
def __init__(self):
self._child = None
self._alive = False
self._listen_task = None
self._server_stopped = asyncio.Event()
@classmethod
async def create(cls, *args, quic: bool = True, tls: bool = True, conn_manager: bool = True,
dht_mode: str = 'dht_server', force_reachability: Optional[str] = None,
nat_port_map: bool = True, auto_nat: bool = True, bootstrap: bool = False,
bootstrap_peers: Optional[List[str]] = None, use_global_ipfs: bool = False, host_port: int = None,
daemon_listen_port: int = None, **kwargs):
"""
Start a new p2pd process and connect to it.
:param args:
:param quic: Enables the QUIC transport
:param tls: Enables TLS1.3 channel security protocol
:param conn_manager: Enables the Connection Manager
:param dht_mode: DHT mode (dht_client/dht_server/dht)
:param force_reachability: Force reachability mode (public/private)
:param nat_port_map: Enables NAT port mapping
:param auto_nat: Enables the AutoNAT service
:param bootstrap: Connects to bootstrap peers and bootstraps the dht if enabled
:param bootstrap_peers: List of bootstrap peers; defaults to the IPFS DHT peers
:param use_global_ipfs: Bootstrap to global ipfs (works only if bootstrap=True and bootstrap_peers=None)
:param host_port: port for p2p network
:param daemon_listen_port: port for connection daemon and client binding
:param kwargs:
:return: new wrapper for p2p daemon
"""
assert not (bootstrap and bootstrap_peers is None and not use_global_ipfs), \
'Trying to create with bootstrap node without bootstrap nodes list. ' \
'It is very dangerous, because p2pd connects to global ipfs and it is very unstable. ' \
'If you really want this, pass use_global_ipfs=True'
assert not (bootstrap_peers is not None and use_global_ipfs), \
'Non empty bootstrap_nodes and use_global_ipfs=True are incompatible.' \
'Choose one option: your nodes list (preferable) or global ipfs (very unstable)'
self = cls()
with path(cli, P2PD_FILENAME) as p:
p2pd_path = p
bootstrap_peers = cls._make_bootstrap_peers(bootstrap_peers)
dht = cls.DHT_MODE_MAPPING.get(dht_mode, {'dht': 0})
force_reachability = cls.FORCE_REACHABILITY_MAPPING.get(force_reachability, {})
proc_args = self._make_process_args(
str(p2pd_path), *args,
quic=quic, tls=tls, connManager=conn_manager,
natPortMap=nat_port_map, autonat=auto_nat,
b=bootstrap, **{**bootstrap_peers, **dht, **force_reachability, **kwargs})
self._assign_daemon_ports(host_port, daemon_listen_port)
for try_count in range(NUM_RETRIES):
try:
self._initialize(proc_args)
await self._wait_for_client(RETRY_DELAY * (2 ** try_count))
break
except Exception as e:
logger.debug(f"Failed to initialize p2p daemon: {e}")
self._terminate()
if try_count == NUM_RETRIES - 1:
raise
self._assign_daemon_ports()
return self
@classmethod
async def replicate(cls, daemon_listen_port: int, host_port: int):
"""
Connect to existing p2p daemon
:param daemon_listen_port: port for connection daemon and client binding
:param host_port: port for p2p network
:return: new wrapper for existing p2p daemon
"""
self = cls()
# There is no child under control
# Use external already running p2pd
self._child = None
self._alive = True
self._assign_daemon_ports(host_port, daemon_listen_port)
self._client_listen_port = find_open_port()
self._client = p2pclient.Client(
Multiaddr(f'/ip4/127.0.0.1/tcp/{self._daemon_listen_port}'),
Multiaddr(f'/ip4/127.0.0.1/tcp/{self._client_listen_port}'))
await self._wait_for_client()
return self
async def wait_for_at_least_n_peers(self, n_peers, attempts=3, delay=1):
for _ in range(attempts):
peers = await self._client.list_peers()
if len(peers) >= n_peers:
return
await asyncio.sleep(delay)
raise RuntimeError('Not enough peers')
def _initialize(self, proc_args: List[str]) -> None:
proc_args = deepcopy(proc_args)
proc_args.extend(self._make_process_args(
hostAddrs=f'/ip4/0.0.0.0/tcp/{self._host_port},/ip4/0.0.0.0/udp/{self._host_port}/quic',
            listen=f'/ip4/127.0.0.1/tcp/{self._daemon_listen_port}'
))
self._child = Popen(args=proc_args, encoding="utf8")
self._alive = True
self._client_listen_port = find_open_port()
self._client = p2pclient.Client(
Multiaddr(f'/ip4/127.0.0.1/tcp/{self._daemon_listen_port}'),
Multiaddr(f'/ip4/127.0.0.1/tcp/{self._client_listen_port}'))
async def _wait_for_client(self, delay=0):
await asyncio.sleep(delay)
encoded = await self._client.identify()
self.id = encoded[0].to_base58()
def _assign_daemon_ports(self, host_port=None, daemon_listen_port=None):
if host_port is None:
host_port = find_open_port()
if daemon_listen_port is None:
daemon_listen_port = find_open_port()
while daemon_listen_port == host_port:
daemon_listen_port = find_open_port()
self._host_port, self._daemon_listen_port = host_port, daemon_listen_port
@staticmethod
async def send_raw_data(byte_str, writer):
request = len(byte_str).to_bytes(P2P.HEADER_LEN, P2P.BYTEORDER) + byte_str
writer.write(request)
@staticmethod
async def send_msgpack(data, writer):
raw_data = MSGPackSerializer.dumps(data)
await P2P.send_raw_data(raw_data, writer)
@staticmethod
async def send_protobuf(protobuf, out_proto_type, writer):
if type(protobuf) != out_proto_type:
raise TypeError('Unary handler returned protobuf of wrong type.')
if out_proto_type == p2pd_pb2.RPCError:
await P2P.send_raw_data(P2P.ERROR_MESSAGE, writer)
else:
await P2P.send_raw_data(P2P.RESULT_MESSAGE, writer)
await P2P.send_raw_data(protobuf.SerializeToString(), writer)
@staticmethod
async def receive_raw_data(reader: asyncio.StreamReader, header_len=HEADER_LEN):
header = await reader.readexactly(header_len)
content_length = int.from_bytes(header, P2P.BYTEORDER)
data = await reader.readexactly(content_length)
return data
@staticmethod
async def receive_msgpack(reader):
return MSGPackSerializer.loads(await P2P.receive_raw_data(reader))
@staticmethod
async def receive_protobuf(in_proto_type, reader):
msg_type = await P2P.receive_raw_data(reader)
if msg_type == P2P.RESULT_MESSAGE:
protobuf = in_proto_type()
protobuf.ParseFromString(await P2P.receive_raw_data(reader))
return protobuf, None
elif msg_type == P2P.ERROR_MESSAGE:
protobuf = p2pd_pb2.RPCError()
protobuf.ParseFromString(await P2P.receive_raw_data(reader))
return None, protobuf
else:
raise TypeError('Invalid Protobuf message type')
@staticmethod
def _handle_stream(handle):
async def do_handle_stream(stream_info, reader, writer):
try:
request = await P2P.receive_raw_data(reader)
except asyncio.IncompleteReadError:
logger.debug("Incomplete read while receiving request from peer")
writer.close()
return
try:
result = handle(request)
await P2P.send_raw_data(result, writer)
finally:
writer.close()
return do_handle_stream
@staticmethod
def _handle_unary_stream(handle, context, in_proto_type, out_proto_type):
async def watchdog(reader: asyncio.StreamReader):
await reader.read(n=1)
raise P2PInterruptedError()
async def do_handle_unary_stream(
stream_info: StreamInfo,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter) -> None:
try:
try:
request = await P2P.receive_protobuf(in_proto_type, reader)
except asyncio.IncompleteReadError:
logger.debug("Incomplete read while receiving request from peer")
return
except google.protobuf.message.DecodeError as error:
logger.exception(error)
return
context.peer_id, context.peer_addr = stream_info.peer_id, stream_info.addr
done, pending = await asyncio.wait([watchdog(reader), handle(request, context)],
return_when=asyncio.FIRST_COMPLETED)
try:
result = done.pop().result()
await P2P.send_protobuf(result, out_proto_type, writer)
except P2PInterruptedError:
pass
except Exception as exc:
error = p2pd_pb2.RPCError(message=str(exc))
await P2P.send_protobuf(error, p2pd_pb2.RPCError, writer)
finally:
pending_task = pending.pop()
pending_task.cancel()
try:
await pending_task
except asyncio.CancelledError:
pass
finally:
writer.close()
return do_handle_unary_stream
def start_listening(self):
async def listen():
async with self._client.listen():
await self._server_stopped.wait()
self._listen_task = asyncio.create_task(listen())
async def stop_listening(self):
if self._listen_task is not None:
self._server_stopped.set()
self._listen_task.cancel()
try:
await self._listen_task
except asyncio.CancelledError:
self._listen_task = None
self._server_stopped.clear()
async def add_stream_handler(self, name, handle):
if self._listen_task is None:
self.start_listening()
await self._client.stream_handler(name, self._handle_stream(handle))
async def add_unary_handler(self, name, handle, in_proto_type, out_proto_type):
if self._listen_task is None:
self.start_listening()
context = P2PContext(id=self.id, port=self._host_port, handle_name=name)
await self._client.stream_handler(
name, P2P._handle_unary_stream(handle, context, in_proto_type, out_proto_type))
async def call_peer_handler(self, peer_id, handler_name, input_data):
libp2p_peer_id = PeerID.from_base58(peer_id)
stream_info, reader, writer = await self._client.stream_open(libp2p_peer_id, (handler_name,))
try:
await P2P.send_raw_data(input_data, writer)
return await P2P.receive_raw_data(reader)
finally:
writer.close()
def __del__(self):
self._terminate()
@property
def is_alive(self):
return self._alive
async def shutdown(self):
await asyncio.get_event_loop().run_in_executor(None, self._terminate)
def _terminate(self):
self._alive = False
if self._child is not None and self._child.poll() is None:
self._child.kill()
self._child.wait()
@staticmethod
def _make_process_args(*args, **kwargs) -> List[str]:
proc_args = []
proc_args.extend(
str(entry) for entry in args
)
proc_args.extend(
f'-{key}={P2P._convert_process_arg_type(value)}' if value is not None else f'-{key}'
for key, value in kwargs.items()
)
return proc_args
@staticmethod
def _convert_process_arg_type(val):
if isinstance(val, bool):
return 1 if val else 0
return val
@staticmethod
def _make_bootstrap_peers(nodes):
if nodes is None:
return {}
return {'bootstrapPeers': ','.join(nodes)}
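# Hedged usage sketch (not part of the original module): one daemon serving an "echo"
# handler, a second daemon bootstrapping to it and calling the handler. The multiaddr
# format and the use of the private _host_port attribute are assumptions about how peers
# are wired together in tests; adjust for real deployments.
async def _example():
    server = await P2P.create(bootstrap=False)
    await server.add_stream_handler('echo', lambda request: request)
    client = await P2P.create(
        bootstrap=True,
        bootstrap_peers=[f'/ip4/127.0.0.1/tcp/{server._host_port}/p2p/{server.id}'])
    await client.wait_for_at_least_n_peers(1)
    assert await client.call_peer_handler(server.id, 'echo', b'ping') == b'ping'
    await client.shutdown()
    await server.shutdown()
# asyncio.run(_example())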
| StarcoderdataPython |
131659 | <reponame>panther-labs/panther-cli<gh_stars>1-10
import ipaddress
import panther_event_type_helpers as event_type
from panther_base_helpers import deep_get
def get_event_type(event):
# currently, only tracking a few event types
if (
event.get("eventName") == "ConsoleLogin"
and deep_get(event, "userIdentity", "type") == "IAMUser"
):
if deep_get(event, "responseElements", "ConsoleLogin") == "Failure":
return event_type.FAILED_LOGIN
if deep_get(event, "responseElements", "ConsoleLogin") == "Success":
return event_type.SUCCESSFUL_LOGIN
if event.get("eventName") == "CreateUser":
return event_type.USER_ACCOUNT_CREATED
if event.get("eventName") == "CreateAccountResult":
return event_type.ACCOUNT_CREATED
return None
def load_ip_address(event):
"""
    CloudTrail occasionally sets non-IP values in the sourceIPAddress field.
    This method returns the address only when it is a valid IPv4 or IPv6 address,
    and returns None otherwise.
"""
source_ip = event.get("sourceIPAddress")
if not source_ip:
return None
try:
ipaddress.IPv4Address(source_ip)
except ipaddress.AddressValueError:
try:
ipaddress.IPv6Address(source_ip)
except ipaddress.AddressValueError:
return None
return source_ip
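# Hedged usage sketch (not part of the original module): classifying a minimal CloudTrail
# console-login event. The event dict is illustrative, not a complete CloudTrail record.
if __name__ == "__main__":
    sample_event = {
        "eventName": "ConsoleLogin",
        "userIdentity": {"type": "IAMUser"},
        "responseElements": {"ConsoleLogin": "Failure"},
        "sourceIPAddress": "198.51.100.7",
    }
    assert get_event_type(sample_event) == event_type.FAILED_LOGIN
    assert load_ip_address(sample_event) == "198.51.100.7"
    assert load_ip_address({"sourceIPAddress": "AWS Internal"}) is None  # non-IP values rejected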
| StarcoderdataPython |
120961 | <gh_stars>0
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, current_app
from flask_login import current_user
blueprint = Blueprint('admin.dashboard', __name__)
@blueprint.before_request
def restrict_blueprint_to_admins():
"""
.. seealso:: http://flask.pocoo.org/snippets/59/
"""
if not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
@blueprint.route('')
def home():
return render_template('admin/dashboard.html')
@blueprint.route('/status')
def status():
config = dict(current_app.config)
return render_template('admin/status.html', config=config)
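# Hedged usage sketch (not part of the original module): how the blueprint might be
# mounted on the application. The import path and url_prefix are assumptions.
#
#     from admin.dashboard import views as dashboard_views
#     app.register_blueprint(dashboard_views.blueprint, url_prefix='/admin/dashboard')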
| StarcoderdataPython |
3278257 | from asyncpg.exceptions import UniqueViolationError
from starlette.background import BackgroundTask
from starlette.responses import JSONResponse
from admin.audit_logs import AuditColour, send_audit_log
from admin.route import Route
from admin.models import ShortURL
from admin.utils import is_authorized, is_json
from admin.discord_api import get_user
class LinkRoute(Route):
"""
Route for fetching, creating, updating and deleting links.
"""
name = "link"
path = "/link"
@is_authorized
async def get(self, request):
urls = await ShortURL.query.order_by(ShortURL.clicks.desc()).gino.all()
response = []
for url in urls:
if (
request.query_params.get("mine")
and url.creator != request.state.api_key.creator
):
continue
response.append({
"short_code": url.short_code,
"long_url": url.long_url,
"notes": url.notes,
"creator": str(url.creator),
"creation_date": url.creation_date.timestamp(),
"clicks": url.clicks
})
return JSONResponse(response)
@is_authorized
@is_json
async def post(self, request):
data = await request.json()
if data["short_code"].isspace() or data["short_code"] == "":
return JSONResponse(
{
"status": "error",
"message": "Short code cannot be blank"
},
status_code=400
)
if data["long_url"].isspace() or data["long_url"] == "":
return JSONResponse(
{
"status": "error",
"message": "Long URL cannot be blank"
},
status_code=400
)
if request.state.api_key.is_admin and data.get("creator"):
new_url = ShortURL(
short_code=data["short_code"],
long_url=data["long_url"],
creator=data["creator"],
notes=data.get("notes", "")
)
else:
new_url = ShortURL(
short_code=data["short_code"],
long_url=data["long_url"],
creator=request.state.api_key.creator,
notes=data.get("notes", "")
)
try:
await new_url.create()
except UniqueViolationError:
return JSONResponse(
{
"status": "error",
"message": "Short code already exists"
},
status_code=400
)
task = BackgroundTask(
send_audit_log,
title="New short URL",
body=f"Created by <@{request.state.api_key.creator}>",
newline_fields={
"Short code": data["short_code"],
"Long URL": data["long_url"],
"Notes": data.get("notes", "*No notes*")
},
colour=AuditColour.SUCCESS
)
return JSONResponse(
{
"status": "success"
},
background=task
)
@is_authorized
@is_json
async def delete(self, request):
data = await request.json()
short_url = await ShortURL.get(data["short_code"])
if not short_url:
return JSONResponse({
"status": "error",
"message": "Short URL not found"
}, status_code=404)
if (
request.state.api_key.is_admin
or request.state.api_key.creator == short_url.creator
):
await short_url.delete()
else:
return JSONResponse({
"status": "error",
"message": "You are not an administrator "
"and you do not own this short URL"
}, status_code=403)
task = BackgroundTask(
send_audit_log,
title="Short URL deleted",
body=f"Deleted by <@{request.state.api_key.creator}>",
newline_fields={
"Short code": short_url.short_code,
"Long URL": short_url.long_url,
"Original creator": f"<@{short_url.creator}>",
"Notes": short_url.notes
},
colour=AuditColour.ERROR
)
return JSONResponse({
"status": "success"
}, background=task)
@is_authorized
@is_json
async def patch(self, request):
data = await request.json()
short_url = await ShortURL.get(data["old_short_code"])
if not short_url:
return JSONResponse({
"status": "error",
"message": "Short URL not found"
}, status_code=404)
updates = {
"short_code": data.get("short_code", short_url.short_code),
"long_url": data.get("long_url", short_url.long_url),
"notes": data.get("notes", short_url.notes)
}
if updates["short_code"].isspace() or updates["short_code"] == "":
return JSONResponse(
{
"status": "error",
"message": "Short code cannot be blank"
},
status_code=400
)
if updates["long_url"].isspace() or updates["long_url"] == "":
return JSONResponse(
{
"status": "error",
"message": "Long URL cannot be blank"
},
status_code=400
)
task = BackgroundTask(
send_audit_log,
title="Short URL updated",
body=f"Updated by <@{request.state.api_key.creator}>",
newline_fields={
"Short code": short_url.short_code,
"Long URL": short_url.long_url,
"Original creator": f"<@{short_url.creator}>",
"Notes": data.get("notes", "*No notes*")
},
colour=AuditColour.BLURPLE
)
if request.state.api_key.is_admin:
try:
updates["creator"] = int(data.get("creator", short_url.creator))
if updates["creator"] != short_url.creator:
task = BackgroundTask(
send_audit_log,
title="Short URL transferred",
body=f"Transferred by <@{request.state.api_key.creator}>",
inline_fields={
"Original creator": f"<@{short_url.creator}>",
"New creator": f"<@{updates['creator']}>",
},
colour=AuditColour.BLURPLE
)
get_user(updates["creator"])
except (ValueError, KeyError):
return JSONResponse({
"status": "error",
"message": "User does not exist"
}, status_code=400)
try:
await short_url.update(**updates).apply()
except UniqueViolationError:
return JSONResponse({
"status": "error",
"message": "New short URL already exists"
}, status_code=400)
return JSONResponse({
"status": "success"
}, background=task)
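# Hedged usage sketch (not part of the original module): the JSON contracts this route
# accepts. Authentication comes from the project's is_authorized decorator and is omitted.
#
#     GET    /link?mine=1  -> list of the caller's short URLs
#     POST   /link         {"short_code": "gh", "long_url": "https://github.com", "notes": ""}
#     PATCH  /link         {"old_short_code": "gh", "long_url": "https://github.com/python"}
#     DELETE /link         {"short_code": "gh"}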
| StarcoderdataPython |
1782122 | from datetime import datetime, timedelta
import simplejson
from django.db import models
class RecentManager(models.Manager):
def get_query_set(self):
return super(RecentManager, self).get_query_set().filter(updated__gt=datetime.utcnow() - timedelta(14))
class Geocode(models.Model):
lon = models.FloatField(null=True)
lat = models.FloatField(null=True)
query = models.TextField(null=True)
_results = models.TextField(default='null')
updated = models.DateTimeField()
local_name = models.TextField()
recent = RecentManager()
objects = models.Manager()
def _get_results(self):
try:
return self._cached_results
except AttributeError:
self._cached_results = simplejson.loads(self._results)
return self._cached_results
def _set_results(self, value):
self._cached_results = value
results = property(_get_results, _set_results)
def save(self, *args, **kwargs):
if hasattr(self, '_cached_results'):
self._results = simplejson.dumps(self._cached_results)
self.updated = datetime.utcnow()
super(Geocode, self).save(*args, **kwargs)
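# Hedged usage sketch (not part of the original module): the results property round-trips
# through the JSON-encoded _results column on save, and Geocode.recent only returns rows
# updated within the last 14 days. Field values below are illustrative.
#
#     geocode = Geocode(lon=-1.2577, lat=51.7520, query="Oxford", local_name="Oxford")
#     geocode.results = [{"name": "Oxford, UK"}]
#     geocode.save()                          # sets updated=utcnow() and serialises results
#     Geocode.recent.filter(query="Oxford")   # excludes entries older than two weeks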
| StarcoderdataPython |
3337017 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import unittest
from copy import deepcopy
import torch
from pytorchvideo.layers.accelerator.mobile_cpu.activation_functions import (
supported_act_functions,
)
from pytorchvideo.layers.accelerator.mobile_cpu.attention import SqueezeExcitation
class TestActivationAttentionEquivalency(unittest.TestCase):
def test_activation_equivalency(self):
# Input tensor
input_tensor = torch.randn(1, 3, 4, 6, 6)
for iter_activation_name in supported_act_functions:
act_func_ref = supported_act_functions[iter_activation_name]()
act_func_convert = deepcopy(act_func_ref)
act_func_convert.convert()
# Get output of both activations
out0 = act_func_ref(input_tensor)
out1 = act_func_convert(input_tensor)
# Check arithmetic equivalency
max_err = float(torch.max(torch.abs(out0 - out1)))
logging.info(
f"test_activation_equivalency: {iter_activation_name} max_err {max_err}"
)
self.assertTrue(max_err < 1e-3)
def test_squeeze_excite_equivalency(self):
# Input tensor
input_tensor = torch.randn(1, 16, 4, 6, 6)
# Instantiate ref and convert se modules.
se_ref = SqueezeExcitation(16, num_channels_reduced=2, is_3d=True)
se_ref.eval()
se_convert = deepcopy(se_ref)
se_convert.convert((1, 16, 4, 6, 6))
# Get output of both activations
out0 = se_ref(input_tensor)
out1 = se_convert(input_tensor)
# Check arithmetic equivalency
max_err = float(torch.max(torch.abs(out0 - out1)))
rel_err = torch.abs((out0 - out1) / out0)
max_rel_err = float(torch.max(rel_err))
logging.info(
(
"test_squeeze_excite_equivalency: "
f"max_err {max_err}, max_rel_err {max_rel_err}"
)
)
self.assertTrue(max_err < 1e-3)
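# Allow running this file directly (assumption: the original relies on an external test
# runner such as pytest instead).
if __name__ == "__main__":
    unittest.main()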
| StarcoderdataPython |
52071 | from .models import Signal
from rest_framework import viewsets
from dashboard.quickstart.serializers import SignalSerializer
from django.utils import timezone
class SignalViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows Signal to be viewed or edited.
"""
    # Keep a class-level queryset so DRF routers can still infer the basename.
    queryset = Signal.objects.all()
    serializer_class = SignalSerializer
    def get_queryset(self):
        # Evaluate "today" per request; filtering at class definition time would freeze
        # the date at import on a long-running server.
        return Signal.objects.filter(time_recieved__startswith=timezone.now().date())
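# Hedged usage sketch (not part of the original module): registering the viewset with a
# DRF router. Module paths and the URL prefix are assumptions.
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'signals', SignalViewSet)
#     # urlpatterns = [path('api/', include(router.urls))]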
| StarcoderdataPython |
1631571 | <reponame>ASVO-TAO/SS18B-PLasky
"""
Distributed under the MIT License. See LICENSE.txt for more info.
"""
import ast
from collections import OrderedDict
from ...utility.display_names import OPEN_DATA
from ..dynamic import field
from ...models import DataParameter, Data
from ..dynamic.form import DynamicForm
from ...utility.display_names import (
DETECTOR_CHOICE,
DETECTOR_CHOICE_DISPLAY,
SIGNAL_DURATION,
SIGNAL_DURATION_DISPLAY,
SAMPLING_FREQUENCY,
SAMPLING_FREQUENCY_DISPLAY,
START_TIME,
START_TIME_DISPLAY,
HANFORD,
HANFORD_DISPLAY,
LIVINGSTON,
LIVINGSTON_DISPLAY,
VIRGO,
VIRGO_DISPLAY,
)
DETECTOR_CHOICES = [
(HANFORD, HANFORD_DISPLAY),
(LIVINGSTON, LIVINGSTON_DISPLAY),
(VIRGO, VIRGO_DISPLAY),
]
DATA_FIELDS_PROPERTIES = OrderedDict([
(DETECTOR_CHOICE, {
'type': field.MULTIPLE_CHOICES,
'label': DETECTOR_CHOICE_DISPLAY,
'initial': None,
'required': True,
'choices': DETECTOR_CHOICES,
}),
(SIGNAL_DURATION, {
'type': field.POSITIVE_INTEGER,
'label': SIGNAL_DURATION_DISPLAY,
'placeholder': '2',
'initial': None,
'required': True,
}),
(SAMPLING_FREQUENCY, {
'type': field.POSITIVE_INTEGER,
'label': SAMPLING_FREQUENCY_DISPLAY,
'placeholder': '2',
'initial': None,
'required': True,
}),
(START_TIME, {
'type': field.POSITIVE_FLOAT,
'label': START_TIME_DISPLAY,
'placeholder': '2.1',
'initial': None,
'required': True,
}),
])
class OpenDataParameterForm(DynamicForm):
"""
Open Data Parameter Form extending Dynamic Form
"""
def __init__(self, *args, **kwargs):
kwargs['name'] = 'data-parameter'
kwargs['fields_properties'] = DATA_FIELDS_PROPERTIES
# We need to job to extract job information but job itself is not going to be used for saving form
self.job = kwargs.pop('job', None)
super(OpenDataParameterForm, self).__init__(*args, **kwargs)
def save(self):
# find the data first
data = Data.objects.get(job=self.job)
# Create or update the data parameters
for name, value in self.cleaned_data.items():
DataParameter.objects.update_or_create(
data=data,
name=name,
defaults={
'value': value,
}
)
def update_from_database(self, job):
"""
Populates the form field with the values stored in the database
:param job: instance of job model for which the data parameters belong to
:return: Nothing
"""
if not job:
return
else:
# check whether the data choice is open data or not
# if not nothing to populate
try:
data = Data.objects.get(job=job)
if data.data_choice != OPEN_DATA:
return
except Data.DoesNotExist:
return
# iterate over the fields
for name in DATA_FIELDS_PROPERTIES.keys():
try:
value = DataParameter.objects.get(data=data, name=name).value
# set the field value
# extra processing required for checkbox type fields
self.fields[name].initial = ast.literal_eval(value) if name == DETECTOR_CHOICE else value
except DataParameter.DoesNotExist:
continue
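# Hedged usage sketch (not part of the original module): typical round trip in a view.
# The request and job objects are assumptions; DynamicForm is expected to behave like a
# regular Django form here.
#
#     form = OpenDataParameterForm(request.POST, job=job)
#     if form.is_valid():
#         form.save()                          # one DataParameter row per field
#     edit_form = OpenDataParameterForm(job=job)
#     edit_form.update_from_database(job)      # pre-fills fields for an existing OPEN_DATA job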
| StarcoderdataPython |
2915 | <filename>sc2/bot_ai.py
import itertools
import logging
import math
import random
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union # mypy type checking
from .cache import property_cache_forever, property_cache_once_per_frame
from .data import ActionResult, Alert, Attribute, Race, Result, Target, race_gas, race_townhalls, race_worker
from .game_data import AbilityData, GameData
# imports for mypy and pycharm autocomplete
from .game_state import GameState
from .ids.ability_id import AbilityId
from .ids.unit_typeid import UnitTypeId
from .ids.upgrade_id import UpgradeId
from .pixel_map import PixelMap
from .position import Point2, Point3
from .unit import Unit
from .units import Units
logger = logging.getLogger(__name__)
class BotAI:
"""Base class for bots."""
EXPANSION_GAP_THRESHOLD = 15
def __init__(self):
# Specific opponent bot ID used in sc2ai ladder games http://sc2ai.net/
# The bot ID will stay the same each game so your bot can "adapt" to the opponent
self.opponent_id: int = None
self.units: Units = None
self.workers: Units = None
self.townhalls: Units = None
self.geysers: Units = None
self.minerals: int = None
self.vespene: int = None
self.supply_army: Union[float, int] = None
self.supply_workers: Union[float, int] = None # Doesn't include workers in production
self.supply_cap: Union[float, int] = None
self.supply_used: Union[float, int] = None
self.supply_left: Union[float, int] = None
self.idle_worker_count: int = None
self.army_count: int = None
self.warp_gate_count: int = None
self.larva_count: int = None
self.cached_known_enemy_structures = None
self.cached_known_enemy_units = None
@property
def enemy_race(self) -> Race:
assert len(self._game_info.player_races) == 2, "enemy_race not available"
self.enemy_id = 3 - self.player_id
return Race(self._game_info.player_races[self.enemy_id])
@property
def time(self) -> Union[int, float]:
""" Returns time in seconds, assumes the game is played on 'faster' """
return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)
@property
def time_formatted(self) -> str:
""" Returns time as string in min:sec format """
t = self.time
return f"{int(t // 60):02}:{int(t % 60):02}"
@property
def game_info(self) -> "GameInfo":
return self._game_info
def alert(self, alert_code: Alert) -> bool:
"""
Check if alert is triggered in the current step.
Example use:
from sc2.data import Alert
if self.alert(Alert.AddOnComplete):
print("Addon Complete")
Alert codes:
AlertError
AddOnComplete
BuildingComplete
BuildingUnderAttack
LarvaHatched
MergeComplete
MineralsExhausted
MorphComplete
MothershipComplete
MULEExpired
NuclearLaunchDetected
NukeComplete
NydusWormDetected
ResearchComplete
TrainError
TrainUnitComplete
TrainWorkerComplete
TransformationComplete
UnitUnderAttack
UpgradeComplete
VespeneExhausted
WarpInComplete
"""
assert isinstance(alert_code, Alert), f"alert_code {alert_code} is no Alert"
return alert_code.value in self.state.alerts
@property
def start_location(self) -> Point2:
return self._game_info.player_start_location
@property
def enemy_start_locations(self) -> List[Point2]:
"""Possible start locations for enemies."""
return self._game_info.start_locations
@property_cache_once_per_frame
def known_enemy_units(self) -> Units:
"""List of known enemy units, including structures."""
return self.state.enemy_units
@property_cache_once_per_frame
def known_enemy_structures(self) -> Units:
"""List of known enemy units, structures only."""
return self.state.enemy_units.structure
@property
def main_base_ramp(self) -> "Ramp":
""" Returns the Ramp instance of the closest main-ramp to start location.
Look in game_info.py for more information """
if hasattr(self, "cached_main_base_ramp"):
return self.cached_main_base_ramp
# The reason for len(ramp.upper) in {2, 5} is:
# ParaSite map has 5 upper points, and most other maps have 2 upper points at the main ramp.
# The map Acolyte has 4 upper points at the wrong ramp (which is closest to the start position).
try:
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {2, 5}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
except ValueError:
# Hardcoded hotfix for Honorgrounds LE map, as that map has a large main base ramp with inbase natural
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {4, 9}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
return self.cached_main_base_ramp
@property_cache_forever
def expansion_locations(self) -> Dict[Point2, Units]:
"""
Returns dict with the correct expansion position Point2 object as key,
resources (mineral field and vespene geyser) as value.
"""
# Idea: create a group for every resource, then merge these groups if
# any resource in a group is closer than 6 to any resource of another group
# Distance we group resources by
RESOURCE_SPREAD_THRESHOLD = 8.5
geysers = self.state.vespene_geyser
# Create a group for every resource
resource_groups = [[resource] for resource in self.state.resources]
# Loop the merging process as long as we change something
found_something = True
while found_something:
found_something = False
# Check every combination of two groups
for group_a, group_b in itertools.combinations(resource_groups, 2):
# Check if any pair of resource of these groups is closer than threshold together
if any(
resource_a.distance_to(resource_b) <= RESOURCE_SPREAD_THRESHOLD
for resource_a, resource_b in itertools.product(group_a, group_b)
):
# Remove the single groups and add the merged group
resource_groups.remove(group_a)
resource_groups.remove(group_b)
resource_groups.append(group_a + group_b)
found_something = True
break
# Distance offsets we apply to center of each resource group to find expansion position
offset_range = 7
offsets = [
(x, y)
for x, y in itertools.product(range(-offset_range, offset_range + 1), repeat=2)
if math.hypot(x, y) <= 8
]
# Dict we want to return
centers = {}
# For every resource group:
for resources in resource_groups:
# Possible expansion points
amount = len(resources)
# Calculate center, round and add 0.5 because expansion location will have (x.5, y.5)
# coordinates because bases have size 5.
center_x = int(sum(resource.position.x for resource in resources) / amount) + 0.5
center_y = int(sum(resource.position.y for resource in resources) / amount) + 0.5
possible_points = (Point2((offset[0] + center_x, offset[1] + center_y)) for offset in offsets)
# Filter out points that are too near
possible_points = (
point
for point in possible_points
# Check if point can be built on
if self._game_info.placement_grid[point.rounded] == 1
# Check if all resources have enough space to point
and all(point.distance_to(resource) > (7 if resource in geysers else 6) for resource in resources)
)
# Choose best fitting point
result = min(possible_points, key=lambda point: sum(point.distance_to(resource) for resource in resources))
centers[result] = resources
return centers
def _correct_zerg_supply(self):
""" The client incorrectly rounds zerg supply down instead of up (see
https://github.com/Blizzard/s2client-proto/issues/123), so self.supply_used
and friends return the wrong value when there are an odd number of zerglings
and banelings. This function corrects the bad values. """
# TODO: remove when Blizzard/sc2client-proto#123 gets fixed.
half_supply_units = {
UnitTypeId.ZERGLING,
UnitTypeId.ZERGLINGBURROWED,
UnitTypeId.BANELING,
UnitTypeId.BANELINGBURROWED,
UnitTypeId.BANELINGCOCOON,
}
correction = self.units(half_supply_units).amount % 2
self.supply_used += correction
self.supply_army += correction
self.supply_left -= correction
async def get_available_abilities(
self, units: Union[List[Unit], Units], ignore_resource_requirements=False
) -> List[List[AbilityId]]:
""" Returns available abilities of one or more units. Right know only checks cooldown, energy cost, and whether the ability has been researched.
Example usage:
units_abilities = await self.get_available_abilities(self.units)
or
units_abilities = await self.get_available_abilities([self.units.random]) """
return await self._client.query_available_abilities(units, ignore_resource_requirements)
async def expand_now(
self, building: UnitTypeId = None, max_distance: Union[int, float] = 10, location: Optional[Point2] = None
):
""" Not recommended as this function uses 'self.do' (reduces performance).
Finds the next possible expansion via 'self.get_next_expansion()'. If the target expansion is blocked (e.g. an enemy unit), it will misplace the expansion. """
if not building:
# self.race is never Race.Random
start_townhall_type = {
Race.Protoss: UnitTypeId.NEXUS,
Race.Terran: UnitTypeId.COMMANDCENTER,
Race.Zerg: UnitTypeId.HATCHERY,
}
building = start_townhall_type[self.race]
assert isinstance(building, UnitTypeId)
if not location:
location = await self.get_next_expansion()
await self.build(building, near=location, max_distance=max_distance, random_alternative=False, placement_step=1)
async def get_next_expansion(self) -> Optional[Point2]:
"""Find next expansion location."""
closest = None
distance = math.inf
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
if any(map(is_near_to_expansion, self.townhalls)):
# already taken
continue
startp = self._game_info.player_start_location
d = await self._client.query_pathing(startp, el)
if d is None:
continue
if d < distance:
distance = d
closest = el
return closest
async def distribute_workers(self, resource_ratio: float = 2):
"""
Distributes workers across all the bases taken.
Keyword `resource_ratio` takes a float. If the current minerals to gas
ratio is bigger than `resource_ratio`, this function prefer filling geysers
first, if it is lower, it will prefer sending workers to minerals first.
        This is only for workers that need to be moved anyway; it will NOT fill
        geysers on its own.
        NOTE: This function is far from optimal. If you really want refined worker
        control, you should write your own distribution function; for example, long
        distance mining control and moving workers after a base is destroyed are not
        handled here.
WARNING: This is quite slow when there are lots of workers or multiple bases.
"""
if not self.state.mineral_field or not self.workers or not self.townhalls.ready:
return
actions = []
worker_pool = [worker for worker in self.workers.idle]
bases = self.townhalls.ready
geysers = self.geysers.ready
# list of places that need more workers
deficit_mining_places = []
for mining_place in bases | geysers:
difference = mining_place.surplus_harvesters
# perfect amount of workers, skip mining place
if not difference:
continue
if mining_place.is_vespene_geyser:
# get all workers that target the gas extraction site
# or are on their way back from it
local_workers = self.workers.filter(
lambda unit: unit.order_target == mining_place.tag
or (unit.is_carrying_vespene and unit.order_target == bases.closest_to(mining_place).tag)
)
else:
# get tags of minerals around expansion
local_minerals_tags = {
mineral.tag for mineral in self.state.mineral_field if mineral.distance_to(mining_place) <= 8
}
# get all target tags a worker can have
# tags of the minerals he could mine at that base
# get workers that work at that gather site
local_workers = self.workers.filter(
lambda unit: unit.order_target in local_minerals_tags
or (unit.is_carrying_minerals and unit.order_target == mining_place.tag)
)
# too many workers
if difference > 0:
for worker in local_workers[:difference]:
worker_pool.append(worker)
# too few workers
# add mining place to deficit bases for every missing worker
else:
deficit_mining_places += [mining_place for _ in range(-difference)]
# prepare all minerals near a base if we have too many workers
# and need to send them to the closest patch
if len(worker_pool) > len(deficit_mining_places):
all_minerals_near_base = [
mineral
for mineral in self.state.mineral_field
if any(mineral.distance_to(base) <= 8 for base in self.townhalls.ready)
]
# distribute every worker in the pool
for worker in worker_pool:
# as long as have workers and mining places
if deficit_mining_places:
# choose only mineral fields first if current mineral to gas ratio is less than target ratio
if self.vespene and self.minerals / self.vespene < resource_ratio:
possible_mining_places = [place for place in deficit_mining_places if not place.vespene_contents]
# else prefer gas
else:
possible_mining_places = [place for place in deficit_mining_places if place.vespene_contents]
# if preferred type is not available any more, get all other places
if not possible_mining_places:
possible_mining_places = deficit_mining_places
                # find the closest mining place of the preferred type
                current_place = min(possible_mining_places, key=lambda place: place.distance_to(worker))
                # remove it from the list
                deficit_mining_places.remove(current_place)
                # if current place is a gas extraction site, go there
                if current_place.vespene_contents:
                    actions.append(worker.gather(current_place))
                # if current place is a mineral expansion,
                # go to the mineral field that is near and has the most minerals left
else:
local_minerals = [
mineral for mineral in self.state.mineral_field if mineral.distance_to(current_place) <= 8
]
target_mineral = max(local_minerals, key=lambda mineral: mineral.mineral_contents)
actions.append(worker.gather(target_mineral))
# more workers to distribute than free mining spots
# send to closest if worker is doing nothing
elif worker.is_idle and all_minerals_near_base:
target_mineral = min(all_minerals_near_base, key=lambda mineral: mineral.distance_to(worker))
actions.append(worker.gather(target_mineral))
else:
# there are no deficit mining places and worker is not idle
# so dont move him
pass
await self.do_actions(actions)
@property
def owned_expansions(self) -> Dict[Point2, Unit]:
"""List of expansions owned by the player."""
owned = {}
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
th = next((x for x in self.townhalls if is_near_to_expansion(x)), None)
if th:
owned[el] = th
return owned
def can_feed(self, unit_type: UnitTypeId) -> bool:
""" Checks if you have enough free supply to build the unit """
required = self._game_data.units[unit_type.value]._proto.food_required
return required == 0 or self.supply_left >= required
def can_afford(
self, item_id: Union[UnitTypeId, UpgradeId, AbilityId], check_supply_cost: bool = True
) -> "CanAffordWrapper":
"""Tests if the player has enough resources to build a unit or cast an ability."""
enough_supply = True
if isinstance(item_id, UnitTypeId):
unit = self._game_data.units[item_id.value]
cost = self._game_data.calculate_ability_cost(unit.creation_ability)
if check_supply_cost:
enough_supply = self.can_feed(item_id)
elif isinstance(item_id, UpgradeId):
cost = self._game_data.upgrades[item_id.value].cost
else:
cost = self._game_data.calculate_ability_cost(item_id)
return CanAffordWrapper(cost.minerals <= self.minerals, cost.vespene <= self.vespene, enough_supply)
async def can_cast(
self,
unit: Unit,
ability_id: AbilityId,
target: Optional[Union[Unit, Point2, Point3]] = None,
only_check_energy_and_cooldown: bool = False,
cached_abilities_of_unit: List[AbilityId] = None,
) -> bool:
"""Tests if a unit has an ability available and enough energy to cast it.
See data_pb2.py (line 161) for the numbers 1-5 to make sense"""
assert isinstance(unit, Unit)
assert isinstance(ability_id, AbilityId)
assert isinstance(target, (type(None), Unit, Point2, Point3))
# check if unit has enough energy to cast or if ability is on cooldown
if cached_abilities_of_unit:
abilities = cached_abilities_of_unit
else:
abilities = (await self.get_available_abilities([unit]))[0]
if ability_id in abilities:
if only_check_energy_and_cooldown:
return True
cast_range = self._game_data.abilities[ability_id.value]._proto.cast_range
ability_target = self._game_data.abilities[ability_id.value]._proto.target
# Check if target is in range (or is a self cast like stimpack)
if (
ability_target == 1
or ability_target == Target.PointOrNone.value
and isinstance(target, (Point2, Point3))
and unit.distance_to(target) <= cast_range
): # cant replace 1 with "Target.None.value" because ".None" doesnt seem to be a valid enum name
return True
# Check if able to use ability on a unit
elif (
ability_target in {Target.Unit.value, Target.PointOrUnit.value}
and isinstance(target, Unit)
and unit.distance_to(target) <= cast_range
):
return True
# Check if able to use ability on a position
elif (
ability_target in {Target.Point.value, Target.PointOrUnit.value}
and isinstance(target, (Point2, Point3))
and unit.distance_to(target) <= cast_range
):
return True
return False
def select_build_worker(self, pos: Union[Unit, Point2, Point3], force: bool = False) -> Optional[Unit]:
"""Select a worker to build a building with."""
workers = (
self.workers.filter(lambda w: (w.is_gathering or w.is_idle) and w.distance_to(pos) < 20) or self.workers
)
if workers:
for worker in workers.sorted_by_distance_to(pos).prefer_idle:
if (
not worker.orders
or len(worker.orders) == 1
and worker.orders[0].ability.id in {AbilityId.MOVE, AbilityId.HARVEST_GATHER}
):
return worker
return workers.random if force else None
async def can_place(self, building: Union[AbilityData, AbilityId, UnitTypeId], position: Point2) -> bool:
"""Tests if a building can be placed in the given location."""
building_type = type(building)
assert building_type in {AbilityData, AbilityId, UnitTypeId}
if building_type == UnitTypeId:
building = self._game_data.units[building.value].creation_ability
elif building_type == AbilityId:
building = self._game_data.abilities[building.value]
r = await self._client.query_building_placement(building, [position])
return r[0] == ActionResult.Success
async def find_placement(
self,
building: UnitTypeId,
near: Union[Unit, Point2, Point3],
max_distance: int = 20,
random_alternative: bool = True,
placement_step: int = 2,
) -> Optional[Point2]:
"""Finds a placement location for building."""
assert isinstance(building, (AbilityId, UnitTypeId))
assert isinstance(near, Point2)
if isinstance(building, UnitTypeId):
building = self._game_data.units[building.value].creation_ability
else: # AbilityId
building = self._game_data.abilities[building.value]
if await self.can_place(building, near):
return near
if max_distance == 0:
return None
for distance in range(placement_step, max_distance, placement_step):
possible_positions = [
Point2(p).offset(near).to2
for p in (
[(dx, -distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(dx, distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(-distance, dy) for dy in range(-distance, distance + 1, placement_step)]
+ [(distance, dy) for dy in range(-distance, distance + 1, placement_step)]
)
]
res = await self._client.query_building_placement(building, possible_positions)
possible = [p for r, p in zip(res, possible_positions) if r == ActionResult.Success]
if not possible:
continue
if random_alternative:
return random.choice(possible)
else:
return min(possible, key=lambda p: p.distance_to_point2(near))
return None
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]:
""" Check if an upgrade is being researched
Return values:
0: not started
0 < x < 1: researching
1: finished
"""
assert isinstance(upgrade_type, UpgradeId)
if upgrade_type in self.state.upgrades:
return 1
level = None
if "LEVEL" in upgrade_type.name:
level = upgrade_type.name[-1]
creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id
for structure in self.units.filter(lambda unit: unit.is_structure and unit.is_ready):
for order in structure.orders:
if order.ability.id is creationAbilityID:
if level and order.ability.button_name[-1] != level:
return 0
return order.progress
return 0
@property_cache_once_per_frame
def _abilities_all_units(self) -> Counter:
""" Cache for the already_pending function, includes protoss units warping in, and all units in production, and all structures, and all morphs """
abilities_amount = Counter()
for unit in self.units: # type: Unit
for order in unit.orders:
abilities_amount[order.ability] += 1
if not unit.is_ready:
if self.race != Race.Terran or not unit.is_structure:
# If an SCV is constructing a building, already_pending would count this structure twice (once from the SCV order, and once from "not structure.is_ready")
abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1
return abilities_amount
@property_cache_once_per_frame
def _abilities_workers_and_eggs(self) -> Counter:
""" Cache for the already_pending function, includes all worker orders (including pending).
Zerg units in production (except queens and morphing units) and structures in production,
counts double for terran """
abilities_amount = Counter()
for worker in self.workers: # type: Unit
for order in worker.orders:
abilities_amount[order.ability] += 1
if self.race == Race.Zerg:
for egg in self.units(UnitTypeId.EGG): # type: Unit
for order in egg.orders:
abilities_amount[order.ability] += 1
if self.race != Race.Terran:
# If an SCV is constructing a building, already_pending would count this structure twice
# (once from the SCV order, and once from "not structure.is_ready")
for unit in self.units.structure.not_ready: # type: Unit
abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1
return abilities_amount
def already_pending(self, unit_type: Union[UpgradeId, UnitTypeId], all_units: bool = True) -> int:
"""
Returns a number of buildings or units already in progress, or if a
worker is en route to build it. This also includes queued orders for
workers and build queues of buildings.
If all_units==True, then build queues of other units (such as Carriers
(Interceptors) or Oracles (Stasis Ward)) are also included.
"""
# TODO / FIXME: SCV building a structure might be counted as two units
if isinstance(unit_type, UpgradeId):
return self.already_pending_upgrade(unit_type)
ability = self._game_data.units[unit_type.value].creation_ability
amount = len(self.units(unit_type).not_ready)
if all_units:
amount += sum([o.ability == ability for u in self.units for o in u.orders])
else:
amount += sum([o.ability == ability for w in self.workers for o in w.orders])
amount += sum([egg.orders[0].ability == ability for egg in self.units(UnitTypeId.EGG)])
return amount
    async def build(
        self,
        building: UnitTypeId,
        near: Union[Point2, Point3],
        max_distance: int = 20,
        unit: Optional[Unit] = None,
        random_alternative: bool = True,
        placement_step: int = 2,
    ):
"""Build a building."""
if isinstance(near, Unit):
near = near.position.to2
elif near is not None:
near = near.to2
else:
return
p = await self.find_placement(building, near.rounded, max_distance, random_alternative, placement_step)
if p is None:
return ActionResult.CantFindPlacementLocation
unit = unit or self.select_build_worker(p)
if unit is None or not self.can_afford(building):
return ActionResult.Error
return await self.do(unit.build(building, p))
async def do(self, action):
if not self.can_afford(action):
logger.warning(f"Cannot afford action {action}")
return ActionResult.Error
r = await self._client.actions(action)
if not r: # success
cost = self._game_data.calculate_ability_cost(action.ability)
self.minerals -= cost.minerals
self.vespene -= cost.vespene
else:
logger.error(f"Error: {r} (action: {action})")
return r
async def do_actions(self, actions: List["UnitCommand"], prevent_double=True):
""" Unlike 'self.do()', this function does not instantly subtract minerals and vespene. """
if not actions:
return None
if prevent_double:
actions = list(filter(self.prevent_double_actions, actions))
for action in actions:
cost = self._game_data.calculate_ability_cost(action.ability)
self.minerals -= cost.minerals
self.vespene -= cost.vespene
return await self._client.actions(actions)
def prevent_double_actions(self, action):
# always add actions if queued
if action.queue:
return True
if action.unit.orders:
# action: UnitCommand
# current_action: UnitOrder
current_action = action.unit.orders[0]
if current_action.ability.id != action.ability:
# different action, return true
return True
try:
if current_action.target == action.target.tag:
# same action, remove action if same target unit
return False
except AttributeError:
pass
try:
if action.target.x == current_action.target.x and action.target.y == current_action.target.y:
# same action, remove action if same target position
return False
except AttributeError:
pass
return True
return True
async def chat_send(self, message: str):
""" Send a chat message. """
assert isinstance(message, str), f"{message} is no string"
await self._client.chat_send(message, False)
    # For the functions below, make sure you are inside the boundaries of the map size.
def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int:
""" Returns terrain height at a position.
Caution: terrain height is different from a unit's z-coordinate.
"""
assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit"
pos = pos.position.to2.rounded
return self._game_info.terrain_height[pos] # returns int
def get_terrain_z_height(self, pos: Union[Point2, Point3, Unit]) -> int:
""" Returns terrain z-height at a position. """
assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit"
pos = pos.position.to2.rounded
return -16 + 32 * self._game_info.terrain_height[pos] / 255
def in_placement_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if you can place something at a position.
Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points.
Caution: some x and y offset might be required, see ramp code:
https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18 """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.placement_grid[pos] == 1
def in_pathing_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if a unit can pass through a grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.pathing_grid[pos] == 1
def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if you have vision on a grid point. """
# more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.visibility[pos] == 2
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if there is creep on the grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.creep[pos] == 1
def _prepare_start(self, client, player_id, game_info, game_data):
"""Ran until game start to set game and player data."""
self._client: "Client" = client
self._game_info: "GameInfo" = game_info
self._game_data: GameData = game_data
self.player_id: int = player_id
self.race: Race = Race(self._game_info.player_races[self.player_id])
self._units_previous_map: dict = dict()
self._previous_upgrades: Set[UpgradeId] = set()
self.units: Units = Units([])
def _prepare_first_step(self):
"""First step extra preparations. Must not be called before _prepare_step."""
if self.townhalls:
self._game_info.player_start_location = self.townhalls.first.position
self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()
def _prepare_step(self, state, proto_game_info):
# Set attributes from new state before on_step."""
self.state: GameState = state # See game_state.py
# update pathing grid
self._game_info.pathing_grid: PixelMap = PixelMap(
proto_game_info.game_info.start_raw.pathing_grid, in_bits=True, mirrored=False
)
# Required for events
self._units_previous_map: Dict = {unit.tag: unit for unit in self.units}
self.units: Units = state.own_units
self.workers: Units = self.units(race_worker[self.race])
self.townhalls: Units = self.units(race_townhalls[self.race])
self.geysers: Units = self.units(race_gas[self.race])
self.minerals: int = state.common.minerals
self.vespene: int = state.common.vespene
self.supply_army: int = state.common.food_army
self.supply_workers: int = state.common.food_workers # Doesn't include workers in production
self.supply_cap: int = state.common.food_cap
self.supply_used: int = state.common.food_used
self.supply_left: int = self.supply_cap - self.supply_used
if self.race == Race.Zerg:
self.larva_count: int = state.common.larva_count
# Workaround Zerg supply rounding bug
self._correct_zerg_supply()
elif self.race == Race.Protoss:
self.warp_gate_count: int = state.common.warp_gate_count
self.idle_worker_count: int = state.common.idle_worker_count
self.army_count: int = state.common.army_count
# reset cached values
self.cached_known_enemy_structures = None
self.cached_known_enemy_units = None
async def issue_events(self):
""" This function will be automatically run from main.py and triggers the following functions:
- on_unit_created
- on_unit_destroyed
- on_building_construction_complete
"""
await self._issue_unit_dead_events()
await self._issue_unit_added_events()
for unit in self.units.structure:
await self._issue_building_complete_event(unit)
if len(self._previous_upgrades) != len(self.state.upgrades):
for upgrade_completed in self.state.upgrades - self._previous_upgrades:
await self.on_upgrade_complete(upgrade_completed)
self._previous_upgrades = self.state.upgrades
async def _issue_unit_added_events(self):
for unit in self.units.not_structure:
if unit.tag not in self._units_previous_map:
await self.on_unit_created(unit)
for unit in self.units.structure:
if unit.tag not in self._units_previous_map:
await self.on_building_construction_started(unit)
async def _issue_building_complete_event(self, unit):
if unit.build_progress < 1:
return
if unit.tag not in self._units_previous_map:
return
unit_prev = self._units_previous_map[unit.tag]
if unit_prev.build_progress < 1:
await self.on_building_construction_complete(unit)
async def _issue_unit_dead_events(self):
for unit_tag in self.state.dead_units:
await self.on_unit_destroyed(unit_tag)
async def on_unit_destroyed(self, unit_tag):
""" Override this in your bot class.
Note that this function uses unit tags because the unit does not exist any more. """
async def on_unit_created(self, unit: Unit):
""" Override this in your bot class. """
async def on_building_construction_started(self, unit: Unit):
""" Override this in your bot class. """
async def on_building_construction_complete(self, unit: Unit):
""" Override this in your bot class. Note that this function is also
triggered at the start of the game for the starting base building."""
async def on_upgrade_complete(self, upgrade: UpgradeId):
""" Override this in your bot class. """
def on_start(self):
""" Allows initializing the bot when the game data is available. """
async def on_start_async(self):
""" This function is run after "on_start". At this point, game_data, game_info and
the first iteration of game_state (self.state) are available. """
async def on_step(self, iteration: int):
"""Ran on every game step (looped in realtime mode)."""
raise NotImplementedError
def on_end(self, game_result: Result):
""" Triggered at the end of a game. """
class CanAffordWrapper:
def __init__(self, can_afford_minerals, can_afford_vespene, have_enough_supply):
self.can_afford_minerals = can_afford_minerals
self.can_afford_vespene = can_afford_vespene
self.have_enough_supply = have_enough_supply
def __bool__(self):
return self.can_afford_minerals and self.can_afford_vespene and self.have_enough_supply
@property
def action_result(self):
if not self.can_afford_vespene:
return ActionResult.NotEnoughVespene
elif not self.can_afford_minerals:
return ActionResult.NotEnoughMinerals
elif not self.have_enough_supply:
return ActionResult.NotEnoughFood
else:
return None
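# Hedged usage sketch (not part of the original library file): a minimal bot built on
# BotAI showing the typical on_step pattern. The expansion threshold and class name are
# illustrative; launching the bot via sc2.main.run_game is omitted.
class ExampleWorkerBot(BotAI):
    async def on_step(self, iteration: int):
        # Spread workers over minerals and gas every step.
        await self.distribute_workers()
        worker_type = race_worker[self.race]
        # Train workers from idle, finished townhalls while supply allows it.
        for townhall in self.townhalls.ready.idle:
            if self.can_afford(worker_type) and self.supply_left > 0:
                await self.do(townhall.train(worker_type))
        # Naively take the next expansion once enough minerals are banked.
        if self.minerals > 500:
            await self.expand_now()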
| StarcoderdataPython |
3219475 | <gh_stars>1-10
"""
PyWRM Layout Widget implementation
"""
from external_widgets.dhx.dhx_layout import Layout as dhx_layout
from external_widgets.w2ui.w2ui_layout import Layout as w2ui_layout
class Panel:
""" Panel object for placement into a Layout Widget"""
def __init__(self, panel_type, panel_id, **kwargs):
self.panel_type = panel_type
self.panel_id = panel_id
self.kwargs = kwargs
self.panel_type_info = None
self.parent = None
#TODO check kwargs against property setters and raise if not there
self.config = {
"id": panel_id,
**kwargs
}
def build_defaults(self, parent):
"""Apply panel defaults to configuration"""
self.parent = parent
self.panel_type_info = self.parent.base_widget.panel_type_info[self.panel_type]
panel_defaults = {k.replace("panel_default_", ""):v
for k, v in self.panel_type_info.items()
if "panel_default_" in k}
# config should update panel_defaults so user values take precedence
panel_defaults.update(self.config)
self.config = panel_defaults
def attach_widget(self, widget):
"""attach a widget to the panel"""
self.parent.attach_widget(widget, self)
def collapse(self):
"""collapse a panel"""
self.parent.collapse(self)
def expand(self):
"""expand a panel"""
self.parent.expand(self)
def hide(self):
"""hide a panel"""
self.parent.hide(self)
def show(self):
"""show a panel"""
self.parent.show(self)
def toggle(self):
"""toggle hide / show of a panel"""
self.parent.toggle(self)
@property
def style(self) -> str:
"""get the name of a CSS class(es) applied to Layout"""
return self.config.get("css", "")
@style.setter
def style(self, value: str):
"""set the name of a CSS class(es) applied to Layout"""
self.config["css"] = value
@property
def title_text(self) -> str:
"""gets a header with text for a cell"""
return self.config.get("header", "")
@title_text.setter
def title_text(self, value: str):
"""adds a header with text for a cell"""
self.config["header"] = value
@property
def height(self) -> str:
"""gets the height of a cell depending on the panel type"""
return self.config.get("height", "")
@height.setter
def height(self, value: str):
"""sets the height of a cell depending on the panel type"""
self.config["height"] = value
@property
def width(self) -> str:
"""gets the width of a cell depending on the panel type"""
return self.config.get("width", "")
@width.setter
def width(self, value: str):
"""sets the width of a cell depending on the panel type"""
self.config["width"] = value
@property
def hidden(self):
"""gets whether a cell is hidden"""
return self.config.get("hidden", False)
@hidden.setter
def hidden(self, value: bool):
"""defines whether a cell is hidden"""
self.config["hidden"] = value
@property
def html(self):
"""gets HTML content for a cell"""
return self.config.get("html", "")
@html.setter
def html(self, value: str):
"""sets HTML content for a cell"""
self.config["html"] = value
@property
def resizable(self):
"""gets whether a cell can be resized"""
return self.config.get("resizable", False)
@resizable.setter
def resizable(self, value: bool):
"""defines whether a cell can be resized"""
self.config["resizable"] = value
class PanelType:
"""Panel Types"""
content_top = "content_top"
top_header = "top_header"
bottom_footer = "bottom_footer"
left_side = "left_side"
right_side = "right_side"
content_bottom = "content_bottom"
class Layout:
"""Layout widget implementation"""
widget_set = None
def __init__(self, layout_id, parent=None, session_id=""):
widget_set = self.widget_set or (parent.widget_set if parent else None)
self.parent = parent
self.session_id = session_id or self.parent.session_id
self.name = layout_id
self._has_panel = False
if widget_set == "dhx":
self._base_layout = dhx_layout(layout_id, session_id=self.session_id, parent=parent)
elif widget_set == "w2ui":
self._base_layout = w2ui_layout(layout_id, session_id=self.session_id, parent=parent)
else:
raise ValueError("Widgetset is not defined")
if self.name == "mainwindow":
self.init_widget()
else:
self.widget_set = parent.widget_set
self.on_panel_hide_callable = None
self.on_panel_show_callable = None
self.on_panel_resize_callable = None
self.before_panel_resize_callable = None
@property
def base_widget(self):
"""Returns protected base layout class"""
return self._base_layout
def init_widget(self):
"""Initialize the widget for the first time"""
self._base_layout.init_layout(self._has_panel, self.name)
def add_panels(self, *panels):
"""Add panel to layout top_header, bottom_footer, left_side, right_side, content_bottom"""
for apanel in panels:
apanel.build_defaults(self)
add_function = getattr(self._base_layout, f"add_{str(apanel.panel_type)}")
add_function(**apanel.config)
setattr(self, apanel.panel_id, apanel)
self._has_panel = True
def hide(self, panel):
"""Hide a panel"""
self._base_layout.hide_panel(panel.panel_id)
def repaint(self):
"""Repaint the layout"""
self._base_layout.repaint()
def show(self, panel):
"""Show a panel"""
self._base_layout.show_panel(panel.panel_id)
    def toggle(self, panel):
        """Toggle show / hide of a panel"""
        # assumes the base layout exposes toggle_panel alongside hide_panel/show_panel
        self._base_layout.toggle_panel(panel.panel_id)
def on_panel_hide(self, callable, ret_widget_values=None, block_signal=False):
"""Panel hide event hook"""
#TODO implement ret_widget_values
ret_id_list = []
self.on_panel_hide_callable = callable
self._base_layout.on_panel_hide(self.on_panel_hide_return, ret_id_list, block_signal)
def on_panel_hide_return(self, panel_id):
"""Panel hide event return"""
self.on_panel_hide_callable(panel_id)
def on_panel_show(self, callable, ret_widget_values=None, block_signal=False):
"""Panel show event hook"""
#TODO implement ret_widget_values
ret_id_list = []
self.on_panel_show_callable = callable
self._base_layout.on_panel_show(self.on_panel_show_return, ret_id_list, block_signal)
def on_panel_show_return(self, panel_id):
"""Panel show event return"""
self.on_panel_show_callable(panel_id)
def on_panel_resize(self, callable, ret_widget_values=None, block_signal=False):
"""Panel resize event hook"""
#TODO implement ret_widget_values
ret_id_list = []
self.on_panel_resize_callable = callable
self._base_layout.on_panel_resize(self.on_panel_resize_return, ret_id_list, block_signal)
def on_panel_resize_return(self, panel_id):
"""Panel resize event return"""
self.on_panel_resize_callable(panel_id)
def before_panel_resize(self, callable, ret_widget_values=None, block_signal=False):
"""Panel before resize event hook"""
#TODO implement ret_widget_values
ret_id_list = []
self.before_panel_resize_callable = callable
self._base_layout.before_panel_resize(
self.before_panel_resize_return,
ret_id_list,
block_signal
)
def before_panel_resize_return(self, panel_id):
"""Panel before resize event return"""
self.before_panel_resize_callable(panel_id)
def attach_widget(self, widget, panel=None):
"""Attach a widget to the layout on a specific panel"""
uid = widget.base_widget._raw_layout._unique_id
panel_id = panel.panel_id if panel else None
config = widget.base_widget._build_config()
self.base_widget.attach_widget(uid, panel_id or self.name, config)
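# Hypothetical usage sketch (assumes a subclass sets `widget_set` to one of the
# supported values, "dhx" or "w2ui", and that the session id and `some_widget`
# placeholders are supplied by the surrounding application):
#
#   class DhxLayout(Layout):
#       widget_set = "dhx"
#
#   main = DhxLayout("mainwindow", session_id="session123")   # "mainwindow" auto-inits
#   sidebar = Panel(PanelType.left_side, "sidebar", width="200px", resizable=True)
#   content = Panel(PanelType.content_top, "content")
#   main.add_panels(sidebar, content)
#   content.attach_widget(some_widget)
#   sidebar.hide()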
| StarcoderdataPython |
22651 | <reponame>EnjoyLifeFund/macHighSierra-py36-pkgs
# -*- coding: utf-8 -*-
# Copyright (c) 2013, <NAME>
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
"""
Acoustic weighting functions (:mod:`pydsm.audio_weightings`)
============================================================
Some standard acoustic weighting functions.
This module includes the A-, B- and C-weightings from the
ANSI Standards S1.4-1983 and S1.42-2001.
It also includes the D-weighting from the now withdrawn IEC 537.
It also includes the F-weighting proposed by <NAME>.
The weighting functions can be expressed either in terms of
acoustic power or in terms of signal amplitude.
The weighting functions are also available in terms of a filter-based
implementation. In this case, be careful since no normalization is
present so that the gain at 1 kHz can be arbitrary. The filter
transfer function is referred to a signal amplitude weighting.
.. currentmodule:: pydsm.audio_weightings
Weighting functions
-------------------
.. autosummary::
:toctree: generated/
a_weighting
b_weighting
c_weighting
d_weighting
f_weighting
Filter implementation of weighting functions
--------------------------------------------
.. autodata:: a_zpk
:annotation:
.. autodata:: b_zpk
:annotation:
.. autodata:: c_zpk
:annotation:
.. autodata:: d_zpk
:annotation:
.. autodata:: f_zpk
:annotation:
Normalization constants
-----------------------
.. autodata:: a_weighting_gain
:annotation:
.. autodata:: b_weighting_gain
:annotation:
.. autodata:: c_weighting_gain
:annotation:
.. autodata:: d_weighting_gain
:annotation:
.. autodata:: f_weighting_gain
:annotation:
Notes
-----
The ANSI and IEC weightings are also described in Wikipedia [1]_
and summarized in some illustrative web pages such as [2]_ and
[3]_. The F-weighting is documented in [4]_.
The filter-based implementation of the F-weighting is so high-order that
evaluation of the transfer function may require special care.
.. [1] Wikipedia (http://en.wikipedia.org/wiki/A-weighting)
.. [2] Cross spectrum (http://www.cross-spectrum.com/audio/weighting.html)
.. [3] Product Technology Parters "Noise Measurement Briefing"
(http://www.ptpart.co.uk/noise-measurement-briefing/)
.. [4] <NAME> "Psychoacoustically Optimal Noise
Shaping," J. Audio Eng. Soc., Vol. 40 No. 7/8 1992 July/August
"""
from __future__ import division, print_function
import numpy as np
__all__ = ["a_zpk", "a_weighting", "b_zpk", "b_weighting",
"c_zpk", "c_weighting", "d_zpk", "d_weighting",
"f_zpk", "f_weighting"]
a_zpk = (2*np.pi*np.asarray([0., 0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -107.7, -739.9, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""A-weighting filter in zpk form."""
b_zpk = (2*np.pi*np.asarray([0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -158.5, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""B-weighting filter in zpk form."""
c_zpk = (2*np.pi*np.asarray([0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""C-weighting filter in zpk form."""
d_zpk = (2*np.pi*np.asarray([0., -519.8+876.2j, -519.8-876.2j]),
2*np.pi*np.asarray([-282.7, -1160., -1712+2628j, -1712-2628j]),
91104.32)
"""D-weighting filter in zpk form."""
f_zpk = (2*np.pi*np.asarray([0., 0., 0.,
-580+1030j, -580-1030j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j]),
2*np.pi*np.asarray([-180., -180., -180.,
-1630., -1630.,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j]),
1.6810544531883432e+207)
"""F-weighting filter in zpk form."""
# Note: evaluating the transfer function of f_zpk may require special care
# since the high order implies that for many frequency values both the
# numerator and the denominator take very large values (in magnitude). Taking
# the ratio of large complex values may lead to overflow in numpy even if
# individually the numerator, the denominator and the result should not
# overflow.
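# A minimal sketch (not part of the original PyDSM API) of one way to avoid the
# overflow mentioned above: evaluate the magnitude response of a zpk filter in the
# log domain, accumulating log10 terms instead of forming the huge products directly.
def _zpk_log10_magnitude(zpk, f):
    """Return the log10 magnitude of a zpk transfer function at frequency f in Hz."""
    z, p, k = zpk
    s = 2j*np.pi*f
    return (np.log10(np.abs(k)) +
            np.sum(np.log10(np.abs(s - z))) -
            np.sum(np.log10(np.abs(s - p))))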
def a_weighting(f, normal=True, power=True):
"""Returns the A-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return a_weighting(f, normal, power=False)**2
w = (12200.0**2*f**4)/((f**2+20.6**2) *
np.sqrt((f**2+107.7**2) *
(f**2+737.9**2))*(f**2+12200.0**2))
return w if not normal else w*a_weighting_gain
a_weighting_gain = 1/a_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to A-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def b_weighting(f, normal=True, power=True):
"""Returns the B-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return b_weighting(f, normal, power=False)**2
w = (12200.0**2*f**3)/((f**2+20.6**2) *
np.sqrt(f**2+158.5**2)*(f**2+12200.0**2))
return w if not normal else w*b_weighting_gain
b_weighting_gain = 1/b_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to B-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def c_weighting(f, normal=True, power=True):
"""Returns the C-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return c_weighting(f, normal, power=False)**2
w = (12200.0**2*f**2)/((f**2+20.6**2)*(f**2+12200.0**2))
return w if not normal else w*c_weighting_gain
c_weighting_gain = 1/c_weighting(1000, normal=False, power=False)
"""Normalization gain to apply to C-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def d_weighting(f, normal=True, power=True):
"""Returns the D-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz. This parameter is ignored, since this weighting function
is always normalized.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
"""
if power:
return d_weighting(f, normal, power=False)**2
def h(f):
return (((1037918.48-f**2)**2+1080768.16*f**2) /
((9837328.0-f**2)**2+11723776.0*f**2))
return (f/6.8966888496476E-5 *
np.sqrt(h(f)/((f**2+79919.29)*(f**2+1345600.0))))
d_weighting_gain = 1.
"""Normalization gain to apply to D-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
def f_weighting(f, normal=True, power=True):
"""Returns the F-weighting as a function of frequency.
Parameters
----------
f : float or array of floats
frequency where the weighting function is computed
normal : bool
whether the function should be normalized to have unit gain at
1 kHz.
power : bool
whether the function should express the weighting in terms of
acoustic power or signal amplitude
Returns
-------
w : float or array of floats
        value of the weighting function
Notes
-----
The F-weighting function is documented in [1]_.
.. [1] <NAME> "Psychoacoustically Optimal Noise Shaping,"
J. Audio Eng. Soc., Vol. 40 No. 7/8 1992 July/August
"""
if not power:
return np.sqrt(f_weighting(f, normal, power=True))
fx = f/1000.
g = 2.536e-5
z1 = fx**2
z2 = ((0.58**2)+(1.03**2)-z1)**2 + 4.0*(0.58**2)*z1
z3 = ((3.18**2)+(8.75**2)-z1)**2 + 4.0*(3.18**2)*z1
p1 = 0.18**2+z1
p2 = 1.63**2+z1
p3 = ((2.51**2)+(3.85**2)-z1)**2 + 4.0*(2.51**2)*z1
p4 = ((6.62**2)+(14.29**2)-z1)**2 + 4.0*(6.62**2)*z1
w = ((g*((z1**3)*z2*(z3**3)) /
((p1**3)*(p2**2)*(p3**4))*((1e5/p4)**20)))
return w if not normal else w*f_weighting_gain
# Set normalization gain
f_weighting_gain = 1/f_weighting(1000, normal=False, power=True)
"""Normalization gain to apply to F-weighting filter (namely, the
attenuation of the filter at 1 kHz)"""
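# Minimal usage sketch (assumed, not part of the original module): print the A- and
# C-weighting attenuations in dB at a few frequencies; both are 0 dB at 1 kHz by
# construction of the normalization gains.
if __name__ == "__main__":
    for freq in (100., 1000., 10000.):
        a_db = 10*np.log10(a_weighting(freq))   # power weighting -> dB
        c_db = 10*np.log10(c_weighting(freq))
        print("f = {:7.1f} Hz   A: {:6.2f} dB   C: {:6.2f} dB".format(freq, a_db, c_db))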
| StarcoderdataPython |
1764210 | from rule import *
from expr_tree import *
from typing import Optional, Set
class Reduce:
@staticmethod
def reduce_expr_string(ex_str: str):
ex = parse_expr(ex_str)
return Reduce.reduce(ex)
@staticmethod
def reduce(ex: BooleanFunction):
# Step 0
rules_1, rules_2, rules_3 = Rule.read_rules("rules.json")
g = ex
rules = []
ex_list = []
min_ex = g
temp_ex = g
old_ex_set = {g}
rules_13 = rules_1 # + rules_3
def step_2(_ex, ex_set):
print("STEP 2")
found_result = False
found_ex = None
found_rule = None
for _rule in rules_2:
_temp_rules = Reduce.apply_equal_rule(_ex, _rule)
for _temp_rule in _temp_rules:
# print("temp_r", temp_r)
_h, _new_rule = Rule.rule_replace(_ex, _temp_rule)
if _new_rule and not ex_set.__contains__(_h):
found_result = True
found_ex = _h
found_rule = _rule
print("r", _rule)
print("h", _h)
print("found")
break
if found_result:
break
return found_result, found_ex, found_rule
def step_3(_ex, ex_set):
print("STEP 3")
found_result = False
found_ex = None
found_rule = None
for _rule in rules_13:
# print("r", _rule)
_temp_rules = Reduce.apply_equal_rule(_ex, _rule)
for _temp_rule in _temp_rules:
# print("temp_r", temp_r)
_h, _new_rule = Rule.rule_replace(_ex, _temp_rule)
# print("h", h)
if _new_rule and not ex_set.__contains__(_h) and simpler(_h, _ex):
print("found")
if found_result:
if k_degree(found_ex) > k_degree(_h):
print("choose better rule")
found_ex = _h
found_rule = _rule
else:
found_result = True
found_ex = _h
found_rule = _rule
print("r", _rule)
print("h", _h)
break
if found_result:
break
return found_result, found_ex, found_rule
def step_4(min_expr, temp_expr, expr_list, rule_list):
if min_expr == temp_expr:
return expr_list, rule_list
if min_expr in expr_list:
i = expr_list.index(min_expr)
return expr_list[0:i + 1], rule_list[0:i + 1]
else:
return expr_list, rule_list
# Step 1
found = True
while found and k_degree(min_ex) > 0:
found = False
for r in rules_1:
temp_rules = Reduce.apply_equal_rule(min_ex, r)
for temp_r in temp_rules:
h, new_rule = Rule.rule_replace(min_ex, temp_r)
if new_rule and not old_ex_set.__contains__(h) and simpler(h, min_ex):
if found:
print("choose better rule")
rules.append(r)
ex_list.append(h)
old_ex_set.add(h)
min_ex = h
temp_ex = h
else:
rules.append(r)
ex_list.append(h)
old_ex_set.add(h)
min_ex = h
temp_ex = h
print("h", h)
print("r", r)
print("found")
break
if found:
break
# if rules.__len__() == 0:
# return min_ex, rules # to step 2
# el
if min_ex.args.__len__() == 1 and (
min_ex.args[0].func is BooleanTrue or min_ex.args[0].func is BooleanFalse):
ex_list, rules = step_4(min_ex, temp_ex, ex_list, rules)
return min_ex, rules, ex_list # to step 4
# Step 2
print("STEP 2")
found, f_ex, f_rule = step_2(temp_ex, old_ex_set)
if f_ex and f_rule:
rules.append(f_rule)
ex_list.append(f_ex)
old_ex_set.add(f_ex)
temp_ex = f_ex
if not found:
ex_list, rules = step_4(min_ex, temp_ex, ex_list, rules)
return min_ex, rules, ex_list # to step 4
# Step 3
print("STEP 3")
can_expand = True
found = True
while found and k_degree(temp_ex) > 0:
found, f_ex, f_rule = step_3(temp_ex, old_ex_set)
if f_ex is not None and f_rule:
rules.append(f_rule)
ex_list.append(f_ex)
old_ex_set.add(f_ex)
temp_ex = f_ex
if simpler(f_ex, min_ex):
min_ex = f_ex
if not found and (can_expand or k_degree(temp_ex) <= 0):
can_expand, f_ex, f_rule = step_2(temp_ex, old_ex_set)
found = can_expand
if f_ex and f_rule:
rules.append(f_rule)
ex_list.append(f_ex)
old_ex_set.add(f_ex)
temp_ex = f_ex
ex_list, rules = step_4(min_ex, temp_ex, ex_list, rules)
return min_ex, rules, ex_list
@staticmethod
def reduce_2_expr_string(ex_str: str):
ex = parse_expr(ex_str)
return Reduce.reduce_2(ex)
@staticmethod
def reduce_2(ex: BooleanFunction):
def apply_found_rule(_ex, _rule):
nonlocal ex_list, rules, min_ex, temp_ex, old_ex_set
print("found_rule", _rule)
pprint(_ex)
rules.append(_rule)
ex_list.append(_ex)
old_ex_set.add(_ex)
temp_ex = _ex
if simpler(_ex, min_ex):
min_ex = _ex
def find_rules(_ex, rules_list):
nonlocal ex_list, rules, min_ex, temp_ex, old_ex_set
new_ex_count = 0
found_result = True
while found_result:
found_result = False
found_ex = None
found_rule = None
for _rule in rules_list:
# print("r", _rule)
_temp_rules = Reduce.apply_equal_rule(_ex, _rule)
for _temp_rule in _temp_rules:
# print("temp_r", temp_r)
_h, _new_rule = Rule.rule_replace(_ex, _temp_rule)
# print("h", h)
if _new_rule and not old_ex_set.__contains__(_h):
if found_result:
if simpler(_h, found_ex):
                                    # within a single rule, pick the rewrite that yields the shortest expression
found_ex = _h
found_rule = _rule
else:
found_result = True
found_ex = _h
found_rule = _rule
if found_result:
apply_found_rule(found_ex, found_rule)
_ex = found_ex
new_ex_count += 1
return new_ex_count > 0
def distribute_rules(_ex):
nonlocal ex_list, rules, min_ex, temp_ex, old_ex_set
found_result = False
found_ex = None
found_rule = ('Luật phân phối', None, None)
def valid_expr(_expr: BooleanFunction):
if _expr.func is Or:
return len([sub_ag for sub_ag in ag.args if sub_ag.func is And])
elif _expr.func is And:
return len([sub_ag for sub_ag in ag.args if sub_ag.func is Or])
return 0
for ag in preorder_traversal(_ex):
valid_count = valid_expr(ag)
if valid_count > 0:
if valid_count == 1:
_h = Reduce.distributive_law(_ex, ag)
elif valid_count == 2 and ag.args.__len__() == 2:
_h = Reduce.distributive_law_2_args(_ex, ag)
else:
continue
if _h is not None and not old_ex_set.__contains__(_h):
if found_result:
if simpler(found_ex, _h):
found_ex = _h
else:
found_result = True
found_ex = _h
if found_result:
apply_found_rule(found_ex, found_rule)
return found_result
def de_morgan_expand_rules(_ex):
nonlocal ex_list, rules, min_ex, temp_ex, old_ex_set
found_result = False
found_ex = None
found_rule = ('Luật De Morgan', True, None)
def valid_expr(_expr: BooleanFunction):
return _expr.func is Not and (_expr.args[0].func is And or _expr.args[0].func is Or)
for ag in preorder_traversal(_ex):
if valid_expr(ag):
_h = Reduce.de_morgan_expand_law(_ex, ag)
if _h is not None and not old_ex_set.__contains__(_h):
if found_result:
if simpler(found_ex, _h):
found_ex = _h
else:
found_result = True
found_ex = _h
if found_result:
apply_found_rule(found_ex, found_rule)
return found_result
def de_morgan_reduce_rules(_ex):
nonlocal ex_list, rules, min_ex, temp_ex, old_ex_set
found_result = False
found_ex = None
found_rule = ('Luật De Morgan', False, None)
def valid_expr(_expr: BooleanFunction):
if _expr.func is not And and _expr.func is not Or:
return False
return [sub_expr for sub_expr in _expr.args if sub_expr.func is Not].__len__() > 1
for ag in preorder_traversal(_ex):
if valid_expr(ag):
_h = Reduce.de_morgan_reduce_law(_ex, ag)
if _h is not None and not old_ex_set.__contains__(_h):
if found_result:
if simpler(_h, found_ex):
found_ex = _h
elif simpler(_h, min_ex):
found_result = True
found_ex = _h
if found_result:
apply_found_rule(found_ex, found_rule)
return found_result
def remove_useless_steps():
nonlocal ex_list, rules
if min_ex == temp_ex:
return
if min_ex in ex_list:
i = ex_list.index(min_ex)
ex_list = ex_list[0:i + 1]
rules = rules[0:i + 1]
# Step 0
all_groups = ["group_1", "group_2", "group_3", "group_4", "group_5", "group_6", "group_7", "group_2_1"]
all_rules = Rule.read_rules_2("rules_2.json", all_groups)
rules_1 = all_rules[0]
rules_2 = all_rules[1]
rules_3 = all_rules[2]
rules_4 = all_rules[3]
rules_5 = all_rules[4]
rules_6 = all_rules[5]
rules_7 = all_rules[6]
rules_2_1 = all_rules[7]
g = ex
rules = []
ex_list = []
min_ex = g
temp_ex = g
old_ex_set = {g}
found = True
distribution_count = 0
found_distribution = False
while found and k_degree(temp_ex) > 0:
found = False
# Group 1
found_in_group_1 = temp_ex.atoms(Not).__len__() > 0 or temp_ex.atoms(
BooleanTrue).__len__() > 0 or temp_ex.atoms(
BooleanFalse).__len__() > 0
while found_in_group_1:
found_in_group_1 = find_rules(temp_ex, rules_1)
found_in_group_1 = found_in_group_1 and (temp_ex.atoms(Not).__len__() > 0 or temp_ex.atoms(
BooleanTrue).__len__() > 0 or temp_ex.atoms(
BooleanFalse).__len__() > 0)
found = found_in_group_1 or found
print('found in group 1', found)
# Group 2
while find_rules(temp_ex, rules_2):
found = True
print('found in group 2', found)
# Group 2.1
if not found_distribution:
while find_rules(temp_ex, rules_2_1):
found = True
print('found in group 2.1', found)
# Group 4
found_implies = False
implies_count = temp_ex.atoms(Implies).__len__()
while implies_count > 0:
found_implies = find_rules(temp_ex, rules_4) or found_implies
implies_count = temp_ex.atoms(Implies).__len__()
print('found in group 4', found_implies)
# Group 5
if found_implies:
found = True
while find_rules(temp_ex, rules_5):
print('found in group 5')
# Group 3
while find_rules(temp_ex, rules_3):
found = True
print('found in group 3', found)
if found is False:
found = de_morgan_expand_rules(temp_ex) or found
if found is False and distribution_count < 5:
found_distribution = distribute_rules(temp_ex)
found = found_distribution or found
if found:
distribution_count += 1
else:
found_distribution = False
if found is False:
found = de_morgan_reduce_rules(temp_ex) or found
remove_useless_steps()
return min_ex, rules, ex_list
@staticmethod
def equivalent_expr_string(ex_str_1: str, ex_str_2: str):
ex1 = parse_expr(ex_str_1)
ex2 = parse_expr(ex_str_2)
return Reduce.equivalent(ex1, ex2)
@staticmethod
def equivalent(ex1: BooleanFunction, ex2: BooleanFunction):
if ex1 == ex2:
return True, None, None
if simpler(ex1, ex2):
ex1, ex2 = ex2, ex1
min_ex1, rules_1, expr_list_1 = Reduce.reduce_2(ex1)
if expr_list_1[-1] == ex2:
return True, (ex1, rules_1, expr_list_1), None
min_ex2, rules_2, expr_list_2 = Reduce.reduce_2(ex2)
if expr_list_1[-1] == expr_list_2[-1]:
return True, (ex1, rules_1, expr_list_1), (ex2, rules_2, expr_list_2)
return False, None, None
@staticmethod
def apply_equal_rule(ex: BooleanFunction, rule):
left_rule_ex = rule[1]
right_rule_ex = rule[2]
rules = Rule.apply_rule(ex, left_rule_ex, right_rule_ex)
# rules.extend(Reduce.apply_rule(ex, right_rule_ex, left_rule_ex))
return rules
@staticmethod
def de_morgan_expand_law(ex: BooleanFunction, ag: BooleanFunction) -> BooleanFunction:
func = And if ag.args[0].func is Or else Or
args = ag.args[0].args
new_arg = list(map(lambda sub_arg: Not(sub_arg), list(args)))
return ex.xreplace({ag: func(*new_arg)})
@staticmethod
def de_morgan_reduce_law(ex: BooleanFunction, ag: BooleanFunction) -> BooleanFunction:
func = And if ag.func is Or else Or
not_args, other_args = sift(ag.args, lambda _arg: _arg.func is Not, binary=True)
not_args = list(map(lambda _arg: _arg.args[0], not_args))
new_not_expr = Not(func(*not_args))
if other_args.__len__() > 0:
new_expr = ag.func(new_not_expr, *other_args)
else:
new_expr = new_not_expr
return ex.xreplace({ag: new_expr})
@staticmethod
def distributive_law(ex: BooleanFunction, ag: BooleanFunction) -> BooleanFunction:
args = list(ag.args)
func = And if ag.func is Or else Or
small_args = args
big_args = max([x for x in ag.args if x.func is func], key=lambda x: x.args.__len__())
small_args.remove(big_args)
        # find the expression in small_args that has the largest overlap with big_args
count = 0
best_args = small_args[0]
big_args_set = Reduce.normalise_args_set(set(big_args.args))
for a in small_args:
if a.func is Symbol:
count_a = 1 if big_args_set.__contains__(a) else 0
else:
set_a = Reduce.normalise_args_set(set(a.args))
count_a = (big_args_set.intersection(set_a)).__len__()
if count_a > count:
count = count_a
best_args = a
small_args.remove(best_args)
new_args = list(map(lambda x: ag.func(best_args, x), list(big_args.args)))
sub_expr = func(*new_args)
small_args.append(sub_expr)
return ex.xreplace({ag: ag.func(*small_args)})
@staticmethod
def distributive_law_2_args(ex: BooleanFunction, ag: BooleanFunction) -> Optional[BooleanFunction]:
if ag.args.__len__() != 2:
return None
left_args = list(ag.args[0].args)
right_args = list(ag.args[1].args)
if left_args.__len__() > 3 or right_args.__len__() > 3:
return None
func = And if ag.func is Or else Or
from itertools import product
sub_expr = [ag.func(lhs, rhs) for lhs, rhs in product(left_args, right_args)]
return ex.xreplace({ag: func(*sub_expr)})
@staticmethod
def normalise_args_set(s: Set[BooleanFunction]) -> Set[BooleanFunction]:
item_to_remove = set(item for item in s if item.func is Not)
s = s.difference(item_to_remove)
new_items = set()
for i in item_to_remove:
new_items = new_items.union(set(i.args))
return s.union(new_items)
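# Hypothetical usage sketch (assumes the rule files "rules.json" / "rules_2.json" sit
# next to this module and that `parse_expr` accepts sympy-style boolean syntax):
#
#   min_ex, applied_rules, steps = Reduce.reduce_2_expr_string("(a & b) | (a & ~b)")
#   pprint(min_ex)                       # ideally the reduced form, e.g. a
#   for rule, step in zip(applied_rules, steps):
#       print(rule[0])                   # rule name
#       pprint(step)                     # expression after applying that rule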
| StarcoderdataPython |
1759308 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Program: compute a + aa + aaa + aaaa
#
# Write a program that computes the value of a+aa+aaa+aaaa
# with a given digit as the value of a.
# Suppose the following input is supplied to the program:
# 9
# Then, the output should be:
# 11106
# Hints:
# In case of input data being supplied to the question, it should be
# assumed to be a console input.
# Simple Approach
# joining strings
def concat(value, limit):
tempvar = ''
for iter in range(1, limit + 1):
tempvar = tempvar + value
print tempvar
return tempvar
def main():
try:
print "sample input\n", "9,4"
num = int(raw_input('Enter a Number: '))
limit = int(raw_input('Enter a Number: '))
ans = 0
stru = str(num) # !IMP
for iter in range(1, limit + 1):
# print int(concat('9', j))
ans = ans + int(concat(stru, iter)) # performs 9+99+999+9999
print ans
except:
print 'Exception:', 'Invalid Input'
if __name__ == '__main__':
main()
# Sample run with inputs 9 and 4:
#   9 + 99 + 999 + 9999 = 11106
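# A more Pythonic way to get the same final sum as the loop above
# (assumption: same Python 2 environment, with num and limit already read):
#   print sum(int(str(num) * i) for i in range(1, limit + 1))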
| StarcoderdataPython |
1694874 | """TF lite converter for larq models."""
import tensorflow as tf
import numpy as np
import larq as lq
from larq_compute_engine import bsign, bconv2d64
from larq_compute_engine.tf.python.utils import tf_2_or_newer
from tensorflow.keras.utils import get_custom_objects
get_custom_objects()["bsign"] = bsign
quantizer_replacements = {
"SteSign": bsign,
"ste_sign": bsign,
"approx_sign": bsign,
"MagnitudeAwareSign": None,
"magnitude_aware_sign": None,
"swish_sign": bsign,
"SwishSign": bsign,
"SteTern": None,
"ste_tern": None,
"SteHeaviside": None,
"ste_heaviside": None,
"DoReFaQuantizer": None,
"dorefa_quantizer": None,
}
def create_bconv_layer(
weights, strides, padding, transpose=True, fused_multiply=None, fused_add=None
):
"""
Creates a binary convolution layer for tflite.
If `transpose` is True, transposes from HWIO to OHWI
When `fused_multiply` is not `None`, it should be a 1D array of size equal to the filter out-channel dimension.
In this case, a multiplication op is inserted *after* the convolution.
This multiplication op will be merged into the batchnorm op by the converter.
This has two purposes:
- Implement the multiplication for the back-transformation from {0,1} to {-1,1}
- Implement the multiplication for magnitude_aware_sign in BiRealNet
"""
strides = [1, strides[0], strides[1], 1]
padding = padding.upper()
# Here the weights are still HWIO
dotproduct_size = weights.shape[0] * weights.shape[1] * weights.shape[2]
filter_format = "HWIO"
if transpose:
# Transpose: change from HWIO to OHWI
weights = np.moveaxis(weights, 3, 0)
filter_format = "OHWI"
weights = np.sign(np.sign(weights) + 0.5)
out_channels = weights.shape[0]
if fused_multiply is None:
fused_multiply = np.full(shape=(out_channels), fill_value=1)
elif len(fused_multiply.shape) != 1 or fused_multiply.shape[0] != out_channels:
raise Exception(
f"ERROR: Argument fused_multiply should have shape ({weights.shape[0]}) but has shape {fused_multiply.shape}"
)
if fused_add is None:
fused_add = np.full(shape=(out_channels), fill_value=0)
elif len(fused_add.shape) != 1 or fused_add.shape[0] != out_channels:
raise Exception(
f"ERROR: Argument fused_add should have shape ({weights.shape[0]}) but has shape {fused_add.shape}"
)
# The bconv will do the following:
# output = fused_add[channel] + fused_multiply[channel] * popcount
# We use this to implement two things:
# - `y1 = n - 2 * popcount` (the backtransformation to -1,+1 space)
# - `y2 = a + b * y1` (optional fused batchnorm)
# Together they become
# `y = (a + b*n) + (-2b) * popcount
fused_add = fused_add + dotproduct_size * fused_multiply
fused_multiply = -2 * fused_multiply
def bconv_op(x):
y = bconv2d64(
x,
weights,
fused_multiply,
fused_add,
strides,
padding,
data_format="NHWC",
filter_format=filter_format,
)
return y
return bconv_op
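# Illustrative sketch (assumes the larq_compute_engine custom ops are available, which
# they must be for the `bconv2d64` import above to succeed); the weight and input
# shapes below are made up for the example:
#
#   latent_w = np.random.randn(3, 3, 64, 32).astype(np.float32)    # HWIO
#   bconv = create_bconv_layer(latent_w, strides=(1, 1), padding="same")
#   binary_in = tf.sign(tf.random.normal((1, 32, 32, 64)))          # NHWC, values in {-1, +1}
#   out = bconv(binary_in)                                           # (1, 32, 32, 32) with SAME padding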
def replace_layers(model, replacement_dict):
"""
This function is adapted from
https://stackoverflow.com/questions/49492255/how-to-replace-or-insert-intermediate-layer-in-keras-model
    Note: it currently fails on complicated topologies, e.g. two sub-networks in parallel (two input
    tensors fed to separate branches that produce two output tensors) viewed as a single model.
    However, we will probably switch to another conversion method once we understand grappler and
    other tensorflow parts; for now this method is fine because it works on all Larq models.
"""
# Auxiliary dictionary to describe the network graph
network_dict = {"input_layers_of": {}, "new_output_tensor_of": {}}
# Set the input layers of each layer
for layer in model.layers:
for node in layer.outbound_nodes:
layer_name = node.outbound_layer.name
if layer_name not in network_dict["input_layers_of"]:
network_dict["input_layers_of"].update({layer_name: [layer.name]})
else:
network_dict["input_layers_of"][layer_name].append(layer.name)
# Set the output tensor of the input layer
network_dict["new_output_tensor_of"].update({model.layers[0].name: model.input})
# Iterate over all layers after the input
for layer in model.layers[1:]:
        if layer.name not in network_dict["input_layers_of"]:
print(f"ERROR: {layer.name} not in input_layers_of")
return None
# Determine input tensors
layer_input = [
network_dict["new_output_tensor_of"][layer_aux]
for layer_aux in network_dict["input_layers_of"][layer.name]
]
if len(layer_input) == 1:
layer_input = layer_input[0]
# Insert layer if name matches the regular expression
if layer.name in replacement_dict:
x = layer_input
new_layer = replacement_dict[layer.name]
new_layer.name = "{}_new".format(layer.name)
x = new_layer(x)
else:
x = layer(layer_input)
# Set new output tensor (the original one, or the one of the inserted
# layer)
network_dict["new_output_tensor_of"].update({layer.name: x})
return tf.keras.Model(inputs=model.inputs, outputs=x)
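# Hypothetical usage sketch: swap the layer named "conv1" (a made-up name) for a
# sign op while keeping the rest of the graph intact.
#
#   patched_model = replace_layers(model, {"conv1": tf.keras.layers.Lambda(tf.sign)})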
class ModelConverter:
"""Converter to create TF lite models from Larq Keras models
    This converter will convert the input quantizers to their tflite counterparts.
It will remove the kernel quantizers and only store the signs instead of the latent weights.
# Arguments
model: The Keras model to convert.
!!! example
```python
from larq_zoo import BiRealNet
model = BiRealNet(weights="imagenet")
conv = ModelConverter(model)
tflite_model = conv.convert()
# Or directly save it to a file:
conv.convert("birealnet.tflite")
```
"""
def __init__(self, model):
self.model = model
def convert(self, filename=None):
"""Convert and return the tflite model.
Optionally save the model to a file.
# Arguments
        filename: If `None`, the tflite model object is only returned. If it is a string, the model is also saved to that filename.
"""
if not self.fix_quantizers():
print("Model contains unsupported quantizers. No conversion will be done.")
return None
tflite_model = None
result_log = []
result_summary = []
if tf_2_or_newer():
result_summary.append("Session method: Tensorflow 1.x only")
else:
try:
tflite_model = self.convert_sessionmethod()
result_summary.append("Session method: success")
except Exception as e:
result_log.append(f"Session method log:\n{str(e)}")
result_summary.append("Session method: failed")
try:
tflite_model2 = self.convert_kerasmethod(new_converter=True)
if tflite_model is None:
tflite_model = tflite_model2
result_summary.append("MLIR method: success")
except Exception as e:
result_log.append(f"MLIR method log:\n{str(e)}")
result_summary.append("MLIR method: failed")
try:
tflite_model3 = self.convert_kerasmethod(new_converter=False)
if tflite_model is None:
tflite_model = tflite_model3
result_summary.append("Keras method: success")
except Exception as e:
result_log.append(f"Keras method log:\n{str(e)}")
result_summary.append("Keras method: failed")
print("\n----------------\nConversion logs:")
for log in result_log:
print("----------------")
print(log)
print("----------------\nConversion summary:")
for log in result_summary:
print(log)
if tflite_model is not None:
if filename is not None:
print(f"Saving tf lite model as {filename}")
open(filename, "wb").write(tflite_model)
else:
print("Did not save tf lite model.")
return tflite_model
def fix_quantizers(self):
result = True
replacement_dict = {}
for l in self.model.layers:
supported_input_quantizer = False
supported_kernel_quantizer = False
mul_weights = None
input_quantizer = None
try:
input_quantizer = l.input_quantizer
except AttributeError:
pass
if input_quantizer is not None:
name = lq.quantizers.serialize(input_quantizer)
if isinstance(name, dict):
name = name["class_name"]
if not isinstance(name, str) or name not in quantizer_replacements:
print(f"ERROR: Input quantizer {name} unknown.")
result = False
elif quantizer_replacements[name] is None:
print(f"ERROR: Input quantizer {name} not yet supported.")
result = False
else:
l.input_quantizer = quantizer_replacements[name]
supported_input_quantizer = True
kernel_quantizer = None
try:
kernel_quantizer = l.kernel_quantizer
except AttributeError:
pass
if kernel_quantizer is None:
# When its trained with Bop then it doesn't have kernel quantizers
# So for QuantConv2D just assume its a binary kernel
if isinstance(l, lq.layers.QuantConv2D):
supported_kernel_quantizer = True
else:
name = lq.quantizers.serialize(kernel_quantizer)
if isinstance(name, dict):
name = name["class_name"]
if not isinstance(name, str) or name not in quantizer_replacements:
print(f"ERROR: Kernel quantizer {name} unknown.")
result = False
elif name == "magnitude_aware_sign":
w = l.get_weights()[0]
absw = np.abs(w)
means = np.mean(absw, axis=tuple(range(len(w.shape) - 1)))
mul_weights = means
supported_kernel_quantizer = True
# l.set_weights([means * np.sign(np.sign(w) + 0.5)])
elif quantizer_replacements[name] is None:
print(f"ERROR: Kernel quantizer {name} not yet supported.")
result = False
else:
supported_kernel_quantizer = True
if supported_input_quantizer and supported_kernel_quantizer:
l.kernel_quantizer = None
w = l.get_weights()[0]
if isinstance(l, lq.layers.QuantConv2D):
if len(w.shape) != 4:
print(
f"ERROR: Weights of layer {l.name} have shape {w.shape} which does not have rank 4."
)
result = False
else:
# Create a new layer with those weights
# TODO: Detect if there is a batchnorm and put that into
# fused_multiply, fused_add
bconvlayer = create_bconv_layer(
w,
l.strides,
l.padding,
transpose=True,
fused_multiply=mul_weights,
fused_add=None,
)
replacement_dict[l.name] = bconvlayer
else:
binary_weights = np.sign(np.sign(w) + 0.5)
l.set_weights([binary_weights])
if result and replacement_dict:
new_model = replace_layers(self.model, replacement_dict)
if new_model is None:
return False
else:
self.model = new_model
return result
def convert_kerasmethod(self, new_converter=False):
"""Conversion through the 'Keras method'
        This method works with many normal models. It still works when a single Lambda layer, such as `tf.keras.layers.Lambda(tf.sign)` or one with a custom op, is added.
        However, it sometimes stops working when *more than one* such layer is added, at any place in the network.
"""
if tf_2_or_newer():
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
else:
keras_file = "/tmp/modelconverter_temporary.h5"
tf.keras.models.save_model(self.model, keras_file)
converter = tf.lite.TFLiteConverter.from_keras_model_file(keras_file)
converter.allow_custom_ops = True
if new_converter:
converter.experimental_enable_mlir_converter = True
converter.experimental_new_converter = True
return converter.convert()
def convert_sessionmethod(self):
"""Conversion through the 'Session method'
Unlike the Keras method, this one works with multiple Lambda layers with custom ops. However, it sometimes fails with BatchNormalization layers.
        Although the error message there is different, the following issue suggests replacing `tf.keras.layers.BatchNormalization` with `tf.layers.batch_normalization(fused=False)`:
https://github.com/tensorflow/tensorflow/issues/25301
"""
converter = tf.lite.TFLiteConverter.from_session(
tf.compat.v1.keras.backend.get_session(),
self.model.inputs,
self.model.outputs,
)
converter.allow_custom_ops = True
return converter.convert()
| StarcoderdataPython |
3343428 | """
An unofficial native Python wrapper for the LivePerson Engagement History API
Documentation:
https://developers.liveperson.com/data-engagement-history-methods.html
Brands can now search, filter and keep copies of chat transcripts and related data, for example surveys, to later
integrate and further analyze their data with third-party tools (DWH, CRM systems, etc.). 99.5 % of chat transcript data
is available within 5 minutes. All other chat transcript data (including metadata like Engagement Attributes) is
available for up to 2 hours after a chat has ended, and is stored for 13 months.
Usage Example:
1. Choose User Service Login or OAuth1 Authentication.
# For User Service Login
> from lp_api_wrapper import UserLogin
> auth = UserLogin(account_id='1234', username='YOURUSERNAME', password='<PASSWORD>')
# For OAuth1 Authentication
> from lp_api_wrapper import OAuthLogin
> auth = OAuthLogin(account_id='1234', app_key='K', app_secret='S', access_token='T', access_token_secret='TS')
2. Import EngagementHistory and get data from connection
> from lp_api_wrapper import EngagementHistory
> eh_conn = EngagementHistory(auth=auth)
> body = {'start': {'from': 1491004800000, 'to': 1491091199000}}
> data = eh_conn.engagements(body)
"""
import concurrent.futures
import requests
from ...util import (LoginService, UserLogin, OAuthLogin)
from typing import (List, Optional, Union)
class EngagementHistory(LoginService):
def __init__(self, auth: Union[UserLogin, OAuthLogin]) -> None:
super().__init__(auth=auth)
self.eh_domain = self.get_domain(service_name='engHistDomain')
def engagements(self, body: dict, offset: int = 0, limit: int = 100, sort: Optional[str] = None) -> dict:
"""
Documentation:
https://developers.liveperson.com/data_api-engagement-history-methods.html
Note:
WILL RETURN 1 OFFSET OF DATA. For the complete data set of the date range, use the method 'all_engagements'.
This method returns engagements with all their metadata and related transcripts, based on a given filter,
for example, time range, skill/s, keywords, etc.
:param body: REQUIRED Enter body parameters that are the same as the API documentation.
:param offset: Specifies from which record to retrieve the chat. Default is 0.
:param limit: Max amount of conversations to be received in the response. Default and max is 100.
:param sort: Sort the results in a predefined order.
        :return: Dictionary of the JSON data from the request.
"""
url = 'https://{}/interaction_history/api/account/{}/interactions/search?'
# Establish Authorization
auth_args = self.authorize(headers={'content-type': 'application/json'})
# Generate request
r = requests.post(
url=url.format(self.eh_domain, self.account_id),
params={'offset': offset, 'limit': limit, 'sort': sort},
json=body,
**auth_args
)
# Check request status
if r.status_code == requests.codes.ok:
return r.json()
else:
print('Error: {}'.format(r.json()))
r.raise_for_status()
def all_engagements(self, body: dict, offset: int = 0, limit: int = 100, sort: Optional[str] = None,
max_concurrent_requests: int = 5, debug: bool = False) -> Union[List, List[dict]]:
"""
Documentation:
            https://developers.liveperson.com/data_api-engagement-history-methods.html
Note:
WILL RETURN ALL OFFSETS OF DATA. Please use the method 'conversations' for testing.
This method returns engagements with all their metadata and related transcripts, based on a given filter,
for example, time range, skill/s, keywords, etc.
:param offset: Specifies from which record to retrieve the chat. Default is 0.
:param limit: Max amount of conversations to be received in the response. Default and max is 100.
:param sort: Sort the results in a predefined order.
:param body: Enter body parameters that are the same as the API documentation.
:param max_concurrent_requests: Maximum concurrent requests.
:param debug: Shows status of requests.
:return: List of all interactionHistoryRecords within the start time range.
"""
count = self.engagements(body, offset, limit, sort)['_metadata']['count']
# Returns an empty list
if count == 0:
return []
# Inner function to process concurrent requests.
def get_record(b, o, l, s):
if self.bearer:
# If User Login is used.
api_data = []
for attempt in range(1, 3):
try:
api_data = self.engagements(body=b, offset=o, limit=l, sort=s)['interactionHistoryRecords']
except requests.HTTPError:
print('Reconnecting... [Attempt {}, Offset {}]'.format(attempt, o))
self.user_login(username=self.auth.username, password=self.auth.password)
print('Woot! We have connection!')
continue
break
return api_data
else:
# If OAuth1 is used.
return self.engagements(body=b, offset=o, limit=l, sort=s)['interactionHistoryRecords']
interaction_history_records = []
# Multi-threading to handle multiple requests at a time.
with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrent_requests) as executor:
future_requests = {
executor.submit(get_record, body, offset, limit, sort): offset for offset in range(0, count, 100)
}
for future in concurrent.futures.as_completed(future_requests):
if debug:
print('Record Count: {}, Offset: {} finished.'.format(count, future_requests[future]))
# Add data to results.
interaction_history_records.extend(future.result())
return interaction_history_records
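# Hypothetical end-to-end sketch (account id and credentials are placeholders; the
# epoch-millisecond range is copied from the module docstring above):
#
#   auth = UserLogin(account_id='1234', username='YOURUSERNAME', password='<PASSWORD>')
#   eh_conn = EngagementHistory(auth=auth)
#   body = {'start': {'from': 1491004800000, 'to': 1491091199000}}
#   records = eh_conn.all_engagements(body, debug=True)
#   print('Fetched {} engagement records'.format(len(records)))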
| StarcoderdataPython |
1610766 | <reponame>NaHCO314/api-client<filename>tests/get_problem_csacademy.py
import unittest
from onlinejudge_api.main import main
class GetProblemCSAcademyTest(unittest.TestCase):
def test_k_swap(self) -> None:
url = 'https://csacademy.com/contest/round-39/task/k-swap/'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://csacademy.com/contest/round-39/task/k-swap/",
"tests": [
{
"input": "4 2\n4 3 2 1\n",
"output": "2 3 4 1 "
},
{
"input": "7 2\n4 3 2 1 2 3 4\n",
"output": "2 2 3 3 4 1 4 "
},
{
"input": "10 2\n4 3 2 1 2 3 4 3 2 1\n",
"output": "2 2 2 3 3 3 4 1 4 1 "
},
],
"context": {}
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
def test_unfair_game(self) -> None:
url = 'https://csacademy.com/contest/archive/task/unfair_game/'
expected = {
"status": "ok",
"messages": [],
"result": {
"url": "https://csacademy.com/contest/archive/task/unfair_game/",
"tests": [
{
"input": "5\n-1 2 10 -10 3\n",
"output": "14\n"
},
{
"input": "5\n-5 2 -10 4 -7\n",
"output": "-1\n"
},
],
"context": {}
},
}
actual = main(['get-problem', url], debug=True)
self.assertEqual(expected, actual)
| StarcoderdataPython |