max_stars_repo_path (stringlengths 3–269) | max_stars_repo_name (stringlengths 4–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.05M) | score (float64 0.23–5.13) | int_score (int64 0–5) |
---|---|---|---|---|---|---|
wizard/multiple_test_request_wizard.py | IDRISSOUM/hospital_management | 0 | 12797751 | # -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from datetime import date, datetime
class wizard_multiple_test_request(models.TransientModel):
_name = 'wizard.multiple.test.request'
request_date = fields.Datetime('Request Date', required = True)
wizard_multiple_test_patient_id = fields.Many2one('medical.patient','Patient', required = True)
urgent = fields.Boolean('Urgent',)
wizard_multiple_test_physician_id = fields.Many2one('medical.physician','Doctor', required = True)
wizard_multiple_test_owner_partner_id = fields.Many2one('res.partner','Owner')
tests_ids = fields.Many2many('medical.test_type',
'lab_test_report_test_rel', 'test_id', 'report_id', 'Tests')
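# create_lab_test below creates one medical.patient.lab.test record per selected test type and returns a tree-view window action filtered to the newly created records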
@api.multi
def create_lab_test(self):
wizard_obj = self
patient_id = wizard_obj.wizard_multiple_test_patient_id
phy_id = wizard_obj.wizard_multiple_test_physician_id
new_created_id_list = []
date = wizard_obj.request_date
for test_id in wizard_obj.tests_ids:
lab_test_req_obj = self.env['medical.test_type']
test_browse_record = lab_test_req_obj.browse(test_id.id)
test_name = test_browse_record.name
medical_test_request_obj = self.env['medical.patient.lab.test']
new_created_id = medical_test_request_obj.create({'date': date,
'doctor_id': phy_id.id,
'patient_id':patient_id.id,
'state': 'tested',
'name':test_id.id,
'request' :self.env['ir.sequence'].next_by_code('test_seq')
})
new_created_id_list.append(new_created_id.id)
if new_created_id_list:
imd = self.env['ir.model.data']
action = imd.xmlid_to_object('hospital_management.action_medical_patient_lab_test')
list_view_id = imd.xmlid_to_res_id('hospital_management.medical_patient_lab_test_tree_view')
result = {
'name': action.name,
'help': action.help,
'type': action.type,
'views': [ [list_view_id,'tree' ]],
'target': action.target,
'context': action.context,
'res_model': action.res_model,
}
if len(new_created_id_list) :
result['domain'] = "[('id','in',%s)]" % new_created_id_list
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | 2.0625 | 2 |
backend/django/KlasHelper/model.py | Ryulth/KlasHelperRemaster | 7 | 12797752 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class Comment(models.Model):
comment_id = models.IntegerField()
class_code = models.CharField(max_length=20)
post_id = models.IntegerField(blank=True, null=True)
create_date = models.DateTimeField(blank=True, null=True)
author_id = models.CharField(max_length=20)
content = models.TextField(blank=True, null=True)
flag = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'comment'
class Course20182(models.Model):
class_code = models.CharField(max_length=20)
class_name = models.CharField(max_length=100)
class_year = models.CharField(max_length=10, blank=True, null=True)
quota = models.CharField(max_length=10, blank=True, null=True)
instructor = models.CharField(max_length=100, blank=True, null=True)
credit = models.CharField(max_length=10, blank=True, null=True)
class_hour_room = models.CharField(max_length=500, blank=True, null=True)
class_type = models.CharField(max_length=20, blank=True, null=True)
class_lan = models.CharField(max_length=50, blank=True, null=True)
notice = models.CharField(max_length=100, blank=True, null=True)
campus = models.CharField(max_length=10, blank=True, null=True)
class Meta:
managed = False
db_table = 'course_2018_20'
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class Post(models.Model):
post_id = models.IntegerField()
class_code = models.CharField(max_length=20)
author_id = models.CharField(max_length=20)
title = models.CharField(max_length=255)
content = models.TextField(blank=True, null=True)
create_date = models.DateTimeField(blank=True, null=True)
hit = models.IntegerField()
flag = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'post'
class User(models.Model):
klas_id = models.CharField(primary_key=True, max_length=20)
naver_id = models.CharField(max_length=20, blank=True, null=True)
lectures = models.CharField(max_length=512, blank=True, null=True)
name = models.CharField(max_length=20, blank=True, null=True)
class Meta:
managed = False
db_table = 'user'
| 1.992188 | 2 |
ms/get_citations.py | yoavram-lab/EffectiveNPI | 0 | 12797753 |
import click
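# Collect the citation keys from \citation{...} entries in a LaTeX .aux file and write the sorted, de-duplicated list to an output file.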
@click.command()
@click.argument('aux_filename', default='ms.aux', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.argument('output_filename', default='citation_keys', type=click.Path(file_okay=True, dir_okay=False, writable=True))
@click.option('-v/-V', '--verbose/--no-verbose', default=False)
def main(aux_filename, output_filename, verbose):
with open(aux_filename) as f:
lines = f.readlines()
lines = (line.strip() for line in lines if line.startswith(r'\citation'))
lines = (line[len(r'\citation{'):-1] for line in lines)
citations = set()
for line in lines:
for c in line.split(','):
citations.add(c)
if verbose:
print("Found {} citations in {}".format(len(citations), aux_filename))
with open(output_filename, 'wt') as f:
f.write('\n'.join(sorted(citations)))
if __name__ == '__main__':
main() | 3.0625 | 3 |
utils/estimation_for_grid_Feynman_database.py | smeznar/ProGED | 5 | 12797754 |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 11:22:45 2021
@author: jureb
"""
import numpy as np
import pandas as pd
import sys
import os
import pickle
import multiprocessing as mp
sys.path.append(os.getcwd()+"/source")
import ProGED as pg
import warnings
#warnings.filterwarnings("ignore")
np.random.seed(0)
if __name__ == "__main__":
datadir = ""
eqN = int(sys.argv[1])
modelsfile = sys.argv[2]
processN = int(sys.argv[3])
eqfile = "source/FeynmanEquations.csv"
reference = pd.read_csv(eqfile)
print("eqN: " + str(eqN) + ", file: " + reference["Filename"][eqN])
data = np.loadtxt(datadir + reference["Filename"][eqN])
sampleind = np.random.randint(0,10**6,1000)
print("--Loading models")
with open(modelsfile, "rb") as file:
models = pickle.load(file)
pool = mp.Pool(processN)
print("--Fitting models")
models = pg.fit_models(models, data[sampleind], target_variable_index=-1, pool_map = pool.map, verbosity = 1)
print("--Exporting results")
with open("results/" + modelsfile.split(".")[0] + "_fit.models", "wb") as file:
pickle.dump(models, file)
| 1.992188 | 2 |
tests/util/test_transform.py | xiangze/edward | 5,200 | 12797755 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from collections import namedtuple
from edward.models import (
Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag,
Normal, Poisson, TransformedDistribution)
from tensorflow.contrib.distributions import bijectors
class test_transform_class(tf.test.TestCase):
def assertSamplePosNeg(self, sample):
num_pos = np.sum((sample > 0.0), axis=0, keepdims=True)
num_neg = np.sum((sample < 0.0), axis=0, keepdims=True)
self.assertTrue((num_pos > 0).all())
self.assertTrue((num_neg > 0).all())
def test_args(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_kwargs(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijector=bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_01(self):
with self.test_session():
x = Beta(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_nonnegative(self):
with self.test_session():
x = Gamma(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_simplex(self):
with self.test_session():
x = Dirichlet([1.1, 1.2, 1.3, 1.4])
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_real(self):
with self.test_session():
x = Normal(0.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, Normal)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_multivariate_real(self):
with self.test_session():
x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2))
y = ed.transform(x)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_no_support(self):
with self.test_session():
x = DirichletProcess(1.0, Normal(0.0, 1.0))
with self.assertRaises(AttributeError):
y = ed.transform(x)
def test_unhandled_support(self):
with self.test_session():
FakeRV = namedtuple('FakeRV', ['support'])
x = FakeRV(support='rational')
with self.assertRaises(ValueError):
y = ed.transform(x)
if __name__ == '__main__':
tf.test.main()
| 2.140625 | 2 |
beyond_the_basics/01.organizing_larger_programs/reader/__main__.py | tonper19/PythonDemos | 0 | 12797756 | import sys
import reader
r = reader.Reader(sys.argv[1])
try:
print(r.read())
finally:
r.close()
| 1.96875 | 2 |
commands/server/start.py | dccs-tech/mcmi-cluster | 1 | 12797757 |
from systems.commands.index import Command
class Start(Command('server.start')):
def exec(self):
def start_server(server):
self.data("Starting server", str(server))
server.start()
self.run_list(self.server_instances, start_server)
| 2.140625 | 2 |
tr/trans.py | hegistva/sync-reader | 0 | 12797758 |
from tr.books import book_manager
from tr.libs.trans import book_mapper
from tr.libs.trans import utils
from tr.libs.speech import aligner
book_id = '20000LeaguesUnderTheSea'
book_id = 'AroundTheWorldIn80Days'
# book_manager.downloadBook(book_id)
# book_mapper.mapChapter(utils.Lang.FRA, utils.Lang.ENG, book_id, 1, doMapping=False, debug=True)
# book_mapper.mapBook(utils.Lang.FRA, utils.Lang.ENG, book_id, chapters=1, chapterToPrint=1)
# map sentence for all chapters, save results in beads files
# book_mapper.beadMapBook(utils.Lang.FRA, utils.Lang.ENG, book_id)
# speech
# aligner.alignChapter(utils.Lang.ENG, book_id, 1)
# aligner.alignChapter(utils.Lang.FRA, book_id, 1)
# start at min, stop at max-1
# for chapter in range(7, 38):
# aligner.alignChapter(utils.Lang.ENG, book_id, chapter)
# for chapter in range(13, 38):
# aligner.alignChapter(utils.Lang.FRA, book_id, chapter)
for i in range(1, 38):
r = aligner.findBoundaries('fra', book_id, i)
print("Chapitre %s: %s" % (i, r))
r = aligner.findBoundaries('eng', book_id, i)
print("Chapter %s: %s" % (i, r))
| 2.3125 | 2 |
model/vae.py | kefirski/mnist_cdvae | 1 | 12797759 | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from model.encoder import Encoder
from model.decoder import Decoder
import math
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, input, z=None):
"""
:param input: an Float tensor with shape of [batch_size, 1, 28, 28]
:param z: an Float tensor with shape of [batch_size, latent_size] if sampling is performed
:return: an Float tensor with shape of [batch_size, 1, 28, 28], [batch_size, 16], [batch_size, 16]
"""
mu, logvar = self.encoder(input)
[batch_size, latent_size] = mu.size()
std = t.exp(0.5 * logvar)
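# reparameterisation trick: draw 15 posterior samples per input as z = mu + std * eps with eps ~ N(0, I)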
z = Variable(t.randn([batch_size, 15, latent_size]))
if input.is_cuda:
z = z.cuda()
mu_repeated = mu.unsqueeze(1).repeat(1, 15, 1)
std_repeated = std.unsqueeze(1).repeat(1, 15, 1)
z = z * std_repeated + mu_repeated
z = z.view(batch_size * 15, -1)
return self.decoder(z), mu, logvar, z
def encode(self, input):
return self.encoder(input)
def decode(self, input):
return self.decoder(input)
@staticmethod
def monte_carlo_divergence(z, mu, std, n):
[batch_size, latent_size] = mu.size()
log_p_z_x = VAE.normal_prob(z, mu, std)
log_p_z = VAE.normal_prob(z,
Variable(t.zeros(batch_size, latent_size)),
Variable(t.ones(batch_size, latent_size)))
result = log_p_z_x - log_p_z
return result.view(-1, n).sum(1) / n
@staticmethod
def normal_prob(z, mu, std):
return t.exp(-0.5 * ((z - mu) * t.pow(std + 1e-8, -1) * (z - mu)).sum(1)) / \
t.sqrt(t.abs(2 * math.pi * std.prod(1)))
@staticmethod
def divergence_with_prior(mu, logvar):
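# closed-form KL(N(mu, sigma^2) || N(0, I)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), averaged over the batch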
return (-0.5 * t.sum(logvar - t.pow(mu, 2) - t.exp(logvar) + 1, 1)).mean()
@staticmethod
def divergence_with_posterior(p_first, p_second):
"""
:params p_first, p_second: tuples with parameters of distribution over latent variables
:return: divirgence estimation
"""
return 0.5 * t.sum(2 * p_second[1] - 2 * p_first[1] + t.exp(p_first[1]) / (t.exp(p_second[1]) + 1e-8) +
t.pow(p_first[0] - p_second[0], 2) / (t.exp(p_second[1]) + 1e-8) - 1).mean()
| 2.5 | 2 |
bdn/verification/tasks/listen_ethereum_ipfs_hash_storage.py | OpenSourceUniversity/bdn | 1 | 12797760 | import logging
from celery import shared_task
from bdn import contract
from bdn import redis
from .perform_ipfs_meta_verifications_array import (
perform_ipfs_meta_verifications_array)
logger = logging.getLogger(__name__)
@shared_task
def listen_ethereum_ipfs_hash_storage():
redis_db = redis.get_redis()
verification_storage = contract.contract('VerificationStorage')
event = verification_storage.events.Verification
last_block = redis_db.get('_verification_filter_block') or 0
if last_block != 0:
last_block = int(last_block)
hash_filter = event.createFilter(fromBlock=last_block)
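# replay every Verification event emitted since the last processed block and hand each entry to the Celery task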
for entry in hash_filter.get_all_entries():
block_number = int(entry['blockNumber'])
entry_args = dict(entry['args'])
entry_data = {
'transactionHash': entry['transactionHash'].hex(),
'blockHash': entry['blockHash'].hex(),
'blockNumber': entry['blockNumber'],
'args': {
'ipfsHash': entry_args.get('ipfsHash', b'').decode(),
},
}
perform_ipfs_meta_verifications_array.delay(entry_data)
if block_number > last_block:
redis_db.set('_verification_filter_block', block_number)
| 2.1875 | 2 |
tests/test_exception.py | petertriho/prestans | 12 | 12797761 | import unittest
from prestans.http import STATUS
from prestans.http import VERB
from prestans import exception
class ExceptionBase(unittest.TestCase):
def test_http_status(self):
base_value = exception.Base(http_status=STATUS.OK, message="message")
self.assertEqual(base_value.http_status, STATUS.OK)
base_value.http_status = STATUS.NO_CONTENT
self.assertEqual(base_value.http_status, STATUS.NO_CONTENT)
def test_stack_trace(self):
base = exception.Base(http_status=STATUS.OK, message="message")
self.assertEqual(base.stack_trace, [])
def test_push_trace(self):
pass
def test_message(self):
base_value = exception.Base(http_status=STATUS.OK, message="message")
self.assertEqual(base_value.message, "message")
def test_str(self):
base = exception.Base(http_status=STATUS.OK, message="message")
self.assertEqual(base.http_status, STATUS.OK)
self.assertEqual(str(base.message), "message")
class ExceptionUnsupportedVocabularyError(unittest.TestCase):
def test_init(self):
unsupported_vocabulary_error = exception.UnsupportedVocabularyError(
accept_header="accept",
supported_types=["a", "b", "c"]
)
self.assertEqual(unsupported_vocabulary_error.http_status, STATUS.NOT_IMPLEMENTED)
self.assertEqual(unsupported_vocabulary_error.message, "Unsupported vocabulary in the Accept header")
stack_trace = [{
"accept_header": "accept",
"supported_types": ["a", "b", "c"]
}]
self.assertEqual(unsupported_vocabulary_error.stack_trace, stack_trace)
class ExceptionUnsupportedContentTypeError(unittest.TestCase):
def test_init(self):
unsupported_content_type = exception.UnsupportedContentTypeError("text/plain", "application/json")
self.assertEqual(unsupported_content_type.http_status, STATUS.NOT_IMPLEMENTED)
self.assertEqual(unsupported_content_type.message, "Unsupported Content-Type in Request")
stack_trace = [{
"requested_type": "text/plain",
"supported_types": "application/json"
}]
self.assertEqual(unsupported_content_type.stack_trace, stack_trace)
class ExceptionValidationError(unittest.TestCase):
def test_init(self):
validation_error = exception.ValidationError(
message="message",
attribute_name="attribute",
value="value",
blueprint={"key": "value"}
)
self.assertEqual(validation_error.http_status, STATUS.BAD_REQUEST)
self.assertEqual(validation_error.message, "message")
self.assertEqual(validation_error.stack_trace, [
{
"attribute_name": "attribute",
"value": "value",
"message": "message",
"blueprint": {"key": "value"}
}
])
self.assertEqual(str(validation_error), "attribute message")
class ExceptionHandlerException(unittest.TestCase):
def test_init(self):
from prestans.rest import Request
import logging
logging.basicConfig()
self.logger = logging.getLogger("prestans")
from prestans.deserializer import JSON
charset = "utf-8"
serializers = [JSON()]
default_serializer = JSON()
request_environ = {
"REQUEST_METHOD": VERB.GET,
"PATH_INFO": "/url",
"HTTP_USER_AGENT": "chrome",
"wsgi.url_scheme": "https",
"SERVER_NAME": "localhost",
"SERVER_PORT": "8080"
}
request = Request(
environ=request_environ,
charset=charset,
logger=self.logger,
deserializers=serializers,
default_deserializer=default_serializer
)
handler_exception = exception.HandlerException(STATUS.FORBIDDEN, "message")
handler_exception.request = request
self.assertEqual(handler_exception.http_status, STATUS.FORBIDDEN)
self.assertEqual(handler_exception.message, "message")
self.assertEqual(handler_exception.request, request)
self.assertEqual(handler_exception.log_message, 'GET https://localhost:8080/url chrome "message"')
self.assertEqual(str(handler_exception), 'GET https://localhost:8080/url chrome "message"')
handler_exception_without_request = exception.HandlerException(STATUS.NOT_FOUND, "message")
self.assertEqual(handler_exception_without_request.http_status, STATUS.NOT_FOUND)
self.assertEqual(handler_exception_without_request.message, "message")
self.assertEqual(handler_exception_without_request.log_message, "message")
self.assertEqual(str(handler_exception_without_request), "message")
class ExceptionRequestException(unittest.TestCase):
def test_init(self):
request_exception = exception.RequestException(STATUS.BAD_REQUEST, "bad request")
self.assertEqual(request_exception.http_status, STATUS.BAD_REQUEST)
self.assertEqual(request_exception.message, "bad request")
class ExceptionUnimplementedVerbError(unittest.TestCase):
def test_init(self):
unimplemented_verb = exception.UnimplementedVerbError("GET")
self.assertEqual(unimplemented_verb.http_status, STATUS.NOT_IMPLEMENTED)
self.assertEqual(unimplemented_verb.message, "API does not implement the HTTP Verb")
self.assertEqual(unimplemented_verb.stack_trace, [{"verb": "GET"}])
class ExceptionNoEndpointError(unittest.TestCase):
def test_init(self):
no_endpoint = exception.NoEndpointError()
self.assertEqual(no_endpoint.http_status, STATUS.NOT_FOUND)
self.assertEqual(no_endpoint.message, "API does not provide this end-point")
class ExceptionAuthenticationError(unittest.TestCase):
def test_init(self):
authentication = exception.AuthenticationError()
self.assertEqual(authentication.http_status, STATUS.UNAUTHORIZED)
self.assertEqual(authentication.message, "Authentication Error; service is only available to authenticated")
authentication_custom = exception.AuthenticationError("Custom message")
self.assertEqual(authentication_custom.http_status, STATUS.UNAUTHORIZED)
self.assertEqual(authentication_custom.message, "Custom message")
class ExceptionAuthorizationError(unittest.TestCase):
def test_init(self):
authorization = exception.AuthorizationError("Role")
self.assertEqual(authorization.http_status, STATUS.FORBIDDEN)
self.assertEqual(authorization.message, "Role is not allowed to access this resource")
class ExceptionSerializationFailedError(unittest.TestCase):
def test_init(self):
serialization_failed_error = exception.SerializationFailedError("format")
self.assertEqual(serialization_failed_error.http_status, STATUS.NOT_FOUND)
self.assertEqual(serialization_failed_error.message, "Serialization failed: format")
self.assertEqual(str(serialization_failed_error), "Serialization failed: format")
class ExceptionDeSerializationFailedError(unittest.TestCase):
def test_init(self):
deserialization_failed_error = exception.DeSerializationFailedError("format")
self.assertEqual(deserialization_failed_error.http_status, STATUS.NOT_FOUND)
self.assertEqual(deserialization_failed_error.message, "DeSerialization failed: format")
self.assertEqual(str(deserialization_failed_error), "DeSerialization failed: format")
class ExceptionAttributeFilterDiffers(unittest.TestCase):
def test_init(self):
attribute_filter_differs = exception.AttributeFilterDiffers(["cat", "dog"])
self.assertEqual(attribute_filter_differs.http_status, STATUS.BAD_REQUEST)
self.assertEqual(
attribute_filter_differs.message,
"attribute filter contains attributes (cat, dog) that are not part of template"
)
class ExceptionInconsistentPersistentDataError(unittest.TestCase):
def test_init(self):
error = exception.InconsistentPersistentDataError("name", "error message")
self.assertEqual(error.http_status, STATUS.INTERNAL_SERVER_ERROR)
self.assertEqual(error.message, "Data Adapter failed to validate stored data on the server")
self.assertEqual(
str(error),
"DataAdapter failed to adapt name, Data Adapter failed to validate stored data on the server"
)
self.assertEqual(error.stack_trace, [{'exception_message': "error message", 'attribute_name': "name"}])
class ExceptionDataValidationException(unittest.TestCase):
def test_init(self):
exp = exception.DataValidationException("message")
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "message")
class ExceptionRequiredAttributeError(unittest.TestCase):
def test_init(self):
exp = exception.RequiredAttributeError()
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "attribute is required and does not provide a default value")
class ExceptionParseFailedError(unittest.TestCase):
def test_init(self):
default_msg = exception.ParseFailedError()
self.assertEqual(default_msg.http_status, STATUS.BAD_REQUEST)
self.assertEqual(default_msg.message, "Parser Failed")
custom_msg = exception.ParseFailedError("custom")
self.assertEqual(custom_msg.http_status, STATUS.BAD_REQUEST)
self.assertEqual(custom_msg.message, "custom")
class ExceptionLessThanMinimumError(unittest.TestCase):
def test_init(self):
exp = exception.LessThanMinimumError(3, 5)
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "3 is less than the allowed minimum of 5")
class ExceptionMoreThanMaximumError(unittest.TestCase):
def test_init(self):
exp = exception.MoreThanMaximumError(5, 3)
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "5 is more than the allowed maximum of 3")
class ExceptionInvalidChoiceError(unittest.TestCase):
def test_init(self):
exp = exception.InvalidChoiceError(3, [1, 2, 5])
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "value 3 is not one of these choices 1, 2, 5")
class ExceptionMinimumLengthError(unittest.TestCase):
def test_init(self):
exp = exception.MinimumLengthError("dog", 5)
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "length of value: dog has to be greater than 5")
class ExceptionMaximumLengthError(unittest.TestCase):
def test_init(self):
exp = exception.MaximumLengthError("dog", 2)
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "length of value: dog has to be less than 2")
class ExceptionInvalidTypeError(unittest.TestCase):
def test_init(self):
exp = exception.InvalidTypeError("str", "int")
self.assertEqual(exp.http_status, STATUS.BAD_REQUEST)
self.assertEqual(exp.message, "data type str given, expected int")
class ExceptionMissingParameterError(unittest.TestCase):
def test_init(self):
missing_parameter = exception.MissingParameterError()
self.assertEqual(missing_parameter.http_status, STATUS.BAD_REQUEST)
self.assertEqual(missing_parameter.message, "missing parameter")
class ExceptionInvalidFormatError(unittest.TestCase):
def test_init(self):
invalid_format = exception.InvalidFormatError("cat")
self.assertEqual(invalid_format.http_status, STATUS.BAD_REQUEST)
self.assertEqual(invalid_format.message, "invalid value cat provided")
class ExceptionInvalidMetaValueError(unittest.TestCase):
def test_init(self):
invalid_meta_value = exception.InvalidMetaValueError()
self.assertEqual(invalid_meta_value.http_status, STATUS.BAD_REQUEST)
self.assertEqual(invalid_meta_value.message, "invalid meta value")
class ExceptionUnregisteredAdapterError(unittest.TestCase):
def test_init(self):
unregistered_adapter = exception.UnregisteredAdapterError("namespace.Model")
self.assertEqual(unregistered_adapter.http_status, STATUS.BAD_REQUEST)
self.assertEqual(unregistered_adapter.message, "no registered adapters for data model namespace.Model")
class ExceptionResponseException(unittest.TestCase):
def test_init(self):
from prestans.types import Model
class MyModel(Model):
pass
my_model = MyModel()
response = exception.ResponseException(STATUS.OK, "message", my_model)
self.assertEqual(response.http_status, STATUS.OK)
self.assertEqual(response.message, "message")
self.assertEqual(response.response_model, my_model)
self.assertRaises(TypeError, exception.ResponseException, STATUS.INTERNAL_SERVER_ERROR, "message", "string")
class ExceptionServiceUnavailable(unittest.TestCase):
def test_init(self):
service_unavailable = exception.ServiceUnavailable()
self.assertEqual(service_unavailable.http_status, STATUS.SERVICE_UNAVAILABLE)
self.assertEqual(service_unavailable.message, "Service Unavailable")
class ExceptionBadRequest(unittest.TestCase):
def test_init(self):
bad_request = exception.BadRequest()
self.assertEqual(bad_request.http_status, STATUS.BAD_REQUEST)
self.assertEqual(bad_request.message, "Bad Request")
class ExceptionConflict(unittest.TestCase):
def test_init(self):
conflict = exception.Conflict()
self.assertEqual(conflict.http_status, STATUS.CONFLICT)
self.assertEqual(conflict.message, "Conflict")
class ExceptionNotFound(unittest.TestCase):
def test_init(self):
not_found = exception.NotFound()
self.assertEqual(not_found.http_status, STATUS.NOT_FOUND)
self.assertEqual(not_found.message, "Not Found")
class ExceptionUnauthorized(unittest.TestCase):
def test_init(self):
unauthorized = exception.Unauthorized()
self.assertEqual(unauthorized.http_status, STATUS.UNAUTHORIZED)
self.assertEqual(unauthorized.message, "Unauthorized")
class ExceptionMovedPermanently(unittest.TestCase):
def test_init(self):
moved_permanently = exception.MovedPermanently()
self.assertEqual(moved_permanently.http_status, STATUS.MOVED_PERMANENTLY)
self.assertEqual(moved_permanently.message, "Moved Permanently")
class ExceptionPaymentRequired(unittest.TestCase):
def test_init(self):
payment_required = exception.PaymentRequired()
self.assertEqual(payment_required.http_status, STATUS.PAYMENT_REQUIRED)
self.assertEqual(payment_required.message, "Payment Required")
class ExceptionForbidden(unittest.TestCase):
def test_init(self):
forbidden = exception.Forbidden()
self.assertEqual(forbidden.http_status, STATUS.FORBIDDEN)
self.assertEqual(forbidden.message, "Forbidden")
class ExceptionInternalServerError(unittest.TestCase):
def test_init(self):
internal_server_error = exception.InternalServerError()
self.assertEqual(internal_server_error.http_status, STATUS.INTERNAL_SERVER_ERROR)
self.assertEqual(internal_server_error.message, "Internal Server Error")
| 2.875 | 3 |
scraper.py | PetterDK/ps5-stock | 0 | 12797762 |
import bs4 as bs
import urllib.request
import time
import json
import mail as m
import requests as r
import os
database_ip = os.environ['database']
get_products = 'http://'+database_ip+'/get-all-products'
update_product = 'http://'+database_ip+'/update-product'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}
def update(url, stock, time):
params = {'url': url, 'stock': stock, 'time': time}
r.get(url=update_product, params=params)
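# fetch the product page and return the first element matching the given tag and CSS class (None if it is absent)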
def filter_html(url, find, identifier):
req = urllib.request.Request(url, headers=headers)
source = urllib.request.urlopen(req).read()
soup = bs.BeautifulSoup(source, 'html.parser')
return soup.find(find, class_=identifier)
while True:
in_stock = []
data = r.get(get_products).json()
data = json.loads(data)
for product in data:
try:
product_url = product['product_url']
name = product['product_name']
identifier = product['class']
store = product['store']
find = product['find']
filtered = filter_html(product_url, find, identifier)
if store == "Elgiganten" or store == "Proshop" or store == "Happii" or store == "Merlin":
if filtered is None:
print("Item available", name, store)
update(product_url, "På lager", time.strftime('%H:%M:%S', time.localtime()))
in_stock.append({'store': store, 'name': name, 'url': product_url})
else:
print("Item unavailable", name, store)
update(product_url, "Ikke på lager", time.strftime('%H:%M:%S', time.localtime()))
elif store == "Bilka" or store == "Coolshop" or store == "Power" or store == "Foetex" or store == "BR" or store == "Expert":
if filtered is None:
print('Item unavailable', name, store)
update(product_url, "Ikke på lager", time.strftime('%H:%M:%S', time.localtime()))
else:
print('Item available', name, store)
update(product_url, "På lager", time.strftime('%H:%M:%S', time.localtime()))
in_stock.append({'store': store, 'name': name, 'url': product_url})
time.sleep(1)
except Exception as e:
print(e)
if len(in_stock) > 0:
print('Lets email!')
m.send_email(in_stock)
| 2.78125 | 3 |
day-11/python_alexlyttle/octopus.py | alan-turing-institute/advent-of-code-2021 | 12 | 12797763 |
import numpy as np
def load_input(file_name):
with open(file_name, 'r') as file:
return file.read()
def party_from_str(s):
lines = s.splitlines()
return np.array([[int(i) for i in line] for line in lines])
def update(party):
"""Update the party of octopuses.
There is probably a faster way."""
party += 1
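# every octopus above 9 flashes: reset it to 0 and bump each neighbour that has not flashed this step, repeating until no level exceeds 9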
while np.any(party > 9):
i, j = np.where(party > 9)
n = i > 0
e = j < party.shape[1] - 1
s = i < party.shape[0] - 1
w = j > 0
ne = n & e
se = s & e
nw = n & w
sw = s & w
# north, northeast, east, southeast, south, southwest, west, northwest
i_adj = [
i[n] - 1, i[ne] - 1, i[e], i[se] + 1,
i[s] + 1, i[sw] + 1, i[w], i[nw] - 1,
]
j_adj = [
j[n], j[ne] + 1, j[e] + 1, j[se] + 1,
j[s], j[sw] - 1, j[w] - 1, j[nw] - 1,
]
party[(i, j)] = 0
# This is a bit slow and could be improved
is_adj = np.full(party.shape, False)
for d in range(8):
is_adj[(i_adj[d], j_adj[d])] = True
party[(party > 0) & is_adj] += 1
is_adj[:, :] = False
return party
def count_flashes(party, num_steps=100):
count = 0
for step in range(num_steps):
party = update(party)
count += np.count_nonzero(party == 0)
return count
def synchronise(party):
step = 0
while np.any(party > 0):
party = update(party)
step += 1
return step
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Count octopus flashes'
)
parser.add_argument('input_file', metavar='INPUT_FILE', type=str,
help='input file name')
parser.add_argument('-s', '--sync', action='store_true')
args = parser.parse_args()
s = load_input(args.input_file)
party = party_from_str(s)
if args.sync:
step = synchronise(party)
print(f'Step = {step}')
else:
count = count_flashes(party)
print(f'Count = {count}')
| 2.875 | 3 |
data/tools/addon_manager/__init__.py | blackberry/Wesnoth | 12 | 12797764 | # This lets Python know about the addon_manager module. | 1.101563 | 1 |
oslash/state.py | stjordanis/OSlash | 668 | 12797765 |
from typing import Callable, Tuple, Any, TypeVar, Generic
from .util import Unit
from .typing import Functor
from .typing import Monad
TState = TypeVar("TState")
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
class State(Generic[TSource, TState]):
"""The state monad.
Wraps stateful computations. A stateful computation is a function
that takes a state and returns a result and new state:
state -> (result, state')
"""
def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None:
"""Initialize a new state.
Keyword arguments:
fn -- State processor.
"""
self._fn = fn
@classmethod
def unit(cls, value: TSource) -> "State[TSource, TState]":
r"""Create new State.
The unit function creates a new State object wrapping a stateful
computation.
State $ \s -> (x, s)
"""
return cls(lambda state: (value, state))
def map(self, mapper: Callable[[TSource], TResult]) -> "State[TResult, TState]":
def _(a: Any, state: Any) -> Tuple[Any, Any]:
return mapper(a), state
return State(lambda state: _(*self.run(state)))
def bind(self, fn: Callable[[TSource], "State[TState, TResult]"]) -> "State[TResult, TState]":
r"""m >>= k = State $ \s -> let (a, s') = runState m s
in runState (k a) s'
"""
def _(result: Any, state: Any) -> Tuple[Any, Any]:
return fn(result).run(state)
return State(lambda state: _(*self.run(state)))
@classmethod
def get(cls) -> "State[TState, TState]":
r"""get = state $ \s -> (s, s)"""
return State(lambda state: (state, state))
@classmethod
def put(cls, new_state: TState) -> "State[Tuple, TState]":
r"""put newState = state $ \s -> ((), newState)"""
return State(lambda state: (Unit, new_state))
def run(self, state: TState) -> Tuple[TSource, TState]:
"""Return wrapped state computation.
This is the inverse of unit and returns the wrapped function.
"""
return self._fn(state)
def __call__(self, state: Any) -> Tuple:
return self.run(state)
assert issubclass(State, Functor)
assert issubclass(State, Monad)
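# A minimal usage sketch (illustrative only, not part of the library): a counter
# computation that returns the current state and then increments it.
# tick = State.get().bind(lambda n: State.put(n + 1).bind(lambda _: State.unit(n)))
# tick.run(0) # -> (0, 1)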
| 2.75 | 3 |
flappy.py | Kimhwiwoong/Flappy_Bird_ver.2 | 0 | 12797766 |
from itertools import cycle
from operator import itemgetter
import random
import sys
import math
import pygame
from pygame.locals import *
import time
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 50, 50)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 50)
BLUE = (50, 50, 255)
GREY = (200, 200, 200)
ORANGE = (200, 100, 50)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
TRANS = (1, 1, 1)
TOPFIVE=[('kim',0),('kim',0),('kim',0),('kim',0),('kim',0)]
FPS = 30
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 130 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image, sound and hitmask dicts
IMAGES, SOUNDS, HITMASKS = {}, {}, {}
# True if the user plays the fury mode
FURYMODE = False
EASYMODE = False
# In fury mode, the pipe sapwn system is different than in
# normal mode, we add pipes with a "timer" (a frame counter)
FURYMODE_FRAMES_TO_SPAWN_PIPES = 35
# pipes particles amount (for each pipe)
FURYMODE_PARTICLES = 8
# max particles for each pipe hit
FURYMODE_PARTICLES_MAX = 48
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
try:
xrange
except NameError:
xrange = range
class Keyboard(object):
keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D',
pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H',
pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L',
pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P',
pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T',
pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X',
pygame.K_y: 'Y', pygame.K_z: 'Z'}
def main():
global SCREEN, FPSCLOCK, SLIDER
pygame.init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
# the "fury mode" button for welcome screen (with the key)
IMAGES['furymode'] = pygame.image.load('assets/sprites/furymode.png').convert_alpha()
IMAGES['furymode-key'] = pygame.image.load('assets/sprites/furymode-key.png').convert_alpha()
IMAGES['easymode'] = pygame.image.load('assets/sprites/easymode.png').convert_alpha()
IMAGES['hardmode'] = pygame.image.load('assets/sprites/hardmode.png').convert_alpha()
# speaker sprite
IMAGES['speaker'] = (pygame.image.load('assets/sprites/speaker_full.png').convert_alpha(),
pygame.image.load('assets/sprites/speaker_mute.png').convert_alpha())
# added: coin sprite
IMAGES['coin'] = pygame.image.load('assets/sprites/coin.png').convert_alpha()
# sounds
if 'win' in sys.platform:
soundExt = '.wav'
else:
soundExt = '.ogg'
SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)
# volume slider(defaultValue, maximum, minimum, Xposition, Yposition)
SLIDER = Slider(0.5, 1, 0, 190, 0)
while True:
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# pipes' particles for fury mode
# pipes are green
if pipeindex == 0:
IMAGES['pipe-particle'] = (
pygame.image.load('assets/sprites/particles-green-0.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-1.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-2.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-3.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-4.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-5.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-6.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-green-7.png').convert_alpha(),
)
else:
IMAGES['pipe-particle'] = (
pygame.image.load('assets/sprites/particles-red-0.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-1.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-2.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-3.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-4.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-5.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-6.png').convert_alpha(),
pygame.image.load('assets/sprites/particles-red-7.png').convert_alpha(),
)
# hismask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
# added: coin hitmask
HITMASKS['coin'] = (
getHitmask(IMAGES['coin']),
)
movementInfo = showWelcomeAnimation()
crashInfo = mainGame(movementInfo)
showGameOverScreen(crashInfo)
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
global FURYMODE, EASYMODE
# index of player to blit on screen
playerIndex = 0
playerIndexGen = cycle([0, 1, 2, 1])
# iterator used to change playerIndex after every 5th iteration
loopIter = 0
playerx = int(SCREENWIDTH * 0.2)
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
easymodex = int((SCREENWIDTH - IMAGES['easymode'].get_width())/2)
easymodey = int(SCREENHEIGHT * 0.68)
hardmodex = int((SCREENWIDTH - IMAGES['hardmode'].get_width())/2)
hardmodey = int(SCREENHEIGHT * 0.74)
furymodex = int((SCREENWIDTH - IMAGES['furymode'].get_width()) / 2)
furymodey = int(SCREENHEIGHT * 0.80)
# just at right of the fury mode button (8 is right padding)
furymodeKeyX = furymodex + IMAGES['furymode'].get_width() + 8
furymodeKeyY = furymodey + IMAGES['furymode-key'].get_height() / 2
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player shm for up-down motion on welcome screen
playerShmVals = {'val': 0, 'dir': 1}
# initialize volume
for sound in SOUNDS:
SOUNDS[sound].set_volume(SLIDER.val)
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
pos = pygame.mouse.get_pos()
if SLIDER.button_rect.collidepoint(pos):
SLIDER.hit = True
elif event.type == pygame.MOUSEBUTTONUP:
SLIDER.hit = False
# Move volume slider
if SLIDER.hit:
SLIDER.move()
for sounds in SOUNDS:
SOUNDS[sounds].set_volume(SLIDER.val)
#(2) key for easymode
if (event.type == KEYDOWN and event.key == K_2) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['easymode'].get_rect(center=(easymodex+54,easymodey+14)).collidepoint(pygame.mouse.get_pos())):
EASYMODE = True
# make first flap sound and return values for mainGame
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
elif (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['hardmode'].get_rect(center=(hardmodex+54,hardmodey+14)).collidepoint(pygame.mouse.get_pos())):
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
# (1) key for fury mode
if (event.type == KEYDOWN and event.key == K_1) or ((event.type == MOUSEBUTTONDOWN and event.button == 1) and IMAGES['furymode'].get_rect(center=(furymodex+54,furymodey+14)).collidepoint(pygame.mouse.get_pos())):
# make first flap sound and return values for mainGame
FURYMODE = True
SOUNDS['wing'].play()
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
# adjust playery, playerIndex, basex
if (loopIter + 1) % 5 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 4) % baseShift)
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
SLIDER.draw()
if(SLIDER.val>0):
SCREEN.blit(IMAGES['speaker'][0], (160,15))
else :
SCREEN.blit(IMAGES['speaker'][1], (160,15))
SCREEN.blit(IMAGES['player'][playerIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
SCREEN.blit(IMAGES['easymode'],(easymodex,easymodey))
SCREEN.blit(IMAGES['hardmode'],(hardmodex,hardmodey))
SCREEN.blit(IMAGES['furymode'], (furymodex, furymodey))
SCREEN.blit(IMAGES['furymode-key'], (furymodeKeyX, furymodeKeyY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def mainGame(movementInfo):
global FURYMODE, FURYMODE_FRAMES_TO_SPAWN_PIPES, EASYMODE
DIFFICULTY = 0
score = playerIndex = loopIter = 0
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# no need to spawn pipes at start
if FURYMODE:
# list of upper pipes
upperPipes = []
# list of lowerpipe
lowerPipes = []
# list of particles
# a particle is an object with attributes:
# {'x': position-x, 'y': position-y,
# 'vx': velocity-x, 'vy': velocity-y,
# 'i': index in textures list}
particles = []
# added: list of coins
coins = []
else:
if EASYMODE:
DIFFICULTY = 4
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe(DIFFICULTY)
newPipe2 = getRandomPipe(DIFFICULTY)
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
# added: two starting coins
newCoin1 = getRandomCoin()
newCoin2 = getRandomCoin()
coins = [
{'x': SCREENWIDTH + 280, 'y': newCoin1[0]['y']},
{'x': SCREENWIDTH + 280 + (SCREENWIDTH / 2), 'y': newCoin2[0]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward accleration, accleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # players downward accleration
playerRot = 45 # player's rotation
playerVelRot = 3 # angular speed
playerRotThr = 20 # rotation threshold
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
# The counter to spawn new pipes
furymodePipeFrameCounter = 0
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
SOUNDS['wing'].play()
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
# added: check whether the player picked up a coin
coinTest = checkCoin({'x': playerx, 'y': playery, 'index': playerIndex}, coins)
if crashTest[0]:
# the player hits a pipe in fury mode
if FURYMODE and not crashTest[1]:
spawnParticles(particles, crashTest[3])
# remove the pipe
# it's an upper pipe
if crashTest[2]:
upperPipes.remove(crashTest[3])
score+=1
# it's a lower pipe
else:
lowerPipes.remove(crashTest[3])
else:
return {
'y': playery,
'groundCrash': crashTest[1],
'basex': basex,
'upperPipes': upperPipes,
'lowerPipes': lowerPipes,
'score': score,
'playerVelY': playerVelY,
'playerRot': playerRot
}
# added: award a point and remove the coin on pickup
if coinTest[0]:
score += 1
SOUNDS['point'].play()
coins.pop(0)
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
SOUNDS['point'].play()
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# rotate the player
if playerRot > -90:
playerRot -= playerVelRot
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
playerRot = 45
playerHeight = IMAGES['player'][playerIndex].get_height()
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe in upperPipes:
uPipe['x'] += pipeVelX
for lPipe in lowerPipes:
lPipe['x'] += pipeVelX
# added: move coins to the left
for coin in coins:
coin['x'] += pipeVelX
# update (add / remove) pipes and particles
if FURYMODE:
furymodePipeFrameCounter += 1
# the counter has the max value, we must spawn new pipes
if furymodePipeFrameCounter == FURYMODE_FRAMES_TO_SPAWN_PIPES:
# counter reset
furymodePipeFrameCounter = 0
# pipe spawn
pipes = getRandomPipe(DIFFICULTY)
upperPipes.append(pipes[0])
lowerPipes.append(pipes[1])
# check if a pipe must be removed from the list
for uPipe in upperPipes:
if uPipe['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.remove(uPipe)
for lPipe in lowerPipes:
if lPipe['x'] < -IMAGES['pipe'][0].get_width():
lowerPipes.remove(lPipe)
# particles
for particle in particles:
# speed
particle['x'] += particle['vx']
particle['y'] += particle['vy']
# gravity
particle['vy'] += playerAccY
# remove if the particle is under the ground
if particle['y'] >= BASEY:
particles.remove(particle)
else:
# add new pipes when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe(DIFFICULTY)
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# added: spawn a new coin alongside the new pipe
newCoin = getRandomCoin()
coins.append(newCoin[0])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
lowerPipes.pop(0)
upperPipes.pop(0)
# added: drop the coin once it leaves the screen
if coins[0]['x'] < -IMAGES['coin'].get_width():
coins.pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe in upperPipes:
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
for lPipe in lowerPipes:
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
# added: draw the coins
for coin in coins:
SCREEN.blit(IMAGES['coin'], (coin['x'], coin['y']))
# pipes' particles
if FURYMODE:
for particle in particles:
SCREEN.blit(IMAGES['pipe-particle'][particle['i']], (particle['x'], particle['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
# Player rotation has a threshold
visibleRot = playerRotThr
if playerRot <= playerRotThr:
visibleRot = playerRot
playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot)
SCREEN.blit(playerSurface, (playerx, playery))
pygame.display.update()
FPSCLOCK.tick(FPS)
def showGameOverScreen(crashInfo):
"""crashes the player down ans shows gameover image"""
global FURYMODE, EASYMODE
FURYMODE = False
EASYMODE = False
score = crashInfo['score']
playerx = SCREENWIDTH * 0.2
playery = crashInfo['y']
playerHeight = IMAGES['player'][0].get_height()
playerVelY = crashInfo['playerVelY']
playerAccY = 2
playerRot = crashInfo['playerRot']
playerVelRot = 7
count=0
gameover = True
basex = crashInfo['basex']
upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
# play hit and die sounds
SOUNDS['hit'].play()
if not crashInfo['groundCrash']:
SOUNDS['die'].play()
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if (event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP)) or (event.type == MOUSEBUTTONDOWN and event.button == 1):
if playery + playerHeight >= BASEY - 1:
return
# player y shift
if playery + playerHeight < BASEY - 1:
playery += min(playerVelY, BASEY - playery - playerHeight)
# player velocity change
if playerVelY < 15:
playerVelY += playerAccY
# rotate only when it's a pipe crash
if not crashInfo['groundCrash']:
if playerRot > -90:
playerRot -= playerVelRot
# draw sprites
overx = int((SCREENWIDTH - IMAGES['gameover'].get_width()) / 2)
overy = int(SCREENHEIGHT * 0.5)
#SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
showScore(score)
playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)
SCREEN.blit(playerSurface, (playerx,playery))
#showScore(score)
if (score > TOPFIVE[4][1] and count==0) :
SCREEN.blit(IMAGES['gameover'], (overx,overy))
pygame.display.update()
gameover = False
pygame.time.delay(1000)
SCREEN.blit(IMAGES['background'], (0,0))
writeScore(score)
count=count+1
pygame.display.update()
elif(gameover == True):
SCREEN.blit(IMAGES['gameover'], (overx,overy))
pygame.display.update()
gameover = False
pygame.time.delay(1000)
showLeaderboard()
FPSCLOCK.tick(FPS)
pygame.display.update()
def showLeaderboard():
fontobject = pygame.font.Font(None,30)
SCREEN.blit(IMAGES['background'],(0,0))
SCREEN.blit(pygame.font.Font(None,50).render("LEADERBOARD", 1, RED),((SCREEN.get_width() / 2) -132, (SCREEN.get_height() / 2) -220))
for i in range(0,5) :
SCREEN.blit(fontobject.render(TOPFIVE[i][0], 1, RED),((SCREEN.get_width() / 2) - 100, (SCREEN.get_height() / 2) -160 + (50*i)))
SCREEN.blit(fontobject.render(str(TOPFIVE[i][1]), 1,RED),((SCREEN.get_width() / 2) + 75, (SCREEN.get_height() / 2) -160 + (50*i)))
FPSCLOCK.tick(FPS)
pygame.display.update()
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe(DIFFICULTY):
PIPEGAPSIZE = 100 + DIFFICULTY * 10
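# the vertical gap is 100 px plus 10 px per DIFFICULTY point (easy mode passes DIFFICULTY = 4, giving a 140 px gap)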
""" returns a randomly generated pipe """
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
# added: coin spawning helper
def getRandomCoin():
""" returns a randomly generated coin """
coinY = random.randrange(20, int(BASEY * 0.6))
coinX = SCREENWIDTH + 100
return [
{'x': coinX, 'y': coinY},
]
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def spawnParticles(particles, pipe):
"""
Add paticles to the particle list randomly
generated with pipe's rectangle (hitbox)
"""
global FURYMODE_PARTICLES, FURYMODE_PARTICLES_MAX, SOUNDS
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for i in range(FURYMODE_PARTICLES_MAX):
particle = {}
particle['x'] = random.randint(pipe['x'], pipe['x'] + pipeW)
particle['y'] = random.randint(pipe['y'], pipe['y'] + pipeH)
particle['i'] = random.randint(1, FURYMODE_PARTICLES) - 1
# random angle for a minimum velocity
vel = random.random() * 10 + 5
aMin = -math.pi * .35
aMax = math.pi * .25
angle = random.random() * (aMax - aMin) + aMin
particle['vx'] = math.cos(angle) * vel
particle['vy'] = math.sin(angle) * vel
particles.append(particle)
# sound effect
SOUNDS['hit'].play()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
global FURYMODE
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe in upperPipes:
# pipe rect
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
# if bird collided with pipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
if uCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if FURYMODE:
return [True, False, True, uPipe]
# normal mode
return [True, False]
for lPipe in lowerPipes:
# pipe rect
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and pipe hitmasks
pHitMask = HITMASKS['player'][pi]
lHitmask = HITMASKS['pipe'][0]
# if bird collided with pipe
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if lCollide:
# for fury mode we want to break the pipe so we
# must return which pipe is colliding (lower or upper)
if FURYMODE:
return [True, False, False, lPipe]
# normal mode
return [True, False]
return [False, False]
# 추가된 부분
def checkCoin(player, coins):
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h'])
coinW = IMAGES['coin'].get_width()
coinH = IMAGES['coin'].get_height()
for coin in coins:
coinRect = pygame.Rect(coin['x'], coin['y'], coinW, coinH)
pHitMask = HITMASKS['player'][pi]
cHitMask = HITMASKS['coin'][0]
cCollide = pixelCollision(playerRect, coinRect, pHitMask, cHitMask)
if cCollide :
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def writeScore(score):
TOPFIVE.append((ask(SCREEN,"NAME: "),score))
TOPFIVE.sort(key=itemgetter(1),reverse= True)
TOPFIVE.pop()
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
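# Hedged usage sketch (added for illustration, not part of the original game):
# shows getHitmask() marking only opaque pixels on a tiny per-pixel-alpha
# surface.  It relies only on pygame and the function defined above.
def _demo_getHitmask():
    surf = pygame.Surface((2, 2), pygame.SRCALPHA)
    surf.fill((0, 0, 0, 0))                 # fully transparent surface
    surf.set_at((1, 1), (255, 0, 0, 255))   # one opaque pixel
    mask = getHitmask(surf)
    assert mask[1][1] and not mask[0][0]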
def get_key():
while 1:
event = pygame.event.poll()
if event.type == KEYDOWN:
return event.key
else:
pass
def display_box(screen, message):
    "Print a message in a box in the middle of the screen"
    fontobject = pygame.font.Font(None,18)
    fontobject1 = pygame.font.Font(None,30)
pygame.draw.rect(screen, (0,0,0),
((screen.get_width() / 2) - 100,
(screen.get_height() / 2) - 10,
200,20), 0)
pygame.draw.rect(screen, (255,255,255),
((screen.get_width() / 2) - 102,
(screen.get_height() / 2) - 12,
204,24), 1)
if len(message) != 0:
screen.blit(fontobject1.render("HIGH SCORE!!!", 1, (255,255,255)),
((screen.get_width() / 2) - 75, (screen.get_height() / 2) - 50))
screen.blit(fontobject.render(message, 1, (255,255,255)),
((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
pygame.display.flip()
def ask(screen, question):
"ask(screen, question) -> answer"
pygame.font.init()
current_string = []
display_box(screen, question + ": " + "".join(current_string))
while 1:
inkey = get_key()
if inkey == K_BACKSPACE:
current_string = current_string[0:-1]
elif inkey == K_RETURN:
break
elif inkey == K_MINUS:
current_string.append("_")
elif inkey <= 127:
current_string.append(chr(inkey))
display_box(screen, question + ": " + "".join(current_string))
return "".join(current_string)
class Slider():
def __init__(self, val, maxi, mini, xpos, ypos):
self.val = val # start value
self.maxi = maxi # maximum at slider position right
self.mini = mini # minimum at slider position left
self.xpos = xpos # x-location on screen
self.ypos = ypos # y-location on screen
self.surf = pygame.surface.Surface((95, 40))
self.hit = False # the hit attribute indicates slider movement due to mouse interaction
# Static graphics - slider background #
self.surf.set_colorkey(BLACK)
pygame.draw.rect(self.surf, WHITE, [5, 30, 80, 5], 0)
# dynamic graphics - button surface #
self.button_surf = pygame.surface.Surface((15, 15))
self.button_surf.fill(TRANS)
self.button_surf.set_colorkey(TRANS)
pygame.draw.circle(self.button_surf, ORANGE, (6, 6), 6, 0)
def draw(self):
""" Combination of static and dynamic graphics in a copy of
the basic slide surface
"""
# static
surf = self.surf.copy()
# dynamic
pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*80), 33)
self.button_rect = self.button_surf.get_rect(center=pos)
surf.blit(self.button_surf, self.button_rect)
self.button_rect.move_ip(self.xpos, self.ypos) # move of button box to correct screen position
# screen
SCREEN.blit(IMAGES['background'], (0,0))
SCREEN.blit(surf, (self.xpos, self.ypos))
def move(self):
"""
The dynamic part; reacts to movement of the slider button.
"""
self.val = (pygame.mouse.get_pos()[0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini
if self.val < self.mini:
self.val = self.mini
if self.val > self.maxi:
self.val = self.maxi
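# Hedged usage sketch (illustration only, not part of the original game): how a
# Slider is typically driven from the pygame event loop.  SCREEN/FPS handling
# and the surrounding loop are assumed to exist as set up elsewhere in main();
# note that button_rect only exists after the first draw() call.
# difficulty = Slider(val=3, maxi=10, mini=0, xpos=50, ypos=200)
# difficulty.draw()
# for event in pygame.event.get():
#     if event.type == MOUSEBUTTONDOWN and difficulty.button_rect.collidepoint(event.pos):
#         difficulty.hit = True
#     elif event.type == MOUSEBUTTONUP:
#         difficulty.hit = False
# if difficulty.hit:
#     difficulty.move()
# difficulty.draw()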
if __name__ == '__main__':
main()
| 2.703125 | 3 |
touchtechnology/news/templatetags/news.py | goodtune/vitriolic | 0 | 12797767 | from django.core.paginator import Paginator
from django.template import Library
from django.utils.translation import ugettext_lazy as _
from touchtechnology.news.models import Article, Category
register = Library()
@register.filter("category")
def get_category(slug):
return Category.objects.get(slug=slug)
@register.inclusion_tag('touchtechnology/news/_related_list.html',
takes_context=True)
def related_articles(context, article, limit=None, order_by=None):
categories = article.categories.live()
articles = Article.objects.live() \
.exclude(pk=article.pk) \
.filter(categories__in=categories) \
.distinct()
if order_by is not None:
articles = articles.order_by(*order_by.split(','))
if limit is not None:
articles = articles[:int(limit)]
# FIXME backwards compatibility for custom templates
context['slice'] = ':'
context['article_list'] = articles
return context
@register.inclusion_tag('touchtechnology/news/_related_list.html',
takes_context=True)
def related_categories(context, article=None, limit=None):
"""
If an article is provided, then we select categories relating to it.
Otherwise we select all article categories.
"""
if article is None:
categories = Category.objects.all()
else:
categories = article.categories.all()
context['category_list'] = categories
return context
@register.inclusion_tag('touchtechnology/news/_latest_articles.html',
takes_context=True)
def latest_articles(context, count=5, title=_("Latest News")):
articles = Article.objects.live()
paginator = Paginator(articles, count)
page = paginator.page(1)
context['paginator'] = paginator
context['page'] = page
context['article_list'] = page.object_list
context['title'] = title
return context
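# Hedged usage sketch (not part of the original module): these inclusion tags
# and the filter are meant to be called from Django templates, roughly as
# below.  The "published" ordering field is an illustrative assumption.
#
#   {% load news %}
#   {% latest_articles 3 %}
#   {% related_articles article limit=5 order_by="-published" %}
#   {% related_categories article %}
#   {{ "some-slug"|category }}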
| 2.265625 | 2 |
src/flask_bombril/r.py | marcoprado17/flask-bone | 0 | 12797768 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================================================================================
# The MIT License (MIT)
# ======================================================================================================================
# Copyright (c) 2016 [<NAME> - <EMAIL>]
# ======================================================================================================================
class Resources:
def __init__(self):
self.string = self.__Strings()
self.id = self.__Ids()
self.dimen = self.__Dimens()
class __Strings:
def __init__(self):
self.validators = self.__Validators()
self.test_message = "Mensagem de teste"
self.test_message_2 = "Mensagem de teste 2"
self.static = "static"
self.toast = "toast"
self.category_separator = "-"
class __Validators:
def __init__(self):
self.required_field = "Campo obrigatório."
self.invalid_email_format = "Formato de email inválido."
self.email_already_registered = "Email já cadastrado."
self.unique_field = "Valor já registrado."
self.field_min_length_singular = "O campo deve possuir no mínimo %(min_length)d caracter."
self.field_min_length_plural = "O campo deve possuir no mínimo %(min_length)d caracteres."
self.field_max_length_singular = "O campo deve possuir no máximo %(max_length)d caracter."
self.field_max_length_plural = "O campo deve possuir no máximo %(max_length)d caracteres."
self.field_length_range = "O campo deve possuir entre %(min_length)d e %(max_length)d caracteres."
self.invalid_field_name = "Invalid field name '%(field_name)s'."
self.field_must_be_equal_to = "Este campo precisa ser igual ao campo %(other_name)s."
self.always_error = "Essa mensagem de erro sempre será lançada para esse campo"
class __Ids:
def __init__(self):
self.example = "example"
class __Dimens:
def __init__(self):
self.test_int = 42
self.test_int_2 = 17
R = Resources()
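# Hedged usage sketch (added for illustration): typical lookups against the
# resource singleton defined above.
if __name__ == "__main__":
    print(R.string.validators.required_field)  # "Campo obrigatório."
    print(R.string.test_message)               # "Mensagem de teste"
    print(R.dimen.test_int)                    # 42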
| 2.140625 | 2 |
gittip/__init__.py | gchiam/gittip-py | 0 | 12797769 | # -*- coding: utf-8 -*-
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 1.054688 | 1 |
src/migrations/versions/da911e0e68bc_add_user_roles.py | mstrechen/advanced-news-scraper | 0 | 12797770 |
"""add_user_roles
Revision ID: <KEY>
Revises: 8f176326a337
Create Date: 2020-08-09 23:00:56.671372
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '8f176326a337'
branch_labels = None
depends_on = None
ROLES = ['admin', 'user']
def upgrade():
op.add_column('users', sa.Column('role', sa.Enum(*ROLES), default='user'))
def downgrade():
op.drop_column('users', 'role')
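# Hedged note (not part of the original migration): with a configured
# alembic.ini, this revision is typically applied or rolled back from the
# command line, e.g.:
#
#   alembic upgrade head            # runs upgrade() above
#   alembic downgrade 8f176326a337  # runs downgrade(), back to the parent revision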
| 1.171875 | 1 |
tests/integrationtests/mock_data.py | efre-lod/efre-lod-api | 4 | 12797771 |
import os
import lod_api
import difflib
import json
from collections import OrderedDict
class MockDataHandler():
def __init__(self):
# define path for test data
self.path = os.path.join(lod_api.__path__[0], "../../tests/data/mockout/")
print(self.path)
def normalize_data(self, data, format):
""" normalize given data with a given format in order to compare them with data dumps.
"""
if format == "nq":
print("sort nq file")
lines = sorted(data.split("\n"))
data_out = "\n".join(lines)
else:
data_out = data
# if format == "json":
# data_json = json.loads(data, object_pairs_hook=orderedDict)
# data_out = json.dumps(data_json)
# elif format == "jsonl":
# lines = []
# for js in data.split("\n"):
# if js:
# print(js)
# data_json = json.loads(js, object_pairs_hook=orderedDict)
# lines.append(json.dumps(data_json))
# data_out = "\n".join(lines)
return data_out
def _sanitize_fname(self, fname, extension=".dat"):
fname = fname.replace("/", "-")
return fname + extension
def write(self, fname, data, format=None):
fname = self._sanitize_fname(fname)
with open(os.path.join(self.path, fname), "w+") as outfile:
outfile.write(self.normalize_data(data, format))
def compare(self, fname, data, format=None):
fname = self._sanitize_fname(fname)
output_data = self.normalize_data(data, format)
with open(os.path.join(self.path, fname), "r") as infile:
compdata = infile.read()
diff = difflib.unified_diff(output_data, compdata,
fromfile="API-data", tofile=fname)
print("".join(diff))
assert(output_data == compdata)
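# Hedged usage sketch (not part of the original helpers): a test would normally
# dump API output once with write() and assert against it later with compare().
# The "search/document" name and api_response payload are made-up illustrations.
def _example_roundtrip(api_response):
    handler = MockDataHandler()
    handler.write("search/document", api_response, format="json")
    handler.compare("search/document", api_response, format="json")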
| 2.609375 | 3 |
fline/utils/logging/base_group.py | asromahin/fline | 5 | 12797772 | class BaseGroup:
def __init__(self, log_type):
self.log_type = log_type
def __call__(self, data):
return data
| 2.265625 | 2 |
benches/tokenizer.py | cgwalters/logreduce-tokenizer | 0 | 12797773 | # Copyright (C) 2022 Red Hat
# SPDX-License-Identifier: Apache-2.0
# A copy of logreduce.tokenizer
import re
import os
DAYS = "sunday|monday|tuesday|wednesday|thursday|friday|saturday"
MONTHS = (
"january|february|march|april|may|june|july|august|september|"
"october|november|december"
)
SHORT_MONTHS = "jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dev"
SHORT_DAYS = "mon|tue|wed|thu|fri|sat|sun"
UUID_RE = r"[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-" "?[0-9a-f]{12}"
IPV4_RE = (
r"(([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])\.){3}"
r"([01]?[0-9]?[0-9]|2[0-4][0-9]|2[5][0-5])"
)
IPV6_RE = r"([0-9A-Fa-f]{0,4}:){2,6}(\d{1,3}\.){0,3}[0-9A-Fa-f]{1,3}"
MAC_RE = r"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})"
class Tokenizer:
rawline_re = re.compile(
# useless http GET
r'"GET / HTTP/1.1"'
r'|"OPTIONS * HTTP/1.0" 200'
# ssh keys
r"|AAAA[A-Z][0-9]"
# hashed password
r"|\$[0-9]\$"
# Certificates
r"|-----BEGIN"
# git status
r"|HEAD is now at|Change-Id: "
# Download statement
r"| ETA "
# yum mirrors information
r"|\* [a-zA-Z]+: [a-zA-Z0-9\.-]*$|Trying other mirror."
# ssh scan attempts
r'|audit.*exe="/usr/sbin/sshd"|sshd.*[iI]nvalid user'
r"|sshd.*Unable to connect using the available authentication methods"
r"|unix_chkpwd.*: password check failed for user"
r"|sshd.*: authentication failure"
r"|sshd.*: Failed password for"
r"|sshd.*- POSSIBLE BREAK-IN ATTEMPT"
# zuul random test
r"|zuul.*echo BECOME-SUCCESS-"
r"|^[^ ]{64}$"
# useless debug statement
r"|ovs-ofctl .* (dump-ports|dump-flows|show)\b"
r"|(ip|eb)tables .* -L\b"
)
# See https://en.wikipedia.org/wiki/Percent-encoding
uri_percent_re = re.compile(r"%[2345][0-9A-F]")
ip_re = re.compile(r"%s|%s|%s" % (IPV4_RE, IPV6_RE, MAC_RE))
# For some unknown reason, '_' in (?=) doesn't work in prefix match
# re.sub(r'(?=\b|_)test(?=\b|_)', 'RNG', 'AUTH_test_') -> doesn't work
# re.sub(r'(?=\b|_)_?test(?=\b|_)', 'RNG', 'AUTH_test_') -> works
power2_re = re.compile(
r"(?=\b|_)_?(?:[\w+/]{128}|[\w+/]{64}|"
r"[0-9a-fA-F]{40}|[0-9a-fA-F]{32})(?=\b|_)"
)
uuid_re = re.compile(r"(?=\b|_)_?(?:%s|tx[^ ]{32})(?=\b|_)" % UUID_RE, re.I)
date_re = re.compile(
r"\b(?:%s|%s|%s|%s)\b" % (DAYS, SHORT_DAYS, SHORT_MONTHS, MONTHS), re.I
)
heat_re = re.compile(r"-\w{12}[- \"$]")
comments = re.compile(r'(?:[\s]*# |^%% |^#|^[\s]*id = ").*')
alpha_re = re.compile(r"[^a-zA-Z_\/\s]+")
gitver_re = re.compile(r"git\w+")
digits_re = re.compile(r"0x[0-9a-fA-F]{2,}|[0-9]+(?:\.\d+)?")
randpath_re = re.compile(
r"(?:/tmp/ansible\.\w{8}" r"|/tmp/tmp\w{6}" r"|/tmp/tmp\.\w{10})\b"
)
gitsha_re = re.compile(r"\b\w{7}\.\.\w{7}\b")
hash_re = re.compile(r"SHA256:[\w+/]{43}\b")
@staticmethod
def process(line: str) -> str:
# Ignore some raw pattern first
if Tokenizer.rawline_re.search(line):
return ""
strip = line
# Break URI percent encoding
strip = Tokenizer.uri_percent_re.sub(" ", strip)
# Remove words that are exactly 32, 64 or 128 character longs
strip = Tokenizer.power2_re.sub("RNGN", strip)
# Remove uuid
strip = Tokenizer.uuid_re.sub("RNGU", strip)
# Remove heat short uuid but keep spacing
# ObjectName-2kbhkd45kcs3-ServiceName -> ObjectName-HEATID-ServiceName
strip = Tokenizer.heat_re.sub(" HEATID ", strip)
# Remove git sha
strip = Tokenizer.gitsha_re.sub("RNGG", strip)
# Remove hashes
strip = Tokenizer.hash_re.sub("RNGH", strip)
# Remove random path
strip = Tokenizer.randpath_re.sub("RNGP", strip)
# Remove date
strip = Tokenizer.date_re.sub("DATE", strip)
# Remove ip/addr
strip = Tokenizer.ip_re.sub("RNGI", strip)
# Remove numbers
strip = Tokenizer.digits_re.sub("", strip)
# Only keep characters
strip = Tokenizer.alpha_re.sub(" ", strip)
# Remove tiny words
strip = " ".join(filter(lambda x: len(x) > 3, strip.split()))
# Weight failure token
for token in ("error", "fail", "warn"):
if token in strip.lower():
strip += " %sA %sB %sC %sD" % (token, token, token, token)
return strip
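# Hedged usage sketch (not part of the copied tokenizer): Tokenizer.process()
# collapses volatile tokens (uuids, IPs, dates, numbers) so that similar log
# lines normalise to the same string.  The sample line below is invented.
if __name__ == "__main__":
    sample = "May 03 10:22:33 host-1 kernel: error from 192.168.0.12 req 0f8b1c2a-1d2e-4a5b-9c3d-7e6f5a4b3c2d"
    print(Tokenizer.process(sample))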
| 2.234375 | 2 |
flink_rest_client/v1/jobs.py | frego-dev/flink-rest-client | 0 | 12797774 | from flink_rest_client.common import _execute_rest_request, RestException
class JobTrigger:
def __init__(self, prefix, type_name, job_id, trigger_id):
self._prefix = prefix
self._type_name = type_name
self.job_id = job_id
self.trigger_id = trigger_id
@property
def status(self):
return _execute_rest_request(
url=f"{self._prefix}/{self.job_id}/{self._type_name}/{self.trigger_id}"
)
class JobVertexSubtaskClient:
def __init__(self, prefix):
"""
Constructor.
Parameters
----------
prefix: str
REST API url prefix. It must contain the host, port pair.
"""
self._prefix = prefix
@property
def prefix_url(self):
return f"{self._prefix}/subtasks"
def subtask_ids(self):
"""
Returns the subtask identifiers.
Returns
-------
list
Positive integer list of subtask ids.
"""
return [elem["subtask"] for elem in self.accumulators()["subtasks"]]
def accumulators(self):
"""
Returns all user-defined accumulators for all subtasks of a task.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/accumulators
Returns
-------
dict
User-defined accumulators
"""
return _execute_rest_request(url=f"{self.prefix_url}/accumulators")
def metric_names(self):
"""
Returns the supported metric names.
Returns
-------
list
List of metric names.
"""
return [
elem["id"]
for elem in _execute_rest_request(url=f"{self.prefix_url}/metrics")
]
def metrics(self, metric_names=None, agg_modes=None, subtask_ids=None):
"""
Provides access to aggregated subtask metrics.
By default it returns with all existing metric names.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/metrics
Parameters
----------
metric_names: list
(optional) List of selected specific metric names. Default: <all metrics>
agg_modes: list
(optional) List of aggregation modes which should be calculated. Available aggregations are: "min, max,
sum, avg". Default: <all modes>
subtask_ids: list
List of positive integers to select specific subtasks. The list of valid subtask ids is available through
the subtask_ids() method. Default: <all subtasks>.
Returns
-------
dict
Key-value pairs of metrics.
"""
if metric_names is None:
metric_names = self.metric_names()
supported_agg_modes = ["min", "max", "sum", "avg"]
if agg_modes is None:
agg_modes = supported_agg_modes
if len(set(agg_modes).difference(set(supported_agg_modes))) > 0:
raise RestException(
f"The provided aggregation modes list contains invalid value. Supported aggregation "
f"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}"
)
if subtask_ids is None:
subtask_ids = self.subtask_ids()
params = {
"get": ",".join(metric_names),
"agg": ",".join(agg_modes),
"subtasks": ",".join([str(elem) for elem in subtask_ids]),
}
query_result = _execute_rest_request(
url=f"{self.prefix_url}/metrics", params=params
)
result = {}
for elem in query_result:
metric_name = elem.pop("id")
result[metric_name] = elem
return result
def get(self, subtask_id):
"""
Returns details of the current or latest execution attempt of a subtask.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex
Parameters
----------
subtask_id: int
Positive integer value that identifies a subtask.
Returns
-------
dict
"""
return _execute_rest_request(url=f"{self.prefix_url}/{subtask_id}")
def get_attempt(self, subtask_id, attempt_id=None):
"""
Returns details of an execution attempt of a subtask. Multiple execution attempts happen in case of
failure/recovery.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt
Parameters
----------
subtask_id: int
Positive integer value that identifies a subtask.
attempt_id: int
(Optional) Positive integer value that identifies an execution attempt.
Default: current execution attempt's id
Returns
-------
dict
Details of the selected attempt.
"""
if attempt_id is None:
return self.get(subtask_id)
return _execute_rest_request(
url=f"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}"
)
def get_attempt_accumulators(self, subtask_id, attempt_id=None):
"""
Returns the accumulators of an execution attempt of a subtask. Multiple execution attempts happen in case of
failure/recovery.
Parameters
----------
subtask_id: int
Positive integer value that identifies a subtask.
attempt_id: int
(Optional) Positive integer value that identifies an execution attempt.
Default: current execution attempt's id
Returns
-------
dict
The accumulators of the selected execution attempt of a subtask.
"""
if attempt_id is None:
attempt_id = self.get(subtask_id)["attempt"]
return _execute_rest_request(
url=f"{self.prefix_url}/{subtask_id}/attempts/{attempt_id}/accumulators"
)
class JobVertexClient:
def __init__(self, prefix, job_id, vertex_id):
"""
Constructor.
Parameters
----------
prefix: str
REST API url prefix. It must contain the host, port pair.
"""
self._prefix = prefix
self.job_id = job_id
self.vertex_id = vertex_id
@property
def prefix_url(self):
return f"{self._prefix}/{self.job_id}/vertices/{self.vertex_id}"
@property
def subtasks(self):
return JobVertexSubtaskClient(self.prefix_url)
def details(self):
"""
Returns details for a task, with a summary for each of its subtasks.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid
Returns
-------
dict
details for a task.
"""
return _execute_rest_request(url=self.prefix_url)
def backpressure(self):
"""
Returns back-pressure information for a job, and may initiate back-pressure sampling if necessary.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/backpressure
Notes
-----
The deprecated status means that the back pressure stats are not available.
Returns
-------
dict
Backpressure information
"""
return _execute_rest_request(url=f"{self.prefix_url}/backpressure")
def metric_names(self):
"""
Returns the supported metric names.
Returns
-------
list
List of metric names.
"""
return [
elem["id"]
for elem in _execute_rest_request(url=f"{self.prefix_url}/metrics")
]
def metrics(self, metric_names=None):
"""
Provides access to task metrics.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/metrics
Returns
-------
dict
Task metrics.
"""
if metric_names is None:
metric_names = self.metric_names()
params = {"get": ",".join(metric_names)}
query_result = _execute_rest_request(
url=f"{self.prefix_url}/metrics", params=params
)
result = {}
for elem in query_result:
metric_name = elem.pop("id")
result[metric_name] = elem["value"]
return result
def subtasktimes(self):
"""
Returns time-related information for all subtasks of a task.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/subtasktimes
Returns
-------
dict
Time-related information for all subtasks
"""
return _execute_rest_request(url=f"{self.prefix_url}/subtasktimes")
def taskmanagers(self):
"""
Returns task information aggregated by task manager.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/taskmanagers
Returns
-------
dict
Task information aggregated by task manager.
"""
return _execute_rest_request(url=f"{self.prefix_url}/taskmanagers")
def watermarks(self):
"""
Returns the watermarks for all subtasks of a task.
Endpoint: [GET] /jobs/:jobid/vertices/:vertexid/watermarks
Returns
-------
list
Watermarks for all subtasks of a task.
"""
return _execute_rest_request(url=f"{self.prefix_url}/watermarks")
class JobsClient:
def __init__(self, prefix):
"""
Constructor.
Parameters
----------
prefix: str
REST API url prefix. It must contain the host, port pair.
"""
self.prefix = f"{prefix}/jobs"
def all(self):
"""
Returns an overview over all jobs and their current state.
Endpoint: [GET] /jobs
Returns
-------
list
List of jobs and their current state.
"""
return _execute_rest_request(url=self.prefix)["jobs"]
def job_ids(self):
"""
Returns the list of job_ids.
Returns
-------
list
List of job ids.
"""
return [elem["id"] for elem in self.all()]
def overview(self):
"""
Returns an overview over all jobs.
Endpoint: [GET] /jobs/overview
Returns
-------
list
List of existing jobs.
"""
return _execute_rest_request(url=f"{self.prefix}/overview")["jobs"]
def metric_names(self):
"""
Returns the supported metric names.
Returns
-------
list
List of metric names.
"""
return [
elem["id"] for elem in _execute_rest_request(url=f"{self.prefix}/metrics")
]
def metrics(self, metric_names=None, agg_modes=None, job_ids=None):
"""
        Provides access to aggregated job metrics.
Endpoint: [GET] /jobs/metrics
Parameters
----------
metric_names: list
(optional) List of selected specific metric names. Default: <all metrics>
agg_modes: list
(optional) List of aggregation modes which should be calculated. Available aggregations are: "min, max,
sum, avg". Default: <all modes>
job_ids: list
List of 32-character hexadecimal strings to select specific jobs. The list of valid jobs
are available through the job_ids() method. Default: <all taskmanagers>.
Returns
-------
dict
Aggregated job metrics.
"""
if metric_names is None:
metric_names = self.metric_names()
supported_agg_modes = ["min", "max", "sum", "avg"]
if agg_modes is None:
agg_modes = supported_agg_modes
if len(set(agg_modes).difference(set(supported_agg_modes))) > 0:
raise RestException(
f"The provided aggregation modes list contains invalid value. Supported aggregation "
f"modes: {','.join(supported_agg_modes)}; given list: {','.join(agg_modes)}"
)
if job_ids is None:
job_ids = self.job_ids()
params = {
"get": ",".join(metric_names),
"agg": ",".join(agg_modes),
"jobs": ",".join(job_ids),
}
query_result = _execute_rest_request(
url=f"{self.prefix}/metrics", params=params
)
result = {}
for elem in query_result:
metric_name = elem.pop("id")
result[metric_name] = elem
return result
def get(self, job_id):
"""
Returns details of a job.
Endpoint: [GET] /jobs/:jobid
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
Details of the selected job.
"""
return _execute_rest_request(url=f"{self.prefix}/{job_id}")
def get_config(self, job_id):
"""
Returns the configuration of a job.
Endpoint: [GET] /jobs/:jobid/config
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
Job configuration
"""
return _execute_rest_request(url=f"{self.prefix}/{job_id}/config")
def get_exceptions(self, job_id):
"""
Returns the most recent exceptions that have been handled by Flink for this job.
Endpoint: [GET] /jobs/:jobid/exceptions
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
The most recent exceptions.
"""
return _execute_rest_request(url=f"{self.prefix}/{job_id}/exceptions")
def get_execution_result(self, job_id):
"""
Returns the result of a job execution. Gives access to the execution time of the job and to all accumulators
created by this job.
Endpoint: [GET] /jobs/:jobid/execution-result
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
The execution result of the selected job.
"""
return _execute_rest_request(url=f"{self.prefix}/{job_id}/execution-result")
def get_metrics(self, job_id, metric_names=None):
"""
Provides access to job metrics.
Endpoint: [GET] /jobs/:jobid/metrics
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
metric_names: list
(optional) List of selected specific metric names. Default: <all metrics>
Returns
-------
dict
Job metrics.
"""
if metric_names is None:
metric_names = self.metric_names()
params = {"get": ",".join(metric_names)}
query_result = _execute_rest_request(
url=f"{self.prefix}/{job_id}/metrics", params=params
)
return dict([(elem["id"], elem["value"]) for elem in query_result])
def get_plan(self, job_id):
"""
Returns the dataflow plan of a job.
Endpoint: [GET] /jobs/:jobid/plan
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
Dataflow plan
"""
return _execute_rest_request(url=f"{self.prefix}/{job_id}/plan")["plan"]
def get_vertex_ids(self, job_id):
"""
Returns the ids of vertices of the selected job.
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
list
List of identifiers.
"""
return [elem["id"] for elem in self.get(job_id)["vertices"]]
def get_accumulators(self, job_id, include_serialized_value=None):
"""
Returns the accumulators for all tasks of a job, aggregated across the respective subtasks.
Endpoint: [GET] /jobs/:jobid/accumulators
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
include_serialized_value: bool
(Optional) Boolean value that specifies whether serialized user task accumulators should be included in
the response.
Returns
-------
dict
Accumulators for all task.
"""
params = {}
if include_serialized_value is not None:
params["includeSerializedValue"] = (
"true" if include_serialized_value else "false"
)
return _execute_rest_request(
url=f"{self.prefix}/{job_id}/accumulators", http_method="GET", params=params
)
def get_checkpointing_configuration(self, job_id):
"""
Returns the checkpointing configuration of the selected job_id
Endpoint: [GET] /jobs/:jobid/checkpoints/config
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
Checkpointing configuration of the selected job.
"""
return _execute_rest_request(
url=f"{self.prefix}/{job_id}/checkpoints/config", http_method="GET"
)
def get_checkpoints(self, job_id):
"""
Returns checkpointing statistics for a job.
Endpoint: [GET] /jobs/:jobid/checkpoints
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
dict
Checkpointing statistics for the selected job: counts, summary, latest and history.
"""
return _execute_rest_request(
url=f"{self.prefix}/{job_id}/checkpoints", http_method="GET"
)
def get_checkpoint_ids(self, job_id):
"""
Returns checkpoint ids of the job_id.
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
list
List of checkpoint ids.
"""
return [elem["id"] for elem in self.get_checkpoints(job_id=job_id)["history"]]
def get_checkpoint_details(self, job_id, checkpoint_id, show_subtasks=False):
"""
Returns details for a checkpoint.
Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid
If show_subtasks is true:
Endpoint: [GET] /jobs/:jobid/checkpoints/details/:checkpointid/subtasks/:vertexid
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
checkpoint_id: int
Long value that identifies a checkpoint.
show_subtasks: bool
If it is True, the details of the subtask are also returned.
Returns
-------
dict
"""
checkpoint_details = _execute_rest_request(
url=f"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}",
http_method="GET",
)
if not show_subtasks:
return checkpoint_details
subtasks = {}
for vertex_id in checkpoint_details["tasks"].keys():
subtasks[vertex_id] = _execute_rest_request(
url=f"{self.prefix}/{job_id}/checkpoints/details/{checkpoint_id}/subtasks/{vertex_id}",
http_method="GET",
)
checkpoint_details["subtasks"] = subtasks
return checkpoint_details
def rescale(self, job_id, parallelism):
"""
Triggers the rescaling of a job. This async operation would return a 'triggerid' for further query identifier.
        Endpoint: [PATCH] /jobs/:jobid/rescaling
Notes
-----
Using Flink version 1.12, the method will raise RestHandlerException because this rescaling is temporarily
disabled. See FLINK-12312.
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
parallelism: int
Positive integer value that specifies the desired parallelism.
Returns
-------
JobTrigger
Object that can be used to query the status of rescaling.
"""
params = {"parallelism": parallelism}
trigger_id = _execute_rest_request(
url=f"{self.prefix}/{job_id}/rescaling", http_method="PATCH", params=params
)["triggerid"]
return JobTrigger(self.prefix, "rescaling", job_id, trigger_id)
def create_savepoint(self, job_id, target_directory, cancel_job=False):
"""
Triggers a savepoint, and optionally cancels the job afterwards. This async operation would return a
JobTrigger for further query identifier.
        Endpoint: [POST] /jobs/:jobid/savepoints
Notes
-----
The target directory has to be a location accessible by both the JobManager(s) and TaskManager(s)
e.g. a location on a distributed file-system or Object Store.
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
target_directory: str
Savepoint target directory.
cancel_job: bool
If it is True, it also stops the job after the savepoint creation.
Returns
-------
JobTrigger
Object that can be used to query the status of savepoint.
"""
trigger_id = _execute_rest_request(
url=f"{self.prefix}/{job_id}/savepoints",
http_method="POST",
accepted_status_code=202,
json={"cancel-job": cancel_job, "target-directory": target_directory},
)["request-id"]
return JobTrigger(self.prefix, "savepoints", job_id, trigger_id)
def terminate(self, job_id):
"""
Terminates a job.
Endpoint: [PATCH] /jobs/:jobid
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
Returns
-------
bool
True if the job has been canceled, otherwise False.
"""
res = _execute_rest_request(
url=f"{self.prefix}/{job_id}", http_method="PATCH", accepted_status_code=202
)
if len(res) < 1:
return True
else:
return False
def stop(self, job_id, target_directory, drain=False):
"""
Stops a job with a savepoint. This async operation would return a JobTrigger for further query identifier.
Attention: The target directory has to be a location accessible by both the JobManager(s) and TaskManager(s)
e.g. a location on a distributed file-system or Object Store.
Draining emits the maximum watermark before stopping the job. When the watermark is emitted, all event time
timers will fire, allowing you to process events that depend on this timer (e.g. time windows or process
functions). This is useful when you want to fully shut down your job without leaving any unhandled events
or state.
        Endpoint: [POST] /jobs/:jobid/stop
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
target_directory: str
Savepoint target directory.
drain: bool
(Optional) If it is True, it emits the maximum watermark before stopping the job. default: False
Returns
-------
JobTrigger
Object that can be used to query the status of savepoint.
"""
data = {
"drain": False if drain is None else drain,
"targetDirectory": target_directory,
}
trigger_id = _execute_rest_request(
url=f"{self.prefix}/{job_id}/stop",
http_method="POST",
accepted_status_code=202,
json=data,
)["request-id"]
return JobTrigger(self.prefix, "savepoints", job_id, trigger_id)
def get_vertex(self, job_id, vertex_id):
"""
Returns a JobVertexClient.
Parameters
----------
job_id: str
32-character hexadecimal string value that identifies a job.
vertex_id: str
32-character hexadecimal string value that identifies a vertex.
Returns
-------
JobVertexClient
JobVertexClient instance that can execute vertex related queries.
"""
return JobVertexClient(self.prefix, job_id, vertex_id)
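# Hedged usage sketch (not part of the client module): how the classes above fit
# together.  The REST prefix is an assumption, and in the real package a
# top-level client would normally construct JobsClient for you.
if __name__ == "__main__":
    jobs = JobsClient("http://localhost:8081/v1")
    for job_id in jobs.job_ids():
        details = jobs.get(job_id)
        print(job_id, details.get("state"), jobs.get_vertex_ids(job_id))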
| 2.09375 | 2 |
bin/selectNetwroks.py | ehsanfar/Network_Auctioneer | 2 | 12797775 | """
Copyright 2018, <NAME>, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, os
sys.path.append(os.path.abspath('..'))
from resources.classes import *
from resources.globalv import *
from collections import defaultdict, Counter
from itertools import product
import pickle
import random
import hashlib
from resources.optimizeMILP import optimizeMILP
from multiprocessing import Process, Manager
import argparse
dir_topologies = os.path.abspath('..') + '/topologies_new/'
def createNetTopologies():
global seedlist, filename, numfederates, elementnames, edgedivider
numberfederates = numfederates*[len(elementnames)//numfederates] # print([s.name for s in sources])
destinations = elementnames[-2:]
sources = [e for e in elementnames if e not in destinations]
if os.path.isfile(filename):
with open(filename, 'rb') as infile:
hashNetworkDict = pickle.load(infile)
hashNetworkDict = {h: obj for h,obj in hashNetworkDict.items() if obj.costValueDict}
else:
hashNetworkDict = {}
for seed in seedlist:
# print(seed)
random.seed(seed)
while sum(numberfederates)<len(elementnames):
i = random.choice(range(len(numberfederates)))
numberfederates[i] += 1
namelist = [n*['f%d'%i] for i, n in enumerate(numberfederates)]
federatenames = [e for l in namelist for e in l]
random.shuffle(federatenames)
# print("shuffle:", federatenames)
# all_edges = [(satellites[0],satellites[1]), (satellites[3],stations[0]), (satellites[1],satellites[3]),
# (satellites[2],satellites[4]), (satellites[2],satellites[1]), (satellites[2],satellites[3]), (satellites[3],satellites[4]), (satellites[4],stations[1]), (satellites[2],stations[0])]
# all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and element_federate_dict[a] != element_federate_dict[b])]
all_possible_edges = []
all_edges = []
# while len([l for l in all_possible_edges if l[1] in destinations])<len(elements)//linkcapacity:
all_possible_edges = [(a,b) for a, b in list(product(elementnames, elementnames)) if (a != b and not (a in destinations))]
all_edges = random.sample(all_possible_edges, int(len(all_possible_edges)//edgedivider))
edge2destin = [l for l in all_possible_edges if l[1] in destinations and l not in all_edges]
existingedges2desgin = [l for l in all_edges if l[1] in destinations]
nume2d = int(len(sources)/2 - len(existingedges2desgin))
# print(nume2d)
if nume2d>0:
newedges = random.sample(edge2destin, nume2d)
# print(len(all_edges))
all_edges = all_edges + newedges
# print(newedges)
# print(len(all_edges))
all_edge_set = set([])
destin_count = 0
for edge in all_edges:
s, d = edge
# if destin_count > len(satellites):
# continue
if s in destinations or d in destinations:
destin_count += linkcapacity
all_edge_set.add((s,d))
all_edge_set.add((d,s))
all_edges = list(all_edge_set)
tempNetTop = NetTop(elementnames, all_edges, federatenames, sources, destinations)
if tempNetTop.hashid not in hashNetworkDict:
# print(seed, tempNetTop.hashid)
hashNetworkDict[tempNetTop.hashid] = tempNetTop
with open(filename, 'wb') as outfile:
pickle.dump(hashNetworkDict, outfile)
def calCostValue(nettopObj):
federatenames = nettopObj.federates
# fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames}
# federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)]
# federateDict = {f.name: f for f in federates}
# # print("element names:", nettopObj.elements)
# elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)]
# elementDict = {e.name: e for e in elements}
# sources = [e for e in elements if e.name not in nettopObj.destinations]
# # sources = nettopObj.sources
# # print([s.name for s in sources])
# linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges]
# time = 0
# newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)]
# elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)}
# federates = [Federate(name = f, cash = 0, sharelinkcost = 0, uselinkcost = 0) for f in set(federatenames)]
# federateDict = {f.name: f for f in federates}
# # print("element names:", nettopObj.elements)
# elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)]
# elementDict = {e.name: e for e in elements}
# sources = [e for e in elements if e.name not in nettopObj.destinations]
# # sources = nettopObj.sources
# # print([s.name for s in sources])
# linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges]
# time = 0
# newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)]
# elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)}
# print(elfedDict)
# print("new tasks:", newtasks)
for sharelinkcost, uselinkcost in basetuples:
fedPriceDict = {fname: (sharelinkcost, uselinkcost) for fname in federatenames}
federates = [Federate(name = f, cash = 0, sharelinkcost = fedPriceDict[f][0], uselinkcost = fedPriceDict[f][1]) for f in set(federatenames)]
federateDict = {f.name: f for f in federates}
# print("element names:", nettopObj.elements)
elements = [Element(name = e, capacity=elementcapacity, size = 0, owner = federateDict[f]) for (e,f) in zip(nettopObj.elements, federatenames)]
elementDict = {e.name: e for e in elements}
sources = [e for e in elements if e.name not in nettopObj.destinations]
# sources = nettopObj.sources
# print([s.name for s in sources])
linklist = [Link(source = elementDict[e1], destin = elementDict[e2], capacity = linkcapacity, size = 0, owner = elementDict[e2].owner) for (e1, e2) in nettopObj.edges]
time = 0
newtasks = [Task(id = id + n, element=s, lastelement=s, size=size, value=value, expiration=time + 5, init=time, active=True, penalty=penalty) for n, s in enumerate(sources)]
elfedDict = {e: f for e, f in zip(nettopObj.elements, nettopObj.federates)}
# print("new tuple:", sharelinkcost, uselinkcost)
# print("length of cost value dict:", len(nettopObj.costValueDict))
# print(nettopObj.hashid, nettopObj.costValueDict)
# if (sharelinkcost, uselinkcost) not in nettopObj.costValueDict or nettopObj.costValueDict[(sharelinkcost, uselinkcost)] == 0:
# for f in federates:
# f.cash = 0
# f.sharelinkcost = sharelinkcost
# f.uselinkcost = uselinkcost
edgePriceDict = {e: fedPriceDict[elfedDict[e[1]]][0] for e in nettopObj.edges}
# print(edgePriceDict)
# print(nettopObj.hashid)
# print(fedPriceDict)
# print(linklist)
# print(nettopObj.destinations)
# print(len(newtasks))
# print(federates)
# print(linklist)
solutionObj = MILPSolution(nettopObj.hashid, time, fedPriceDict = fedPriceDict, fedValDict = {f: 0 for f in fedPriceDict.keys()}, edgelist = [])
solutionObj = optimizeMILP(elements = elements, linklist = linklist, destinations = nettopObj.destinations,
storedtasks = [], newtasks = newtasks, time = time, federates = federates, edgePriceDict = edgePriceDict,
solutionObj = solutionObj)
totalvalue = solutionObj.totalvalue
# print(solutionObj.sourceEdgeDict)
# print(solutionObj.fedValDict)
nettopObj.costValueDict[(sharelinkcost, uselinkcost)] = totalvalue
# print("New tuple cost and value:", sharelinkcost, uselinkcost, totalvalue)
def updateCostValue(objlist, proc, tempfilename):
global filename
if os.path.isdir("/home/abbas.ehsanfar/gurobi"):
hostname = os.environ['HOSTNAME']
os.environ['GRB_LICENSE_FILE'] = "/home/abbas.ehsanfar/gurobi/%s/lic%s/gurobi.lic"%(hostname,str(proc%30).zfill(2))
for k, nettopObj in enumerate(objlist):
# print("New topoology:", nettopObj.hashid)
calCostValue(nettopObj)
if k%20 == 0:
objDict = {obj.hashid: obj for obj in objlist}
with open(tempfilename, 'wb') as outfile:
pickle.dump(objDict, outfile)
# with open(filename, 'rb') as infile:
# hashNetworkDict = pickle.load(infile)
# for h, obj in objDict.items():
# hashNetworkDict[h] = obj
# with open(filename, 'wb') as outfile:
# pickle.dump(hashNetworkDict, outfile)
with open(tempfilename, 'wb') as outfile:
objDict = {obj.hashid: obj for obj in objlist}
pickle.dump(objDict, outfile)
def multiProcCostValue():
global nproc, filename
with open(filename, 'rb') as infile:
hashNetworkDict = pickle.load(infile)
topollist = list(hashNetworkDict.values())
N = len(topollist)
allProcs = []
for proc in range(nproc):
tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)
inds = range(proc, N, nproc)
objlist = [topollist[i] for i in inds]
p = Process(target=updateCostValue, args=(objlist,proc,tempfilename))
p.start()
allProcs.append(p)
for a in allProcs:
a.join()
finalDict = {}
for proc in range(nproc):
tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)
with open(tempfilename, 'rb') as infile:
hashNetworkDict = pickle.load(infile)
for h, obj in hashNetworkDict.items():
finalDict[h] = obj
with open(filename, 'wb') as outfile:
pickle.dump(finalDict, outfile)
for proc in range(nproc):
os.remove(filename[:-2] + '_proc%s.p'%str(proc).zfill(2))
def calAuctionScore():
global filename
print(filename)
if os.path.isfile(filename):
with open(filename, 'rb') as infile:
hashNetworkDict = pickle.load(infile)
# topollist = hashNetworkDict.values()
# if len(hashNetworkDict) < 1000:
# return
for k, topol in hashNetworkDict.items():
costdict = topol.costValueDict
maxtup = (0, 1000)
for mintup in [(500,501), (400,600)]:
if mintup in costdict:
topol.auctionscore += costdict[maxtup] - costdict[mintup]
# print(topol.auctionscore)
toplist = sorted(hashNetworkDict.values(), key = lambda x: x.auctionscore, reverse = True)[:10]
# print(filename, [e.auctionscore for e in toplist])
with open(filename[:-2] + '_top10.p', 'wb') as outfile:
pickle.dump(toplist, outfile)
with open(filename[:-2] + '_score.p', 'wb') as outfile:
pickle.dump(hashNetworkDict, outfile)
else:
return
def aggregate60Nodes():
for numfederates, numelements, edgedivider in [(4,20,7), (4,20,11), (4,20,3), (4,20,5)]:
filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider)
finalDict = {}
for proc in range(60):
tempfilename = filename[:-2] + '_proc%s.p'%str(proc).zfill(2)
with open(tempfilename, 'rb') as infile:
hashNetworkDict = pickle.load(infile)
for h, obj in list(hashNetworkDict.items()):
finalDict[h] = obj
with open(filename, 'wb') as outfile:
pickle.dump(finalDict, outfile)
def aggregateNetworks():
netlist = []
for (numfederates, numelements), edgedivider in list(fedeldensitylist):
filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d_top10.p'%(numelements, numfederates, edgedivider)
# if os.path.isfile(filename):
with open(filename, 'rb') as infile:
netlist.extend(pickle.load(infile))
hashNetDict = {net.hashid: net for net in netlist}
with open(dir_topologies + 'hashNetDict.p', 'wb') as outfile:
pickle.dump(hashNetDict, outfile)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Generate network topologies and compute their cost-value scores.")
parser.add_argument('--nproc', type=int, default=3, help='cores on server')
parser.add_argument('--n', type=int, default=3, help='cores on server')
args = parser.parse_args()
argsdict = vars(args)
nproc = argsdict['nproc']
time = 0
# basecost = [0, 200, 400, 600, 800, 1000]
seedlist = list(range(0,500))
# for (numfederates, numelements), edgedivider in reversed(list(product([(2,10), (2,15), (3,15), (2,20), (3,20), (4,20)], [3,5,7,11]))):
aggregateNetworks()
# for (numfederates, numelements), edgedivider in list(fedeldensitylist):
# filename = dir_topologies + 'hashNetworkDict_elements%d_federates%d_density%d.p'%(numelements, numfederates, edgedivider)
# elementnames = ['e%d'%(i+1) for i in range(numelements)]
# createNetTopologies()
# multiProcCostValue()
# calAuctionScore() | 1.859375 | 2 |
Pyrado/pyrado/tasks/sequential.py | jacarvalho/SimuRLacra | 0 | 12797776 |
import numpy as np
from copy import deepcopy
from typing import Sequence
import pyrado
from pyrado.spaces.base import Space
from pyrado.utils.data_types import EnvSpec
from pyrado.tasks.base import Task
from pyrado.tasks.reward_functions import RewFcn
from pyrado.utils.input_output import print_cbt
class SequentialTasks(Task):
""" Task class for a sequence of tasks a.k.a. goals """
def __init__(self,
tasks: Sequence[Task],
start_idx: int = 0,
hold_rew_when_done: bool = False,
verbose: bool = False):
"""
Constructor
:param tasks: sequence of tasks a.k.a. goals, the order matters
:param start_idx: index of the task to start with, by default with the first one in the list
:param hold_rew_when_done: if `True` reward values for done tasks will be stored and added every step
:param verbose: print messages on task completion
.. note::
`hold_rew_when_done=True` only makes sense for positive rewards.
"""
self._tasks = deepcopy(tasks)
self._idx_curr = start_idx
self.succeeded_tasks = np.full(len(self), False, dtype=bool)
self.failed_tasks = np.full(len(self), False, dtype=bool)
self.succeeded_tasks[:start_idx] = True # check off tasks which are before the start task
self.hold_rew_when_done = hold_rew_when_done
if self.hold_rew_when_done:
self.held_rews = np.zeros(len(self))
self.verbose = verbose
def __len__(self) -> int:
return len(self._tasks)
@property
def env_spec(self) -> EnvSpec:
return self._tasks[0].env_spec # safe to assume that all tasks have the same env_spec
@property
def tasks(self) -> Sequence[Task]:
""" Get the list of tasks. """
return deepcopy(self._tasks)
@property
def idx_curr(self) -> int:
""" Get the index of the currently active task. """
return self._idx_curr
@idx_curr.setter
def idx_curr(self, idx: int):
""" Set the index of the currently active task. """
if not (0 <= idx < len(self)):
raise pyrado.ValueErr(given=idx, ge_constraint='0', le_constraint=f'{len(self) - 1}')
self._idx_curr = idx
@property
def state_des(self) -> np.ndarray:
""" Get the desired state the current task. """
return self._tasks[self._idx_curr].state_des
@state_des.setter
def state_des(self, state_des: np.ndarray):
""" Set the desired state the current task. """
if not isinstance(state_des, np.ndarray):
raise pyrado.TypeErr(given=state_des, expected_type=np.ndarray)
self._tasks[self._idx_curr].state_des = state_des
@property
def space_des(self) -> Space:
""" Get the desired space the current task. """
return self._tasks[self._idx_curr].space_des
@space_des.setter
def space_des(self, space_des: Space):
""" Set the desired space the current task. """
if not isinstance(space_des, Space):
raise pyrado.TypeErr(given=space_des, expected_type=Space)
self._tasks[self._idx_curr].space_des = space_des
@property
def rew_fcn(self) -> RewFcn:
""" Get the reward function of the current task. """
return self._tasks[self._idx_curr].rew_fcn
def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int) -> float:
""" Get the step reward from the current task. """
step_rew = 0.
if self.hold_rew_when_done:
for i in range(len(self)):
# Iterate over previous tasks
if self.succeeded_tasks[i] or self.failed_tasks[i]:
# Add the last reward from every done task (also true for failed tasks)
step_rew += self.held_rews[i]
if not (self.succeeded_tasks[self._idx_curr] or self.failed_tasks[self._idx_curr]):
# Only give step reward if current sub-task is active
step_rew += self._tasks[self._idx_curr].step_rew(state, act, remaining_steps)
final_rew = self._is_curr_task_done(state, act, remaining_steps) # zero if the task is not done
# self.logger.add_value('successful tasks', self.successful_tasks)
return step_rew + final_rew
def compute_final_rew(self, state: np.ndarray, remaining_steps: int) -> float:
"""
Compute the reward / cost on task completion / fail of this task.
Since this task holds multiple sub-tasks, the final reward / cost is computed for them, too.
.. note::
The `ParallelTasks` class is not a subclass of `TaskWrapper`, i.e. this function only looks at the
immediate sub-tasks.
:param state: current state of the environment
:param remaining_steps: number of time steps left in the episode
:return: final reward of all sub-tasks
"""
sum_final_rew = 0.
for t in self._tasks:
sum_final_rew += t.compute_final_rew(state, remaining_steps)
return sum_final_rew
def reset(self, **kwargs):
""" Reset all tasks. """
self.idx_curr = 0
for s in self._tasks:
s.reset(**kwargs)
# Reset internal check list for done tasks
self.succeeded_tasks = np.full(len(self), False, dtype=bool)
self.failed_tasks = np.full(len(self), False, dtype=bool)
if 'start_idx' in kwargs:
self.succeeded_tasks[:kwargs['start_idx']] = True
# Reset the stored reward values for done tasks
if self.hold_rew_when_done:
self.held_rews = np.zeros(len(self)) # doesn't work with start_idx
def _is_curr_task_done(self,
state: np.ndarray,
act: np.ndarray,
remaining_steps: int,
verbose: bool = False) -> float:
"""
Check if the current task is done. If so, move to the next one and return the final reward of this task.
:param state: current state
:param act: current action
:param remaining_steps: number of time steps left in the episode
:param verbose: print messages on success or failure
:return: final return of the current subtask
"""
if not self.succeeded_tasks[self._idx_curr] and not self.failed_tasks[self._idx_curr] and self._tasks[self._idx_curr].is_done(state):
# Task has not been marked done yet, but is now done
if self._tasks[self._idx_curr].has_succeeded(state):
# Check off successfully completed task
self.succeeded_tasks[self._idx_curr] = True
if verbose:
print_cbt(f'task {self._idx_curr} has succeeded (is done) at state {state}', 'g')
elif self._tasks[self._idx_curr].has_failed(state):
# Check off unsuccessfully completed task
self.failed_tasks[self._idx_curr] = True
if verbose:
print_cbt(f'Task {self._idx_curr} has failed (is done) at state {state}', 'r')
else:
raise pyrado.ValueErr(msg=f'Task {self._idx_curr} neither succeeded or failed but is done!')
# Memorize current reward
if self.hold_rew_when_done:
self.held_rews[self._idx_curr] = self._tasks[self._idx_curr].step_rew(state, act, remaining_steps=0)
# Give a reward for completing the task defined by the task
task_final_rew = self._tasks[self._idx_curr].final_rew(state, remaining_steps)
# Advance to the next task
self.idx_curr = (self._idx_curr + 1) % len(self)
else:
task_final_rew = 0.
return task_final_rew
def has_succeeded(self, state: np.ndarray) -> bool:
"""
        Check if this task is done. The SequentialTasks is successful if all sub-tasks are successful.
:param state: environments current state
:return: `True` if succeeded
"""
successful = np.all(self.succeeded_tasks)
if successful and self.verbose:
print_cbt(f'All {len(self)} sequential sub-tasks are done successfully', 'g')
return successful
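# Hedged usage sketch (not part of the module): SequentialTasks is normally
# built from concrete Task implementations (the sub-task names below are
# placeholders) and queried once per time step by the environment.
# task = SequentialTasks([reach_subtask, hold_subtask], hold_rew_when_done=True)
# r = task.step_rew(state, act, remaining_steps)
# done = task.has_succeeded(state)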
| 2.546875 | 3 |
Python/strings/long2bin.py | ebouaziz/miscripts | 0 | 12797777 |
#!/usr/bin/env python2.7
# Deal with long integer encoded as binary strings
# sample long integer
l = (2**32-2)*(2**32-3)*(2**32-7)
# input long integer
print "%x" % l
# create a list
ls = []
while l:
# extract each byte of the long integer and store it into the list
bot = l&((1<<8)-1)
l >>= 8
ls.append(bot)
# pad the list up to 32 items
ls.extend([0]*(32-len(ls)))
# reverse the list
ls.reverse()
# build up output data (signature)
binstring = [chr(c) for c in ls]
# output debug string
print "(%d) %s" % (len(binstring), ''.join('%x' % c for c in ls))
| 3.484375 | 3 |
genetic_algorithm.py | Ziggareto/national_cipher_challenge | 0 | 12797778 | import basics
import random
import math
import matplotlib.pyplot as plt
import numpy as np
import cProfile
import pstats
import time
class node():
# Has a keyword that defines it's means ofo decrypting the text
# Can reproduce to make a mutated offspring
def __init__(self, key=None):
self.key = key
def reproduce(self):
pass
class algorithm():
#has a population of nodes with keywords, can breed to make offspring with random
#mutations/changes, can cull to select for best english scoring offspring
def __init__(self, text, population_size, breeding_times, node_class):
self.text = text
self.breeding_times = breeding_times # how many times each parent will breed
self.population_size = population_size
self.population = []
self.past_generations = []
self.complete_scores = [] # stores the complete score history
        self.summary_scores = [] # stores [5th percentile, median, 95th percentile] per generation
self.node = node_class # stores the type of node
self.scorer = basics.ngram_score('english_trigrams.txt', 'english_quadgrams.txt')
self.base_score = self.scorer.score(basics.generate_random_text(len(text)))
self.english_score = self.scorer.score(basics.generate_english_text(len(text)))
self.cycles_count = 0
self.graphing = False #When turned on, cull() passes new scores to the graph
#self.initialize_population()
def initialize_graph(self):
self.graphing = True
self.ax = plt.gca()
if len(self.summary_scores) > 0:
start_num_points = len(self.summary_scores)
            xdata = np.array([x for x in range(1, start_num_points + 1)])
self.lines = [self.ax.plot(xdata, [score[num] for score in self.summary_scores])[0] for num in range(3)]
else:
self.lines = [self.ax.plot([], [])[0] for num in range(3)]
self.ax.relim()
self.ax.autoscale_view(True, True, True)
plt.ion()
plt.show()
def update_line(self, line, new_data):
#Given a line and new_data of the form [new_x, new_y], adds on the new values
line.set_xdata(np.append(line.get_xdata(), new_data[0]))
line.set_ydata(np.append(line.get_ydata(), new_data[1]))
def update_graph(self):
for num in range(len(self.lines)):
self.update_line(self.lines[num], [self.cycles_count, self.summary_scores[-1][num]])
self.ax.relim()
self.ax.autoscale_view(True, True, True)
plt.draw()
plt.pause(0.01)
def initialize_population(self):
# Initializes the population with size self.population, hopefully near to endpoint
pass
def score(self, my_node):
return(self.scorer.score(self.decrypt(self.text, my_node.key)))
def decrypt(self, text, key):
pass
def cycle(self, ntimes=1):
# Does ntimes cycles of breed and cull
for num in range(ntimes):
self.cycles_count += 1
self.breed()
self.cull()
def run_to_score(self, score):
# Keeps cycling until the latest population's mean score is greater than score
while True:
self.cycle()
if self.summary_scores[-1][2] > score:
break
def turnover(self, breeding_times, num_cycles_small, num_cycles_large, cullsize):
for num in range(num_cycles_large):
for num in range(num_cycles_small):
self.breed(size=len(self.population), times=breeding_times)
self.cull(size=cullsize)
def breed(self, size=None, times=None):
"""Replaces self.population with a whole load of newly bred offspring, randomly
selecting who pairs with who"""
if size == None:
size = self.population_size
if times == None:
times = self.breeding_times
self.offspring = []
for pop_num in range(size):
for breed_num in range(times):
self.offspring.append(self.population[pop_num].reproduce())
# archive the parent generation, make the new population the offspring.
self.past_generations.append(self.population)
self.population = self.offspring
def cull(self, size=None):
#size is the final size (post culling) of the population
if size == None:
size = self.population_size
"""Removes the bottom scorers of the population until the population fits
population_size"""
# From each node in population we get [node_index, node_score] in population_ranking
population_ranking = [[x, self.score(self.population[x])] for x in \
range(len(self.population))]
population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest
# The new population is the top population_size guys as ranked
# x[0] is the index of the node
self.population = [self.population[x[0]] for x in population_ranking[-size:]]
# The actual scores, with the same indices as their node counterparts in population
self.ranking = [x[1] for x in population_ranking[-size:]]
#score keeping
self.complete_scores.append(self.ranking)
botpercentile = self.ranking[math.floor(0.05*size)]
toppercentile = self.ranking[math.floor(0.95*size)]
median = self.ranking[math.ceil(size/2)]
self.summary_scores.append([botpercentile, median, toppercentile])
# if graphing is turned on, send the new data to the graph
if self.graphing == True:
self.update_graph()
class genetic_algorithm(algorithm):
def breed(self, size=None, times=None):
"""Replaces self.population with a whole load of newly bred offspring, randomly
selecting who pairs with who"""
if size == None:
size = self.population_size
if times == None:
times = self.breeding_times
self.offspring = []
# 0 will increment each time a node breeds, until it reaches breeding_times
available = [[x, 0] for x in self.population] # who is left available
while True:
# take the first node in available as the base, breed them with random partners
# in available, then remove first node from available
# range(...) ensures we breed the right number of times
for breed_count in range(available[0][1], self.breeding_times):
try: # try to choose a partner from those in available
choice = random.choice(available[1:])
except IndexError: #Sometimes the last guy gets left out
#print('ruh roh')
choice = [random.choice(self.population), -1]
# breed with the chosen partner
self.offspring.append(available[0][0].reproduce(choice[0]))
# increase the partner's breed count by one
choice[1] += 1
# if the partner's bred the requisite number of times, remove them from available
if choice[1] == self.breeding_times:
available.remove(choice)
# remove our start node from available
del(available[0])
# if everyone's bred, break the loop
if len(available) == 0:
break
# archive the parent generation, make the new population the offspring.
self.past_generations.append(self.population)
self.population = self.offspring
def algorithm_avg_time(n, score, algorithm, *args, graphing=False, **kwargs):
"""Makes n algorithms, returns the avg time for them to run to a score, given"""
algorithms = []
for num in range(n):
algorithms.append(algorithm(*args, **kwargs))
if graphing:
algorithms[-1].initialize_graph()
prof = cProfile.Profile()
for num in range(n):
print('{0} out of {1}:'.format(num+1, n), end='')
prof.runctx('algorithms[num].run_to_score(score)', globals(), locals())
if graphing:
for line in algorithms[num].lines:
line.remove()
stats = pstats.Stats()
stats.add(prof)
return(stats)
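# --- Illustrative sketch (not part of the original module) ---
# `algorithm`/`genetic_algorithm` are abstract: a concrete cipher supplies a node class with
# `reproduce()` and overrides `decrypt()`/`initialize_population()`. The names below
# (`caesar_node`, `caesar_algorithm`) are invented for illustration; only the hook points
# mirror the classes above.
#
# class caesar_node(node):
#     def reproduce(self, other=None):
#         # combine/mutate self.key (and other.key) and return a new caesar_node
#         ...
#
# class caesar_algorithm(genetic_algorithm):
#     def initialize_population(self):
#         self.population = [caesar_node(key=random.randrange(26))
#                            for _ in range(self.population_size)]
#     def decrypt(self, text, key):
#         # shift `text` back by `key` and return the candidate plaintext
#         ...
#
# ga = caesar_algorithm(ciphertext, population_size=50, breeding_times=2,
#                       node_class=caesar_node)
# ga.initialize_population()
# ga.run_to_score(ga.english_score * 0.9)  # cycle until scores approach English-like text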
| 3.3125 | 3 |
nmutils/gui/ptychoViewer/design_rc.py | alexbjorling/nanomax-analysis-utils | 3 | 12797779 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.5)
#
# WARNING! All changes made in this file will be lost!
# from PyQt5 import QtCore
from silx.gui import qt as QtCore
qt_resource_data = b"\
\x00\x00\x19\x3d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x32\x00\x00\x00\x32\x08\x06\x00\x00\x00\x1e\x3f\x88\xb1\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe3\x01\x1e\x09\x34\x07\xfb\x3d\x97\x4b\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x18\xa1\x49\x44\x41\x54\x68\xde\x35\x99\x67\
\x7c\x55\x55\xfa\xb6\xaf\xbd\x4f\x4b\x4e\x7a\x25\x09\x25\x09\x09\
\x1d\x44\x91\x26\x8a\xe8\x20\x4a\x71\x44\x01\xb1\x2b\x16\xb0\xcc\
\x60\xc1\xae\xef\x08\x63\x77\xfe\x3a\xa2\xa0\x88\x0c\x16\xac\x88\
\x0a\xa2\xa0\x02\x4a\x13\x51\x40\x40\x7a\x42\x4d\x02\x49\x48\x2f\
\x27\x27\xa7\xee\xfb\xfd\xb0\xb7\x9f\xf2\xcb\x29\xfb\xac\x67\xad\
\xfb\xb9\xcb\x7a\x8c\xbc\xe8\x70\xcd\xef\xdc\xc1\x75\x07\x80\x10\
\x3c\xd0\x7b\x04\x1b\xbb\xee\x66\xcf\xe6\x28\xae\x19\x49\x30\xa6\
\x83\xfa\x39\x30\xe3\x1c\xe8\x0d\x0c\xee\xf0\x32\xe3\xd9\x08\x2c\
\xc9\x83\x48\x36\x0c\x38\x08\xe3\xc5\xee\x3b\xe0\x9b\x42\xd8\x0d\
\x7c\xb0\x03\xd2\xbe\x06\xf7\x27\x5d\xe0\xf4\x28\x70\xf9\x20\xe5\
\x04\x47\x56\xed\xa0\x6a\x84\x49\x69\xd8\x22\x73\x85\x9b\xb4\xc7\
\xa6\x42\x73\x6f\x02\x23\x9b\x49\x78\x66\x09\xee\x6f\xc3\x84\xa3\
\x26\xbe\x7d\x97\x62\xb5\xe7\xa3\x59\x3f\x62\xae\xf6\xf3\xf6\xab\
\x11\xae\x5f\x94\x8f\x7b\xe5\x85\x7c\x78\xaf\x9f\xa9\x89\xaf\x52\
\x7b\xf6\x3a\x1a\xc9\x67\xe0\xb7\x43\xd9\xfe\xf7\xcf\x30\xbf\x6b\
\xde\xcf\xdf\x6a\x81\xb9\x40\x03\x6c\xe9\x7a\x88\x7f\x54\x47\x71\
\x7d\x0d\x9c\xee\x0f\x87\xa1\x79\x3f\x1c\xc7\xcf\x50\x60\xc6\xbf\
\x2c\xf8\x4f\x7f\x68\x1e\x00\x1d\xbd\xe0\x60\x0a\x08\xb2\x42\x70\
\x3e\xb0\x28\x0c\x49\x19\x10\x9e\x0a\x34\x74\x03\x6e\x82\xf8\xc3\
\xd0\xf2\x02\x7d\x2f\x99\xc1\xa8\x45\x09\x14\xd7\x42\x5a\x38\x0e\
\x1d\x6d\x44\x18\x88\xf7\xb7\x97\x30\x27\x7e\x47\xa8\xde\xc0\xb7\
\x29\x87\xd6\xa3\x23\xd0\xa9\x2c\x5c\x8f\x4d\xc0\x38\xe4\xa1\xf7\
\xeb\x29\xf0\x99\x8b\xe3\xea\x46\x60\xed\x83\x64\x86\xfb\x91\x1c\
\x4d\xa7\xa6\xc5\xa0\xe0\x64\x22\x93\xdf\x9a\x8b\x59\x19\x8c\x92\
\xb8\x0d\xc8\x81\x78\x14\xc2\x64\x91\x91\x68\xc2\x56\x17\xb8\xce\
\x62\x8d\x83\x0d\x37\xc1\x68\x82\xdc\x70\x77\x02\x2c\xcc\x05\xf2\
\xc1\x30\xe0\x9c\xcd\x1c\x7b\xb3\x8d\xde\xcf\xc2\xae\x3e\xb0\x0b\
\x88\xb9\xc1\x9d\x03\x81\x81\xc0\xd3\xfb\xc1\x3c\x03\xec\x43\x44\
\x11\xb3\x49\x9a\xf3\x24\x7c\x9c\x45\x6c\x8c\xa0\xcb\xef\xb8\xe8\
\x0a\x94\x63\x1a\x3e\x7c\x5d\x93\xd1\xf8\x16\xd2\x9a\x9b\x70\x75\
\xf6\x84\x68\x6f\x28\x3d\xc3\xd0\x4b\xcb\xc9\x8c\x2c\x62\xdf\xb2\
\x9d\xdc\xb2\xfb\x08\x1e\xef\x11\x16\x56\x26\x71\xdb\xb1\x5f\xf8\
\x57\x82\x87\x65\x56\x0e\x2c\xd9\xde\x45\x5a\x81\x74\x27\x6a\xac\
\x43\x17\xcb\xa3\xd8\xa3\x48\xd9\xf9\xd2\xe0\x2e\xda\x7d\x08\x0d\
\x94\x57\x7f\x84\x90\x0a\x3d\x52\x62\xa2\x94\x9a\x2c\x0d\x34\x54\
\x7f\x00\x5d\x27\xf4\x80\xd0\x0f\x42\xaf\x0b\xad\x15\xaa\x8d\xa3\
\xaf\x84\xaa\x02\x28\x7e\xef\x00\x89\xe7\xd5\x41\x95\x9a\x39\x25\
\x8b\x8d\xb2\xcc\x0f\x14\xfc\x0d\xc5\x6f\xf0\x49\xcc\x96\xd8\xa1\
\x28\x87\xa4\xbf\x77\x97\x26\xa3\x76\x36\xa9\x9c\x46\x9d\xa0\x42\
\xed\x4f\xa5\xc9\x1a\xd8\x4f\x07\x8d\x66\x3d\x52\x70\x4c\x4a\xfe\
\x51\x67\xb6\xf8\xb4\xe3\xdc\x1b\x34\x3f\x7f\xa9\x46\x0e\x8d\xeb\
\x92\xfc\x72\x99\x59\xde\x36\x82\x35\xc0\x71\xc8\xf8\x16\x26\x07\
\xa2\xb8\x3c\x1e\x20\x09\xc6\x9d\x65\x75\x29\xcc\xa9\x8f\x30\xe4\
\x66\xa0\x2a\x19\x22\x3e\x88\x87\x69\xb9\x5e\xfc\x38\x00\xae\x06\
\x1e\x04\x2e\x8c\xdb\xd0\x3a\x5f\x90\x6c\xc0\x84\x38\x74\xab\x07\
\x1e\x39\x88\xc6\xfe\x17\x3f\x2f\x93\xce\x12\x0c\xda\x31\xac\x3c\
\x12\x67\x8f\x20\x3e\x35\x0c\x1c\x00\x22\x18\xf8\xb0\x36\xf6\x83\
\xc4\x44\x7c\x74\x12\x25\x4e\xb1\xf9\x09\xc9\x2b\xf2\xe0\xf0\xcd\
\xf4\x30\x3b\x99\x68\xee\x87\x0b\xc6\x53\xf0\xa6\x20\xdf\x4b\x81\
\x37\x99\xe2\x16\x93\x7b\x3c\x07\x31\x17\x0e\xe9\x44\x61\x60\x00\
\x18\xc5\x30\x39\x0e\x74\x58\xd0\xe5\x04\xc1\x5b\xa1\xd5\x03\x33\
\x16\x00\x1b\x92\x41\x01\x48\x6d\xe5\xf4\xdc\x28\xf7\x3c\x0d\x1b\
\x80\x33\x67\xa0\xe8\x13\x48\xfe\x10\xfa\x76\x40\xb3\x01\x9e\xf5\
\x90\x38\x2a\x17\x96\x65\x63\x5a\x10\xfb\x5f\x13\xfc\xed\x23\xa0\
\x02\x38\x06\x04\x60\xdf\x38\x4c\xd3\x0b\x7d\x77\x03\xc2\x45\x3b\
\x66\xa0\x1f\x84\x3a\xf1\x70\x9c\x46\x92\x88\x64\x9c\x02\xab\x15\
\xd1\x04\xf1\x54\x0a\x4f\x0f\x85\x13\x82\xba\x22\x16\x1d\x9b\x42\
\x63\xd5\xc5\xb4\x9b\x01\xc2\xed\x43\xe1\x93\x76\xd4\xb1\x12\xe9\
\x69\x74\xa8\x19\x6d\xaf\x40\xba\x18\x59\x43\xd0\xd3\x21\x54\xbf\
\x18\xc9\x9f\x2a\xb9\xfc\x52\x8a\xa9\x8a\x15\x86\xae\x10\xfa\x57\
\x0c\x35\xcd\x46\x56\x4f\xa4\x81\x09\x52\x41\x9a\xa2\x8b\xd0\x71\
\xa1\xf8\xcd\x3e\x89\xc7\x25\x56\x4a\x3d\x67\x4b\xcb\x90\x36\x21\
\xdd\xe4\x96\x98\x22\xf1\x9e\xc4\x42\x89\xc9\xd2\xa7\x48\x2c\x95\
\x28\x73\xe0\x85\x2c\x5e\x56\x05\x96\xda\x78\x45\x4a\x19\xae\x06\
\xa4\x2a\x1a\x14\x4b\xb9\x43\xba\x20\x59\x61\xe3\x65\xfd\xb3\x58\
\x7a\xce\x1f\xd6\xdb\xee\x23\xda\x53\x34\x5f\x9c\xe8\x40\x0d\xfb\
\x90\x1e\x42\x7f\xb6\x23\x6b\x13\xd2\xf9\xa6\xc2\xff\x41\x95\xeb\
\x90\xd2\x3d\x92\xd1\x53\x2a\x71\xab\x6a\x13\xfa\x8f\xd0\xea\xb3\
\x28\x3a\x16\x89\xf3\x24\x6e\x92\xb8\x56\x62\x92\x42\x4f\xb8\x55\
\x65\x21\xdd\x80\xc4\x1d\x12\x1f\x49\xfc\x21\x99\x9f\x4b\x85\x57\
\x4b\x6b\x90\x26\xba\x25\xde\x95\xc5\x59\x89\x72\x69\x46\x0f\xa9\
\x70\x8c\xe2\x54\x2a\x4a\x48\x3a\xb7\x97\xc4\x1a\x95\x63\x29\x98\
\xfb\xb0\xe4\x9a\xa5\x83\x58\x6a\x64\x9b\xe4\x2f\x52\x83\xf7\x2b\
\xad\xa1\x55\xe3\x86\x48\x1f\x12\x50\xa5\x7b\xad\xf4\xb8\x21\x2a\
\x6a\xd1\xce\x2a\xa4\xb5\x68\xb3\xec\x82\xac\x6b\xd1\x9a\x18\xd2\
\x55\x48\xd9\x48\x69\x86\xaa\x3f\x44\xff\x16\x3a\xb5\xc6\x94\x92\
\xd2\x25\x86\x38\x8b\xbd\x47\x32\x6e\x56\xe4\xca\x62\x9d\x6a\x41\
\x9b\x84\x82\x3f\x20\x25\x96\x38\xef\x7f\x26\xb1\x4c\xe2\x5b\x29\
\xfd\x46\x69\x2b\xd2\x85\xa5\x12\x6f\x49\x6c\x96\x78\x43\x7a\xd8\
\x23\xf1\xaa\xc4\x26\x69\xd8\xc5\x12\x1f\xa9\x91\x98\xe4\xba\x5d\
\x71\xf6\xa9\x81\xb0\xc2\x5c\x2b\xb9\x6e\xd1\x7a\xc2\xba\x17\xe9\
\x3b\x62\x6a\xf0\xbd\x22\x0d\xf5\xab\x82\x45\x22\xb2\x1b\x55\xef\
\x40\x91\x25\xe8\xcb\x38\x52\x11\xd2\x14\xa4\x9b\x4c\xc9\x6b\x4a\
\x49\x1e\x55\x2c\x43\x5f\x06\x91\xee\x43\x72\x17\x4a\xfc\x4d\x62\
\x92\x64\x9e\x27\x2b\xbb\x50\x81\x95\xe8\x47\x0b\xad\x11\xfa\x46\
\xe8\x90\x90\x16\x20\x65\xa4\x49\xdc\xee\x2c\xf2\x6b\x89\xef\xa4\
\xec\xcb\xa5\xf7\x0d\xa9\xa0\xbf\xc4\xff\x24\xf6\x4b\xbd\x2f\x95\
\xf2\x87\x4b\xfc\x2c\x15\x3d\x28\x31\x47\x61\x02\x12\x0f\xa9\x93\
\x5a\x9d\xa6\x46\x32\x27\xa9\x9a\x4a\x7d\x8d\xb4\x84\x98\x54\x38\
\x5e\xe2\x62\x45\xf8\x42\xaf\x13\x93\xa1\xc7\x11\xdd\x40\xa3\x20\
\xbe\x07\xdc\x33\x93\xc0\x17\x81\xd0\x00\xf0\x1d\x22\xb8\x3c\x82\
\xd5\x08\xc9\xff\x01\xca\x52\x80\x9e\xe0\xe9\xc0\xba\xfe\x34\x07\
\x66\x84\x38\x3a\x0a\x48\x80\x0e\x5b\x5d\x70\x01\x05\x40\x7e\x0c\
\x52\x6a\xc0\xfc\xb7\x1b\x3e\x1a\x01\x91\x99\x40\x37\xa0\x0e\x92\
\x0e\xc3\x9d\xcf\xc3\xfb\xf9\xd0\xfe\x36\x10\x83\x5e\xf7\xc3\xd1\
\x6f\x80\x0e\x2c\x52\x30\xe9\x0e\xfc\x8f\x0a\x9e\x24\x42\x23\x99\
\x74\x52\x46\x37\x06\xf3\x13\x49\x7c\x03\xc6\x25\x9c\xd2\x60\xd6\
\x53\x42\x12\x60\x54\xfd\x82\xaa\xfa\xc2\xd6\x4c\x98\xbd\x15\x12\
\xe7\xb9\xc0\x63\x41\xc8\x84\xac\x38\x24\x00\xbf\x00\x5d\x81\x01\
\x06\x8d\x17\x19\x6c\xb8\xdc\xe2\x54\x81\xfd\x52\x9f\xd3\x90\x17\
\x86\xf6\x1c\xd8\x92\x0a\x85\xc0\xc5\xe5\xe0\x8d\x81\x2b\x05\x94\
\x01\x91\xd3\x90\xb8\x20\x17\xde\xeb\x0f\xa1\xa1\xc0\x40\xe0\x0f\
\x18\xbe\x1c\xf6\x0f\x86\xce\x47\x81\x1a\x9b\x3a\x69\x02\xb6\x42\
\xca\x6e\x08\x05\x89\x45\xd7\x53\x8b\x81\x17\x93\x6c\x7e\xc0\x74\
\x7d\x4c\x34\x3e\x8f\x43\x14\x73\x12\x17\xa9\x40\x42\xfa\x19\x8c\
\x52\x19\xca\x05\xba\x20\x2e\x06\xee\x3a\x0c\xfe\x76\x30\xbf\x04\
\x3e\x07\x32\x80\xcb\xa0\xf9\x2a\xd8\x71\x11\xb4\x87\xa1\xdf\x09\
\x83\x82\xed\x90\xb1\x0a\xd8\x9e\x08\xa6\x05\xe3\xc3\xfc\xf4\xb1\
\x18\x14\x85\xdc\x1b\x80\x03\x06\x74\x26\xa2\xe2\x04\x3a\x1f\x69\
\xc7\xec\x1b\xc3\xdd\x28\x5c\x8b\x4d\x8c\xd5\xbd\xa1\xf1\x6a\xa0\
\x18\x68\x05\xb2\x80\x1e\xc0\x66\x48\xff\x11\x46\xee\x04\x3f\xf0\
\xdd\xe5\x28\xf2\x36\x27\x29\xc1\x02\x0a\x68\x04\xc2\xb4\x51\xc0\
\x29\xa0\xbb\x51\x49\x81\xf1\x14\x46\x49\x03\xc6\x45\x42\x5d\x81\
\x66\xc0\x04\x6e\x06\xa6\x6e\x80\x84\xc7\x9d\xcd\x99\x0a\xca\x85\
\x78\x1d\xb8\xd7\x02\x95\x69\xd0\x91\xe5\x54\x98\x68\x0b\x27\x89\
\x58\x0f\xfc\xc0\xb6\xf9\x21\x2e\x3a\x08\xc6\x38\x2f\xd4\x74\x07\
\x86\x01\x17\x01\x67\xc0\xa8\x85\x9e\xab\x88\xdd\xd6\x8c\xbb\x05\
\xd8\xe9\x81\x5f\xde\x06\x75\x01\x0a\xc1\x58\x05\xd7\x3e\x0b\x97\
\xc6\xed\x47\xee\x02\xfe\x04\x36\x4f\x27\xca\x62\x56\x92\x4e\x77\
\x1b\xd8\xa4\xb2\x01\x1f\x75\x98\xf9\x8f\x42\xcd\x1c\x5a\xcc\xe1\
\xb8\x83\x98\x44\xb0\x68\x02\x7c\xc0\x1e\xe0\x9a\x74\xa0\x08\xa8\
\x05\x16\x79\x30\x82\x83\x70\xd3\x08\xc9\x95\xd0\x91\xe8\x14\x60\
\x02\xe9\xf6\x8e\xfa\x0e\x50\x7b\x67\x88\x46\x20\x94\x05\x89\x7d\
\xa3\x50\xd3\xe2\x7c\xae\x0b\x60\x80\x4a\xe0\xf8\x60\xdc\xcf\x54\
\xd9\x4d\x45\x1c\xf8\xc2\xfe\x21\xcf\x09\x98\xf9\x13\x5c\xe9\x3c\
\x36\xd9\xf9\xea\x70\xc0\xb7\x06\xf7\xba\xc5\x0c\xe6\x01\xd2\x71\
\x93\x42\x94\x44\x0e\x63\x31\x91\xfa\x9a\x67\x68\xe7\x26\x16\x58\
\xc9\xb8\x5d\xce\x77\xe5\x34\xea\x21\x60\x7b\x29\x8c\xbd\x0c\xc8\
\x05\xd6\x47\xa1\xf2\x28\x14\xc3\xe9\x3b\x0c\x62\x45\xb5\xc4\x32\
\x6a\x09\xf9\x21\xec\x03\x9f\x0f\x3c\xfd\xa0\xc6\x03\x41\xa0\x3c\
\x0f\x0a\xbf\x12\xe9\x5b\x1a\x61\xc3\x17\xf0\xe7\xaf\xd0\x2c\xa8\
\x2e\x80\xa6\x49\xc0\x14\xec\x4f\x36\x00\x27\x81\xef\x60\x76\x0d\
\xfc\xd3\x81\x45\x14\x38\x88\xcd\x1e\x06\xe0\x8b\x62\x10\x26\x8f\
\x18\xb5\x24\xd0\x8c\x41\x3a\xb3\x39\x0c\x84\x28\xa1\x37\x70\x15\
\xe0\x6e\xc0\xc4\x87\x45\xcc\x39\x91\x62\x20\x25\xe8\x3c\x34\x05\
\x88\x79\xc0\xf4\xc2\xe8\x36\x22\xb7\x59\x54\x03\x1d\x06\xb8\xbd\
\xa0\x64\x68\xf4\xd8\x20\xf3\x3b\x48\xf7\x01\x4d\x09\x10\xbd\x10\
\x32\x87\x05\x09\xb9\xcb\xf0\x76\x82\xe7\x87\x72\x98\xb7\x1b\x6a\
\x9f\x71\x8e\x7b\xa7\x8d\x9d\x8b\x6a\x60\x2c\xd0\xe2\xf4\x7b\xb2\
\x53\x40\xa7\x43\x83\x56\x04\xa8\xc7\xcf\x59\xb2\x30\x38\x8b\x9f\
\x6c\x4e\x72\x3e\x71\x92\x8c\xf7\x71\xa9\x1b\x85\x0c\xc4\x5d\x48\
\x0c\x37\x50\xee\xb4\xdd\x2c\x60\xf8\x77\xc0\x07\xb6\x25\x22\x16\
\xb5\x51\xb0\xb4\x88\x9e\x1f\xb6\xd3\xd3\x48\x00\xb7\x0f\x92\x5b\
\xe1\xfc\x5a\x62\x33\xe1\xf4\x14\xfb\x24\x3d\x40\xe9\x72\x70\xcf\
\x4d\x83\x53\xbd\x21\x9e\x47\x12\x9d\xe0\x09\x40\x34\x19\x62\xfd\
\x9c\x9e\x6a\x06\x32\x81\xc1\x50\x57\x85\x7a\x74\x80\x1b\x8c\x6c\
\xe7\xad\x0c\xc0\xed\xc0\xab\xdc\x07\x64\xe0\x61\x0f\xd9\xe4\x13\
\xe7\x02\xdc\x34\x92\x4a\x10\x94\x0f\x74\xe0\x36\x77\x61\xf8\x94\
\xa3\xb1\xd4\x13\x02\xfa\x02\xcf\x34\x42\x97\xbb\x80\xad\x29\x30\
\xb9\x9d\xce\xfb\x21\xbc\x07\xd2\x3f\x32\xa0\x5c\x50\xef\x03\x2b\
\x05\xa2\x3d\xed\x10\x43\x0b\x3c\xb7\x8d\xdf\x9f\x86\xf4\x3a\xe8\
\x33\xd5\x80\x6d\x53\x81\xee\x0e\x36\xdd\x0e\x5e\x92\x9d\xf3\x8e\
\xd9\xcd\x4f\x9e\xa3\x38\x1f\xc3\xa2\xb7\xd1\x35\x60\xec\x75\x5a\
\x27\xe4\x6c\x62\x05\x30\x6f\x18\x58\xf7\x01\xa3\x09\x90\x88\x89\
\x9f\x1d\x24\x53\x80\x9b\x16\x44\x2a\x06\xed\x80\x2b\x6f\x5e\x74\
\xde\xb9\x58\xcc\x04\xae\x6b\x83\xbc\x23\xce\xf3\x7f\x8b\xc0\x2e\
\x2f\x9e\xb4\x38\x5b\x67\xc0\xaf\x77\x43\xcb\x3f\x40\x17\xc5\x49\
\x09\x86\x31\xab\x23\x10\x8a\x80\x51\x8b\x75\x63\x3b\x4d\x43\x20\
\x7b\x0f\x24\xaf\x32\xe0\xac\x1b\x38\x0d\xb4\x81\xcf\x05\x39\x35\
\x30\xf8\x28\xf4\xf9\x12\x9a\x16\x40\xc4\x0b\x1a\x0d\x94\xd8\x05\
\x46\x57\x60\xf4\xb4\x20\xd5\xd6\x4b\x92\x81\x6d\xc0\xd2\x12\x08\
\x3c\xe7\x54\xe6\xc3\xcd\x29\x22\x18\x94\xf2\x3d\x09\xf8\x68\x24\
\x15\xe1\xe6\x00\x60\x3c\x2e\x34\x06\x98\xb0\xd7\xc1\xe9\x61\x60\
\x08\x34\x64\x40\xd6\x63\x60\xec\x02\xfc\x49\x58\x53\x23\xec\xb8\
\x39\xca\xee\x7e\x10\x31\xe1\xbc\x6a\x28\xfd\x15\x92\xc3\x70\x70\
\x3a\x54\x7b\xa0\x67\x08\x06\x6c\x00\xef\xcf\x6e\x62\xbd\x5d\x44\
\x0b\xc3\x44\x4a\x21\xad\x19\x8c\x9f\xbc\xf0\x4e\x1a\x54\x5e\x65\
\x0b\x13\x5d\x9c\x86\xef\x07\xde\xb7\x20\xfb\x10\x3c\xbc\x0d\x06\
\xc7\x61\x25\xb0\x22\x07\xea\x9e\xb0\x29\xc8\xa8\x05\x8d\x27\x48\
\x29\x1e\xca\x70\x73\x14\xa3\xcf\xab\xd0\xd2\x0b\xac\x54\x1a\x1a\
\x5e\xc2\x68\x8e\xa3\x0a\x13\x06\xaf\xb7\xc5\x96\xdf\x6d\x24\xd4\
\x7f\x0c\x1b\x7d\x30\xfd\x59\xe0\x95\x3c\x9b\x3e\x53\x92\x60\xc8\
\x4e\x8e\x7e\xda\xcc\xa6\x02\x1b\x30\xa5\x0e\xc1\xa4\x01\xed\x40\
\x7f\xc0\x72\x40\x97\xb2\x1d\xdc\x73\xbd\x18\xdb\x87\x43\x60\x18\
\xf6\xf5\x45\xa1\x73\x5a\x47\x1c\x85\x1f\x07\xb8\x11\x11\x0c\x63\
\x09\x5c\xf1\x5f\xd8\x52\x02\xc1\x73\x81\x09\x58\x4c\xc4\x34\x1f\
\x02\xb7\x0b\x2b\xf2\x10\x15\xf4\x27\x07\x51\xe5\x3a\x4b\xbf\xd4\
\x67\x20\xb0\x0d\x3c\x60\xe8\x35\xd4\x36\x13\x52\xd7\x39\xc7\x9a\
\x0a\xbc\x66\x6f\xc4\xe6\x75\xb0\x3c\x03\x1e\x5d\x09\xc5\x37\x26\
\x43\xa8\x14\xe8\x05\xae\xc3\x30\xb1\x8c\xda\xd7\xa2\xfc\xd6\xcb\
\xee\xcf\x7c\xa7\x80\x11\x0d\x90\xf5\x36\xf0\x41\x3e\x9c\xfc\xbb\
\xb3\x78\xb7\xf3\x37\x08\x29\xef\x40\x7b\x8a\x53\x72\x7f\xe0\x38\
\xe2\x7a\x0c\x42\xb6\xbe\xf8\x1f\x86\x60\x0c\xb8\x11\xc8\x43\x0c\
\x25\x46\x04\x8f\x71\x2d\xf4\xad\x27\x7c\xf8\x67\x5a\xc8\x23\x86\
\xa8\xc5\xc0\xed\xde\xce\xe0\x41\xf7\x81\x3e\x46\x65\x01\x14\x38\
\x81\x74\x1c\x6d\x17\xea\xfc\x1a\x29\x0d\x69\x80\x5b\x07\x4e\xa1\
\xfb\x85\xaa\xca\x90\xce\xc3\x0e\x58\x8c\x95\x98\x29\x65\x75\x51\
\x60\x17\xfa\x40\x68\x95\x50\x5d\x25\xd2\x39\xa9\x12\xe3\x24\x1e\
\x96\x78\x59\xe2\xff\x6c\x07\x3c\x7c\xac\xf4\x32\x52\xde\x60\x89\
\x47\x25\x5e\x93\xf2\xc6\x49\xfe\xcb\x24\x96\x4b\x3c\x23\xf1\x93\
\x94\x76\x8f\x64\x0e\x74\x2c\xfe\xcb\x12\xdf\xe8\x18\x52\x9c\x46\
\x29\x65\x9c\x54\x92\x24\xb1\x54\xad\x9c\x50\x35\x31\xad\x45\x6a\
\x20\x2e\x93\xad\xd0\x1e\x87\xa4\xa3\xf6\x49\xe4\xd4\xc1\x67\x57\
\x02\x8f\x03\xc7\x0b\x18\xd0\x2f\x9f\xe9\x47\xe0\xa5\xde\xf0\xc7\
\x3a\x60\x4c\xf8\x2f\xb5\x80\x40\x1b\x49\x6b\xa1\xaf\x20\x2f\x0a\
\x39\xb7\x02\xfb\x66\x01\xf7\x39\xb2\x1c\x85\xe4\x23\xe8\xa3\x65\
\xf0\xf0\x4f\xf0\xc2\x30\xa8\x9d\x65\xbf\x97\xbf\x0e\xd2\x76\x42\
\xf0\x5e\x70\x1d\x87\x82\x57\x6c\x80\x06\x93\x20\x2b\x44\x8c\xbe\
\x88\x5c\x70\x2f\x24\x9d\x3a\x76\x91\x49\x53\xfb\x67\x50\x97\x06\
\xa3\xee\x23\xd5\x5c\x00\xec\xe0\x24\xb0\x18\x13\x53\x33\xe1\x50\
\x2a\xc4\xdb\xed\x93\xef\xb1\x11\x2a\x3c\xd0\x7e\x1d\x30\xa8\x0a\
\x3a\xdd\x8c\x98\xe5\x61\x58\x1b\x7c\x92\x0d\xdf\x7f\x19\x87\x3b\
\x37\x41\xd6\xef\x90\x17\xc1\x2a\xb6\x59\xb2\x7b\x3b\x70\xc8\x07\
\xc6\x01\x60\x37\xa4\x7d\x41\x7c\xee\xeb\xb4\xfc\xb1\x14\xce\xc6\
\x61\x76\x16\xb4\x5f\x64\xab\x6c\xea\x7a\x98\xb3\x01\x4e\xcc\xb1\
\xbb\xac\x74\x83\xcd\x54\x64\x43\xef\x03\x70\xfe\x59\xe2\xb4\x12\
\x66\x02\x8c\xf8\x89\xac\xcc\x7b\x71\x11\x63\x1f\x59\xd4\xb4\x6f\
\x87\xce\x0c\x78\xe0\x1d\xba\x78\xdf\x60\x32\x27\xed\xaf\xea\x2c\
\x7a\x44\xa8\x79\x0b\xd2\x6c\xa4\xbb\xd1\x67\x31\xb4\x40\x48\xaf\
\x21\xf5\x44\x32\x0b\xa5\x9b\x4c\xad\xb0\xd0\xb3\x42\x2b\x85\x1a\
\xf6\x20\xbd\x88\xda\x8f\xa1\x4f\x84\x76\x75\x22\xcd\x44\x1a\x8d\
\x74\x77\x92\x6a\x8e\xd8\x01\xcb\x9a\xed\x95\xdc\xa5\x12\xb3\x24\
\x5e\xb7\x21\xf7\x86\x5b\x9a\xd4\xdd\xce\xf4\xec\x95\xa6\xb9\xa5\
\xbc\xa1\xb2\x08\x48\x7d\xc7\x4b\xf3\x3d\x92\xb1\x51\x41\x24\x15\
\x0c\x94\x6e\xf1\xaa\xd1\xa8\xd3\x7e\xa4\xbd\xc8\x4e\x9b\xf7\x64\
\x49\x77\x23\x0d\xef\xa3\x33\x54\xca\x0c\xb7\x9b\x04\x01\xcb\xeb\
\x58\x9f\x6f\x0d\x4a\xf6\xc0\x0a\xa0\x75\xa2\x23\xc4\x78\xe1\x93\
\x3c\xa6\xfd\xdb\x66\xa9\xe3\xc0\x67\xe7\xc2\x2f\x4f\xc2\xbe\x12\
\x9b\x1f\x9a\x12\xa0\x7a\x11\x34\xac\x87\x23\xef\x74\xe0\xcd\x84\
\x7e\xd7\x27\x63\x2c\x98\x06\xb1\x09\xf6\xce\x1b\xf5\xf0\xc8\x12\
\x08\xc5\x61\xcd\x9d\x0e\xe7\x6d\x86\x09\x31\xa8\x1d\x83\x41\x10\
\xda\xf2\x6c\x37\x61\x96\x11\x02\x38\x3b\x01\x3c\x11\xd2\x53\xbe\
\xc7\x72\x7e\x7b\x3b\x13\x89\xbf\xf3\x03\x54\xe6\x41\xef\x32\x0a\
\x86\xf5\xc0\xd4\x29\x11\x00\x1a\x8a\x80\x30\xd0\x2c\xce\x59\x0a\
\xe7\x00\x5f\xf4\x05\xa6\x01\x66\x9d\xed\xa6\xde\x82\x69\x9f\xd8\
\x26\xd3\x72\xf4\xb9\xcc\x11\x63\x0b\xd8\xef\x82\x3d\x3e\xe8\xd2\
\x00\x99\x17\x16\xc1\xf2\xa9\xf6\x26\x50\x0f\xac\x84\xe9\x6f\xd0\
\xf1\x58\x1b\x7c\x9b\xe1\x38\xe7\x3a\xb8\xf8\xbf\x50\x93\xe8\x10\
\x76\x13\x24\xc8\x76\x2f\x29\x7b\xc8\xa0\x02\xe2\xd7\xc3\x0f\x17\
\x62\x5e\x3e\x8b\x62\x3e\xa0\x17\x10\xc1\xe4\x20\x43\x09\xad\xdb\
\x08\xcd\x26\x8c\x01\x33\xa1\x4c\x8c\x05\x76\x25\x3a\x8a\x5e\x0c\
\xbe\xcd\xf0\x78\x15\x6c\x04\x0e\xdd\x08\x64\x05\x6c\xb5\x68\xea\
\x81\xe7\x49\x17\x33\xd7\xc3\x20\xcb\x76\xcc\x3e\xa7\x10\x0f\x10\
\x01\x86\x1e\x87\x8c\xcb\xdc\x70\xf4\xaf\xe0\xeb\x05\x1a\xe1\xf2\
\xfd\xe8\xe5\x0e\xfc\x2f\x02\xdb\x6f\x03\xae\xb0\xe9\x76\x64\x3d\
\x54\x25\x3a\x2a\x6f\xc1\x59\xcb\x66\xea\xdc\x1d\x58\x18\x74\xd0\
\x07\x6a\x27\x41\x69\x8c\x94\x21\x8f\x30\x90\x1a\x92\x9c\xdf\x5c\
\x1b\xef\x43\x78\xdd\x2a\xa8\xc9\xc5\xc4\x65\x92\x07\x18\x09\xb6\
\xa2\x33\xd4\xb6\xb1\x05\x6b\xed\x84\x5d\x56\x0a\x4c\x32\x00\xc0\
\x3a\x0f\xaa\xc6\x90\xfc\x9c\xcd\x03\x38\x11\x20\xd7\xb1\x80\x03\
\x42\x90\x71\xbd\x17\xfe\xbc\xd0\x71\x6e\x2e\x7b\x03\x2e\xdd\x4d\
\xe4\x83\x00\x34\x80\xb1\x74\x08\xc4\xaf\x43\xe4\x80\x6b\x05\x5c\
\x12\x84\xca\xae\x8e\x2f\xe9\x84\x90\xcb\xde\x9d\x92\x33\x44\xe9\
\xa0\x85\x24\xb0\x86\x43\x52\x12\xdc\xd5\x88\xc1\x6b\x74\xa3\x83\
\x14\x22\x54\x60\x50\x13\xbd\x04\xbe\xee\x83\xf9\xdb\x4d\x16\xdb\
\x80\xc3\x5e\x68\xbe\xd5\xd1\xa1\x69\x10\x1a\x69\xef\xf2\x7a\xe0\
\xd8\x0b\x31\x98\x52\x03\xae\x3f\xec\x33\xa8\xf0\x91\x7a\xc8\x2e\
\xa4\xc5\xd1\x69\x01\x3d\x7f\x04\x4e\x7b\x9c\x73\xaa\xb2\xd3\xcd\
\xf0\xd5\xb4\x7d\xd0\x48\x3c\x03\x8c\x4f\x81\xf6\x5c\xe0\x24\x06\
\xeb\x60\xfa\xff\xa0\x17\x50\x59\x07\xfc\x66\x7b\xf0\xdc\xd3\xb6\
\xb5\xca\xe9\xc0\xc7\x5e\x52\xc1\x16\xe1\x8f\x53\xed\xbd\xc9\x59\
\x4c\x8e\xf1\x23\x7e\x5c\x00\x24\xe2\x82\xce\xcb\x70\xd5\xbe\xc8\
\xbc\x20\x50\x09\x1c\x48\x80\xd6\x12\xd8\x3d\x1c\x0e\xe6\xdb\x4b\
\x31\x80\xb2\x64\x68\xba\x06\x7a\x8c\x6b\x23\x3a\xa0\x82\xf8\x8c\
\x08\x47\xc7\x42\xb3\xdb\x5e\xb2\xdf\xb6\x87\xe4\x1b\xe0\x2b\x8b\
\x42\xc3\x29\x48\xaa\x80\xd1\x95\x04\xdf\x0b\x53\x57\x0c\x1e\x13\
\x12\xf7\x00\x3f\xb9\x81\x1d\x90\xf6\x13\xfc\xb7\xda\x86\xf3\x91\
\x20\x1c\xfc\x1d\xac\x13\x70\xd9\x2f\x30\x0a\x70\x45\xe1\xfb\x10\
\x1e\x26\x03\x6d\x18\xfe\x15\x50\xd0\x08\xa5\x11\x4c\xef\x1a\x5c\
\x15\xd3\xa9\x26\x8b\x0c\x3c\x64\xf0\x2b\xc6\x34\xa1\x98\xd3\xe7\
\x25\x0e\x54\x5a\x1c\x93\x5d\xef\xc0\x26\xc1\x69\xe6\xae\x4e\x4b\
\xfe\x95\xef\xb3\x9d\xbe\xf8\x2b\xb8\x02\x0c\x88\x43\x5e\x04\x9a\
\x0c\x30\x3d\x90\x64\x42\xd0\x70\xd2\x6b\x1c\x5c\x07\xc0\xac\xb0\
\xe1\x6b\x0d\x00\xb3\xd5\x89\xd4\x01\x87\x92\xba\x3b\x8b\x71\x3b\
\xff\x6f\xea\x03\x4d\x1d\x30\xa4\x1a\x4e\x5a\xf6\xae\xf5\x07\x56\
\x26\x13\xab\x7e\x04\x77\x47\x0d\x78\xb7\x62\xfc\xd3\x42\x27\x0d\
\xe8\x16\x87\x87\x4f\x42\x61\x1d\x94\xf7\x87\x25\xe9\x7f\x99\x67\
\x18\xdb\x09\x17\x1e\x06\x6f\x1d\xb8\x9b\x0c\x4e\x8d\x16\x47\xbb\
\xc2\x19\xd3\x2e\x22\x0b\x28\x8c\x83\xab\x0d\x7a\x1d\x36\x71\x9d\
\x4e\xa2\xa3\x28\x08\xfd\xe2\x18\xc9\xd0\x6a\x40\x72\x04\x32\x0f\
\x01\x6d\x2e\xf0\x5a\x90\x21\xac\xae\x60\x76\x02\x27\x9c\x23\x6d\
\x72\x6e\x17\x9a\x9c\x4b\xfa\x46\x03\x12\x65\x53\xe3\x28\xc7\x9d\
\x1e\x73\x12\x60\x93\x09\xcd\x16\x04\x4d\xc8\xb3\x60\x53\x1b\x9a\
\x25\xd4\xf4\x15\x52\x96\x7d\x4d\x1a\x5e\x63\xcf\x3c\x1e\x93\x2d\
\x8e\xd6\xbd\x48\x45\x09\x92\xbb\xc4\xf6\x59\x29\xdd\xd5\xf2\xab\
\xa1\xaf\x85\xde\x77\x66\x23\xed\xfb\x90\x6e\x47\x32\x06\x4b\x3c\
\x27\x71\x97\x74\x5e\x9a\xac\x16\xd4\x20\x64\x1d\x40\xea\x9f\x2e\
\x31\x47\xe2\x6a\xe9\x46\xaf\x62\x6d\xce\xe5\x76\x7e\x77\x7b\x4e\
\x92\x3b\xda\xbe\x1f\x5e\x89\x74\xa5\x5f\x62\xae\x7d\xb5\x6a\x3c\
\x29\xbd\xe8\x91\xe6\x23\x3d\x85\xd4\xbb\x44\x31\x96\xa9\x99\xcd\
\x8a\xf1\x95\x94\x3b\x52\xee\x9e\x31\xbb\xd8\x8c\x97\x80\xc6\x5c\
\x68\xac\xa3\x35\xdf\x6e\xde\x5e\xc0\xf4\x27\xc1\x58\xdc\x17\xac\
\x73\x1c\x00\x45\xb0\x6e\x69\xe4\x8f\xc1\xc2\x72\xfa\x23\x82\x1d\
\x1a\x39\x05\x28\xc5\x01\x5e\x21\xec\xbd\x15\xe3\xb1\x77\xf1\x2c\
\x0a\x63\xd4\x00\x87\x46\x3b\x1e\x6c\x34\x7c\xeb\xc7\xf5\xaf\x4f\
\xed\xd3\xa8\x79\x9d\x18\x93\x70\xd7\x1d\x83\xdd\x83\xa0\xc0\x24\
\xb6\x6e\x31\x01\x6e\xc6\x04\x92\x38\x82\xcb\x5c\x08\x66\x14\xd6\
\x16\x51\x5b\xbe\x8d\x72\x32\xd8\x88\x97\x29\x40\xbf\xba\x7d\x98\
\x0d\x09\x10\x6f\x05\x8e\x02\x9e\x10\xc4\x60\x67\xa9\x9d\xfd\xaf\
\xde\x0d\xe6\x4a\x0f\x58\x3d\x1d\x90\x95\x13\x9b\xf6\x2d\x1f\xbd\
\x15\xe4\x88\xdf\x2e\xa0\x15\x38\x0b\x1c\x2f\x82\xd0\x5c\xa0\xf0\
\x57\x60\x9d\xcd\x63\x1a\x0a\xcb\x06\xd0\x50\x01\x91\x81\x40\xce\
\x61\xe7\x39\xb9\xd0\x39\xc8\xc6\x6e\x04\xc0\xc0\xcd\x69\x3b\x68\
\xe5\x00\x47\x52\x69\x89\x5e\x4e\x10\x48\x21\x88\xcb\x58\x08\xbe\
\x00\x6c\x01\xf6\x3e\x41\x2d\xb9\x44\xf1\x60\x81\x9d\x11\x93\xca\
\x31\xb7\xba\xe1\x82\x9f\x81\x02\x03\xa2\x99\x30\x14\x0e\xa4\xc0\
\x13\xc7\x21\x7b\x01\x70\x36\xcb\x09\x43\x89\xc4\x1f\x28\x63\xe1\
\x0a\x9b\xc9\x32\x9d\x30\xd5\xd5\xe9\x91\x46\xe0\xc4\x18\xd0\x83\
\x16\x78\xea\x1c\x21\x1c\x04\xa1\xfb\xc8\x9e\x9f\x41\x4d\x1e\xc4\
\x5e\x39\xee\xa4\xc2\x5c\xb0\xfa\x40\xb5\xcb\xce\x57\xb6\x94\xdb\
\x91\x36\x06\xd6\xc2\x4b\x39\xa1\x1c\x2a\x00\x11\x81\x0b\xde\x82\
\xad\x49\xc4\xbf\x7c\x93\x0e\x6e\x64\x37\x06\xb5\x18\x40\x9c\xae\
\xfc\x06\xe1\x2d\x98\x3b\x3c\x50\xb2\x1a\x88\x09\x12\x5a\x38\x36\
\x0b\xfa\x06\xa0\xe7\x5c\xe7\xc6\xa6\x3d\x13\xcc\x3f\x61\xc6\x87\
\xbc\x30\x3f\x42\xa6\x63\x2e\x52\x9d\x1b\x9b\x06\x87\xb5\xf2\x1d\
\xf2\x39\x3b\x1b\x98\x71\x04\xdb\x17\xec\x06\x7a\x90\xba\x60\x22\
\x5d\x57\x83\x31\x59\x90\xfe\x81\x8d\x41\x2b\x1b\xfe\xf4\xdb\x4d\
\x4e\x93\xc3\x8b\x95\x50\x0b\x66\xe7\x20\xba\x63\x51\x08\x98\xb9\
\x0f\xc3\x40\x37\xd6\xca\x77\xe8\xe0\x16\xc2\x24\x72\x35\x01\x7a\
\x02\x73\xdc\xaf\xe2\x4e\x9a\x00\xd9\x41\x38\xf8\x2b\xd2\x30\xa4\
\x41\x48\x2f\xdb\x01\xa9\x7d\x09\xd2\x40\xaf\xe4\x2a\x92\xe8\xa7\
\xce\xfb\x0d\xbd\x1e\x43\xcb\x85\x2a\xcb\x50\xe0\x15\x14\x7e\xd3\
\xd0\xd7\x16\xfa\xde\x42\x91\xcf\x91\x35\x07\x35\x6f\x40\xe5\x42\
\x2d\x9d\xc8\x7a\xd0\x70\x02\xd8\x42\x89\xb7\xa5\x82\x0b\x14\xd9\
\x87\x34\x0b\x89\xc5\xf6\x08\xe1\xda\x22\xbb\xb1\x79\x59\xe2\x4f\
\xc9\x7b\x8b\xf4\x3c\xb2\x78\x4d\x47\x88\xab\x95\x93\xd2\x2d\x48\
\x09\x97\xc9\xa2\x5a\xe2\xa8\xe2\x74\xd8\x73\x94\x84\x71\xd2\x84\
\x54\x29\xad\x44\x1a\x93\x22\x1a\xd7\x20\x8d\xb0\x67\x22\x95\xb5\
\xa8\x7a\x1f\x52\xae\xcb\x59\xc4\x20\xb5\x5d\xe3\xd5\x8f\xad\x68\
\x6f\x33\x0a\xbf\x66\x48\x05\xc9\x12\x57\x4a\xfe\x21\xaa\x2c\x47\
\x3b\x9a\x91\x8a\x0d\xc9\xb8\x44\x72\x4f\x53\xec\xfe\x02\x55\x34\
\x22\xeb\x30\xd2\xa4\x44\xc9\x78\xca\x19\xf6\xcc\x97\x86\x95\x48\
\xfb\x90\x0a\xa6\x48\xfc\x20\x79\x2f\x96\x16\xe3\x4c\xbc\xbe\x92\
\xce\x19\x2e\xcd\xec\xa9\x38\x47\xd5\x4e\x4c\x56\xef\xbb\xa4\xe1\
\xdd\x24\xbe\x51\x05\x71\xd5\x10\x57\x98\xbd\xd2\xe8\x3e\xd2\xb9\
\x1e\x69\x58\x9a\x94\xf0\x90\x4e\x25\xaf\x11\x81\xa9\xd9\xd2\x3f\
\x3c\x6a\x6d\x41\x3f\xcb\xa1\xd0\xc4\x04\xc9\x3f\x48\xb1\xaf\xd0\
\x1c\xa1\xc6\xdf\x4c\xa9\xb7\x47\xa2\x54\xe2\x42\x89\xfb\xa4\xa4\
\x73\xb4\xa6\x02\xad\x6e\x41\xea\xe6\x93\xb8\xd5\x99\x0d\xfe\x4b\
\xea\x31\x5c\xf1\x5f\x91\x75\x10\xe9\xbc\x14\x27\xc6\xbe\x69\x9f\
\xc4\x7d\x49\xd2\x55\x99\x4e\xbc\x9d\x2a\x3d\x89\xc4\x74\x89\xf7\
\xa5\x31\xbd\xa5\xae\x97\x49\xac\x52\x88\x36\xa9\x47\x81\xfd\x3a\
\x35\x8a\x52\x27\xb1\x5c\x16\x6f\x48\x4f\xa1\x78\xca\x75\x6a\x4a\
\x78\x49\x35\x74\x68\x8e\x4b\x42\x59\x3e\x59\xef\xa1\xda\x00\x8a\
\x4f\x4f\x96\x28\x96\xae\x40\xc1\xfd\xf6\x04\x2a\xb0\x14\xc9\x3f\
\xde\x59\xe8\x34\x89\x4b\xa4\x94\x6e\xaa\x5d\x6a\xea\x45\xa1\x2f\
\x2c\x64\x6d\x41\x2a\xcd\x75\xf4\xe3\x11\x89\xd5\x92\xeb\x7d\xe9\
\xd2\x54\xe9\x17\xa4\x61\x45\x12\x1f\x4a\x54\x2a\xc6\x46\x29\x6d\
\x8c\xc4\x9b\xb2\x58\x2b\xa5\xa7\x39\x8b\x3d\x29\x75\x4f\x56\x94\
\xb5\x3a\x4c\x4c\x01\x0e\x4a\xc6\x33\x8a\xd2\xa4\x38\x01\x1d\x24\
\xa6\xf6\xb1\xcb\xa4\xf3\x5d\x8a\xe6\x4e\x51\x1d\x3f\xeb\x3b\xa4\
\xd5\xc4\xf4\x11\x12\xf1\xcf\x91\x1e\x47\x9a\x99\x2d\xe5\xf8\xa5\
\x2b\x4c\xe9\x5b\x54\x17\x45\xd6\x03\x6e\xc9\xdf\x5d\xe2\x5c\x7b\
\x66\xe8\xc9\x97\xae\x34\xf4\xe9\x09\x7b\x28\xba\x4a\x68\xb1\xd0\
\xd9\x16\xd4\xbc\x1d\xe9\x2a\xbf\xc4\x15\x12\xf3\xd5\x4a\xab\x2c\
\x8e\x4b\xb9\x23\xa5\x67\x90\xae\x2a\x94\x58\xa9\x06\xa4\x20\x4b\
\xed\xcb\x0b\xf6\x4a\xbc\xab\x38\xfb\x55\x81\x65\x4f\x79\xf9\x46\
\xed\x04\x74\x9a\x6a\x1d\x24\xa4\xad\x48\x11\x3a\x15\x34\x7f\x94\
\xdc\x53\x15\x49\xdd\xab\x36\xef\xef\xfa\xa2\xf0\xa0\x16\x76\x3d\
\xa2\x08\xeb\x74\x77\x9e\xe4\x9a\xdb\x9f\x79\xc6\xda\x14\xd8\x38\
\x19\x92\x8f\xc1\xe8\x30\xd1\x2b\x0d\x52\x9e\xea\x86\xf1\x45\x02\
\x14\x89\xc8\x05\x89\x54\xbd\xb8\x8f\xa5\x8f\x07\xf8\xfc\x21\xe1\
\xca\xb0\xc5\x32\xd9\x21\xcd\xfa\x20\x44\x4a\x81\x1b\xa2\xf8\x87\
\x9f\x80\xd0\x16\x12\x2a\x4f\x41\xd4\x87\xd1\x31\x02\xca\x2c\x22\
\xb3\x77\x11\x9d\xba\x12\xef\xba\x18\x66\x74\x24\xee\x3e\x9f\x41\
\x53\x00\xb8\x80\x28\x25\xf8\x68\xc6\x4b\x39\x71\xd2\x30\xc8\xe6\
\x77\xba\x10\x26\xca\x20\x76\xe1\x3f\x6f\x01\xee\xd0\x49\xde\xf0\
\xcd\x23\xd2\x69\xb1\xc2\x5f\xcc\x65\x9d\xd5\x84\x5b\x45\xa3\xbf\
\x81\x98\x75\x02\xde\xdf\x79\x95\x46\x6d\x7b\x4f\xd7\x1c\x9a\xad\
\xc0\x2f\x48\x2f\x21\x8d\xf4\x48\x5d\x8b\xa4\x3b\x0d\x35\x1c\xb2\
\x73\xfa\x1b\x16\xda\xd8\x89\x82\xb5\xa8\xea\x18\x7a\xc1\x42\xcf\
\x0b\xad\x13\xfa\x3d\x8c\xe2\x9b\x0d\x75\x9c\x34\xa4\xa3\xc8\x6a\
\xb4\x19\x50\x05\xdd\x25\x63\x92\xc4\x08\x89\x19\xd2\xf8\x02\xe9\
\xff\x21\x79\xee\x90\x8c\xcf\x25\xe3\x4a\xbb\x7f\x8c\x6a\xc9\x78\
\x47\x32\x26\x48\xac\x90\xd8\xa7\xbd\x48\x11\x9e\x95\xd8\xa9\x72\
\xa2\xb2\x58\xa7\x36\x0e\x4a\x2c\xd7\x7a\xb3\x5c\xa1\xae\x77\xa9\
\xf9\xfa\xb1\xba\xb6\xfc\x2e\x69\xd0\x10\x19\x0d\x7c\xab\xe5\x13\
\x5d\xdc\x64\x3e\x40\xda\xaf\x03\xc0\xbf\x0e\xba\x75\xf2\xf5\xe7\
\xa2\xc7\x06\x38\x5a\x6b\x32\xfe\x50\x1a\x19\xd5\x51\x68\x4f\x83\
\x84\x28\xd4\xd6\xd1\xf9\x3c\x6c\x19\x0f\x7d\x2c\xe8\xfe\x0a\xb8\
\xde\x99\x02\x6d\x57\x41\xe6\x87\x90\xe1\x42\x97\x54\x41\x71\x19\
\x9d\x57\x99\xf8\xdf\x4b\x41\x8b\x2e\x27\x16\xbe\x06\x4f\xc6\x6b\
\xe0\xa9\x85\xba\xc7\x61\xf8\xbb\x70\x32\x8c\x96\xb5\x12\x49\x08\
\xe2\x5b\x1e\x82\x77\xee\x21\xce\x2d\x1c\xa2\x3b\x3d\x88\x62\xb2\
\x95\xdd\x5c\xc6\xf9\x9e\x8d\xec\x1b\xf8\x15\xc5\x5d\x8a\xd9\xfc\
\xfd\x97\x4c\xce\x3c\x97\xfd\xf7\x1f\xa7\xab\xd9\x40\xc3\x2b\xb7\
\x61\xac\xfc\x3d\x41\x57\x7f\xdf\xdd\x1e\xe6\xac\x05\x4a\x4c\xea\
\x17\x59\x64\xee\xcb\xc1\x35\x0f\x38\xde\x1d\x54\x04\xf4\x71\xc2\
\xcf\x20\x3b\x00\x4d\x3a\x49\xfd\xe7\x65\xac\x4f\x86\x1b\xde\x05\
\xe3\x89\x31\xc4\x9b\x5f\x76\xac\x6c\x22\x2e\x8e\x41\xff\x77\xed\
\xbc\x9f\x93\x83\x36\x8e\xc2\xe0\x12\x3a\xb8\x80\x24\xbe\xb0\x83\
\xd7\xcd\x4b\x09\x2f\x81\x76\x13\x52\x0c\xf0\xec\x01\xf3\x43\xe0\
\xb7\xf1\x54\xec\xfb\x37\xf9\xb1\x12\x02\x34\x93\xe9\xdf\x0d\xe6\
\xd7\x90\x60\xc1\xed\x2b\xa8\x5e\xf7\x22\x4b\x0a\x07\x91\x5b\xe6\
\xa1\x3c\x2d\xc6\x4b\x67\x42\xb8\xae\x6e\x7e\x73\x5e\x72\xaf\x6d\
\xa4\xbf\xdf\x1b\x7a\xf9\x61\xdb\x34\x92\x7e\x18\x86\xf9\x51\x09\
\x34\x8c\x04\xa6\xdb\x73\x0c\x8e\x42\xd1\x01\xc8\xfc\x9e\xc8\xa4\
\xa3\x54\x4e\x6f\x64\x76\x29\xdc\x0e\x64\xee\x04\xea\xea\x31\x6b\
\x4e\x60\x5a\x51\x4c\xd7\x6f\xa0\x6e\x50\x6f\x40\xdd\x5d\x58\xa7\
\xfb\x62\x5a\x3e\xc4\x4c\x0c\xd2\x70\xf1\x7f\xd0\x77\x33\x9c\x13\
\xc4\x5d\x0f\xfe\x3a\x70\x6f\x07\xe3\xb8\xe3\x6c\x7a\x54\x91\xbe\
\x25\x9b\x5d\x4c\x22\x89\x8d\x24\x45\x0b\x68\x8d\x9c\x8f\x72\xf7\
\xd1\xb2\xeb\x72\x9a\xae\xf6\xb0\x6a\xfd\x9d\x7c\xf8\xa4\xc1\x23\
\xef\x1c\xa4\xa5\x7d\x0a\xff\x1f\xa7\x48\xb3\x27\x67\x17\xe2\x1e\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x73\x5e\x63\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x74\x20\x97\x84\xbd\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 1.109375 | 1 |
deepensemble/layers/dense.py | pdoren/correntropy-and-ensembles-in-deep-learning | 1 | 12797780 | import theano.tensor as T
from .layer import Layer
__all__ = ['Dense']
class Dense(Layer):
""" Typical Layer of MLP.
.. math:: Layer(x) = activation(Wx + b)
where :math:`x \\in \\mathbb{R}^{n_{output}}`, :math:`W \\in \\mathbb{R}^{n_{output} \\times n_{input}}` and
:math:`b \\in \\mathbb{R}^{n_{output}}`.
Parameters
----------
n_input : int or tuple[]
Dimension of input.
n_output : int or tuple[]
Dimension of output.
activation : callback
Activation function.
"""
def __init__(self, n_input=None, n_output=None, activation=None):
input_shape = n_input if isinstance(n_input, tuple) else (None, n_input)
output_shape = n_output if isinstance(n_output, tuple) else (None, n_output)
super(Dense, self).__init__(input_shape=input_shape, output_shape=output_shape, non_linearity=activation)
def output(self, x, prob=True):
""" Return output of layers
Parameters
----------
x : theano.tensor.matrix
Input sample
prob : bool
Flag for changing behavior of some layers.
Returns
-------
theano.tensor.matrix
Returns the output layers according to above equation:
.. math:: Layer(x) = activation(Wx + b)
"""
if x.ndim > 2:
x = x.flatten(2)
lin_output = T.dot(x, self.get_W()) + self.get_b().dimshuffle('x', 0)
# noinspection PyCallingNonCallable
return (
lin_output if self._non_linearity is None
else self._non_linearity(lin_output)
)
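# --- Illustrative sketch (not part of the original module) ---
# A Dense layer computes activation(Wx + b). The snippet below only sketches how the class
# above is wired up; parameter initialisation is handled by the base `Layer` class (not shown
# here), so this is not a complete runnable example.
#
# import theano
# import theano.tensor as T
# layer = Dense(n_input=10, n_output=5, activation=T.nnet.sigmoid)
# x = T.matrix('x')
# y = layer.output(x)          # symbolic expression: sigmoid(x.W + b)
# f = theano.function([x], y)  # compile once W and b have been initialised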
| 3.03125 | 3 |
sztuczna_inteligencja/2-lab/komandos.py | Magikis/Uniwersity | 12 | 12797781 | from sokobanASTAR import taxiDistance
from IO_SI import *
from sokoban import *
targets = set()
board = []
_dirs = {'U': (-1, 0), 'D': (1, 0), 'R': (0, 1), 'L': (0, -1)}
d = {}
def showBoard(state):
for i in range(len(board)):
for j in range(len(board[i])):
tup = (i, j)
if tup in state and tup in targets:
print('B', end='')
elif tup in state:
print('S', end='')
elif tup in targets:
print('G', end='')
elif board[i][j] != '#':
print(' ', end='')
else:
print('#', end='')
print()
print()
def getMoves():
return ['U', 'D', 'L', 'R']
def isWall(pos):
(i, j) = pos
return board[i][j] == '#'
def getInitialState(countPaths=True):
# state => tuple(boxesTuple, keeperPos)
global targets, board
board = readBoard()
targets = set(getIndexs(board, ['G', 'B']))
state = set(getIndexs(board, ['B', 'S']))
if countPaths:
for t in targets:
shortestPathToGoal(t)
return tuple(sorted(state))
def isWin(state):
for s in state:
if s not in targets:
return False
return True
def applyMove(state, move):
newState = set()
for x in state:
pos = combineMove(x, _dirs[move])
if isWall(pos):
newState.add(x)
else:
newState.add(pos)
return tuple(sorted(newState))
def genNewStates(state):
states = []
for move in getMoves():
        states.append((applyMove(state, move), move))
return states
targetsD = {}
def shortestPathToGoal(pos):
targetsD[pos] = 0
visited = set([pos])
queue = deque([(pos, 0)])
while queue:
(state, steps) = queue.popleft()
        availableSteps = [combineMove(state, _dirs[move]) for move in getMoves()
                          if not isWall(combineMove(state, _dirs[move]))]
        for nextState in availableSteps:
if nextState in visited:
continue
visited.add(nextState)
if nextState in targetsD:
targetsD[nextState] = min(steps + 1, targetsD[nextState])
else:
targetsD[nextState] = steps + 1
queue.append((nextState, steps + 1))
return
# def countShortestPathToTarget(G):
# for v in G:
# res = []
# for t in targets:
# res.append(d[(v, t)])
# targetsD[v] = min(res)
def h(state):
    minDists = [targetsD[s] for s in state]
return max(minDists) + (len(state) // 12)
def hUnOpt(state):
minDists = [targetsD[s] for s in state]
minDists = sorted(minDists)
res = 0
for i in range(len(minDists)):
res += minDists[i] * ((i + 1) ) ** 2
return res
# def takeV():
# res = []
# for i in range(len(board)):
# for j in range(len(board[i])):
# v = (i, j)
# if not isWall(v):
# res.append(v)
# return res
# def warshalFloyd(G):
# for v1 in G:
# for v2 in G:
# d[(v1, v2)] = float('inf')
# for v in G:
# for move in [_dirs[move] for move in getMoves()]:
# vN = tuple(map(sum, zip(v, move)))
# if not isWall(vN):
# d[(v, vN)] = 1
# for u in G:
# for v1 in G:
# for v2 in G:
# if d[(v1, v2)] > d[(v1, u)] + d[(u, v2)]:
# d[(v1, v2)] = d[(v1, u)] + d[(u, v2)]
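# --- Illustrative sketch (not part of the original module) ---
# The actual search driver lives elsewhere (see the imports at the top); a minimal
# breadth-first driver over the helpers above could look like this.
#
# def solve():
#     start = getInitialState(countPaths=False)
#     visited = {start}
#     queue = deque([(start, '')])
#     while queue:
#         state, path = queue.popleft()
#         if isWin(state):
#             return path
#         for nextState, move in genNewStates(state):
#             if nextState not in visited:
#                 visited.add(nextState)
#                 queue.append((nextState, path + move))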
| 2.453125 | 2 |
apps/connections/statistics.py | Houston-ARTCC/zhu-core | 1 | 12797782 | from datetime import timedelta
from django.db.models import Sum, Q, DurationField
from django.db.models.functions import Coalesce, Cast
from django.utils import timezone
from .models import ControllerSession
from ..users.models import User, Status
def annotate_hours(query):
"""
Annotates given QuerySet with controlling hours for the
current (curr_hours), previous (prev_hours), and
penultimate (prev_prev_hours) months.
"""
MONTH_NOW = timezone.now().month
YEAR_NOW = timezone.now().year
CURR_MONTH = (Q(sessions__start__month=MONTH_NOW)
& Q(sessions__start__year=YEAR_NOW))
PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12)
& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1))
PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11)
& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1))
return query.annotate(
curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())),
prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())),
prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())),
)
def get_user_hours():
"""
Returns query set of active users annotated with controlling
hours for the current (curr_hours), previous (prev_hours),
and penultimate (prev_prev_hours) months.
"""
return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER))
def get_top_controllers():
"""
Returns query set of active users annotated with controlling
hour sums for the current month (hours) sorted by most
controlling hours (controllers with no hours are not included).
"""
SAME_MONTH = Q(sessions__start__month=timezone.now().month)
SAME_YEAR = Q(sessions__start__year=timezone.now().year)
users = User.objects.exclude(status=Status.NON_MEMBER)
users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR))
return users.exclude(hours__isnull=True).order_by('-hours')
def get_top_positions():
SAME_MONTH = Q(start__month=timezone.now().month)
SAME_YEAR = Q(start__year=timezone.now().year)
sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR)
position_durations = {}
for session in sessions:
position = session.facility + '_' + session.level
if position in position_durations:
position_durations[position] += session.duration
else:
position_durations[position] = session.duration
sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True)
return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions]
def get_daily_statistics(year, user=None):
"""
Returns a query dictionary of every day of the
given year annotated with the controlling hours
for that day.
"""
sessions = ControllerSession.objects.filter(start__year=year)
if user:
sessions = sessions.filter(user=user)
return sessions.extra({'day': 'date(start)'}).values('day').annotate(value=Sum('duration'))
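# --- Illustrative usage sketch (not part of the original module) ---
# Example of consuming the helpers above, e.g. from a Django shell; the annotated
# durations are timedelta objects, so they are converted to hours for display.
#
# from apps.connections.statistics import get_user_hours, get_top_controllers
# for user in get_user_hours():
#     print(user, user.curr_hours.total_seconds() / 3600)
# leaderboard = get_top_controllers()[:3]  # top three controllers this month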
| 2.453125 | 2 |
format_raw_info.py | lihd1003/Booking.com-Scraper | 1 | 12797783 |
from ast import literal_eval
import json
from os import listdir
import pandas as pd
def save_raw_listing_info(folder_in, folder_out):
files = listdir(folder_in)
all_info = []
for f in files:
if "json" not in f or "info" not in f:
continue
with open(folder_in + f, "r") as f:
all_info += json.load(f)
with open(folder_out + "INFO.json", "w") as f:
json.dump(all_info, f, indent=2)
attributes = list(all_info[0].keys())
attributes.remove("room_types")
df_dict = {}
for attr in attributes:
df_dict[attr] = []
for info in all_info:
for attr in attributes:
df_dict[attr].append(info[attr])
pd.DataFrame(df_dict).to_csv(folder_out + "/INFO.csv", index=False)
def save_raw_avalibility(folder_in, folder_out, date_in, date_out):
all_b = [folder_in + e for e in listdir(folder_in) if date_in in e]
collect = []
for file in all_b:
        with open(file, "r") as f:  # close the handle promptly instead of leaking it
            collect += json.load(f)
with open(folder_out + "AVALIBILITY"+date_in.replace("-","")+"_"+date_out.replace("-","")+".json", "w") as f:
json.dump(collect, f, indent=2)
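# --- Illustrative usage sketch (not part of the original module) ---
# Folder names and dates below are placeholders; both functions expect the input folder
# to contain the raw JSON dumps produced by the scraper.
#
# if __name__ == '__main__':
#     save_raw_listing_info('raw/', 'clean/')
#     save_raw_avalibility('raw/', 'clean/', '2021-01-01', '2021-01-02')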
| 2.640625 | 3 |
neuromaps/tests/test_stats.py | VinceBaz/neuromaps | 0 | 12797784 | # -*- coding: utf-8 -*-
"""
For testing neuromaps.stats functionality
"""
import numpy as np
import pytest
from neuromaps import stats
@pytest.mark.xfail
def test_compare_images():
assert False
def test_permtest_metric():
rs = np.random.default_rng(12345678)
x, y = rs.random(size=(2, 100))
r, p = stats.permtest_metric(x, y)
assert np.allclose([r, p], [0.0345815411043023, 0.7192807192807192])
r, p = stats.permtest_metric(np.c_[x, x[::-1]], np.c_[y, y])
assert np.allclose(r, [0.0345815411043023, 0.03338608427980476])
assert np.allclose(p, [0.7192807192807192, 0.7472527472527473])
@pytest.mark.parametrize('x, y, expected', [
# basic one-dimensional input
(range(5), range(5), (1.0, 0.0)),
# broadcasting occurs regardless of input order
(np.stack([range(5), range(5, 0, -1)], 1), range(5),
([1.0, -1.0], [0.0, 0.0])),
(range(5), np.stack([range(5), range(5, 0, -1)], 1),
([1.0, -1.0], [0.0, 0.0])),
# correlation between matching columns
(np.stack([range(5), range(5, 0, -1)], 1),
np.stack([range(5), range(5, 0, -1)], 1),
([1.0, 1.0], [0.0, 0.0]))
])
def test_efficient_pearsonr(x, y, expected):
assert np.allclose(stats.efficient_pearsonr(x, y), expected)
def test_efficient_pearsonr_errors():
with pytest.raises(ValueError):
stats.efficient_pearsonr(range(4), range(5))
assert all(np.isnan(a) for a in stats.efficient_pearsonr([], []))
| 2.1875 | 2 |
image_classifier.py | springcoil/deep-learning-tutorial-pydata2016 | 0 | 12797785 |
from __future__ import print_function
# os.environ['THEANO_FLAGS'] = 'device=gpu1'
import numpy as np
import theano
import theano.tensor as T
import lasagne
import trainer, utils
class TemperatureSoftmax (object):
"""
A softmax function with a temperature setting; increasing it smooths the resulting probabilities.
"""
def __init__(self, temperature=1.0):
self._temperature = theano.shared(lasagne.utils.floatX(temperature), 'temperature')
@property
def temperature(self):
return 1.0 / self._temperature.get_value()
@temperature.setter
def temperature(self, value):
self._temperature.set_value(lasagne.utils.floatX(1.0 / value))
def __call__(self, x):
return lasagne.nonlinearities.softmax(x * self._temperature)
class AbstractClassifier (object):
@classmethod
def for_model(cls, network_build_fn, params_path=None, *args, **kwargs):
"""
Construct a classifier, given a network building function
and an optional path from which to load parameters.
:param network_build_fn: network builder function of the form `fn(input_var, **kwargs) -> lasagne_layer`
that constructs a network in the form of a Lasagne layer, given an input variable (a Theano variable)
:param params_path: [optional] path from which to load network parameters
:return: a classifier instance
"""
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Build the network
print("Building model and compiling functions...")
network = network_build_fn(input_var=input_var, **kwargs)
# If a parameters path is provided, load them
if params_path is not None:
utils.load_model(params_path, network)
return cls(input_var, target_var, network, *args, **kwargs)
@classmethod
def for_network(cls, network, *args, **kwargs):
"""
Construct a classifier instance, given a pre-built network.
:param network: pre-built network, in the form of a Lasagne layer
:param args:
:param kwargs:
:return:
"""
# Construct
input_var = utils.get_network_input_var(network)
target_var = T.ivector('targets')
return cls(input_var, target_var, network, *args, **kwargs)
class ImageClassifier (AbstractClassifier):
def __init__(self, input_var, target_var, final_layer, updates_fn=None):
"""
Constructor - construct an `ImageClassifier` instance given variables for
input, target and a final layer (a Lasagne layer)
:param input_var: input variable, a Theano variable
:param target_var: target variable, a Theano variable
:param final_layer: final layer, a Lasagne layer
:param updates_fn: [optional] a function of the form `fn(cost, params) -> updates` that
generates update expressions given the cost and the parameters to update using
an optimisation technique e.g. Nesterov Momentum:
`lambda cost, params: lasagne.updates.nesterov_momentum(cost, params,
learning_rate=0.002, momentum=0.9)`
"""
self.input_var = input_var
self.target_var = target_var
self.final_layer = final_layer
self.softmax = TemperatureSoftmax()
network = lasagne.layers.NonlinearityLayer(final_layer, self.softmax)
self.network = network
# TRAINING
# Get an expression representing the network's output
prediction = lasagne.layers.get_output(network)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
if updates_fn is None:
updates = lasagne.updates.nesterov_momentum(
loss.mean(), params, learning_rate=0.01, momentum=0.9)
else:
updates = updates_fn(loss.mean(), params)
# EVALUATION - VALIDATION, TEST, PREDICTION
# Create prediction expressions; use deterministic forward pass (disable
# dropout layers)
eval_prediction = lasagne.layers.get_output(network, deterministic=True)
# Create evaluation loss expression
eval_loss = lasagne.objectives.categorical_crossentropy(eval_prediction,
target_var)
# Create an expression for error count
test_err = T.sum(T.neq(T.argmax(eval_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
self._train_fn = theano.function([input_var, target_var], loss.sum(), updates=updates)
# Compile a function computing the validation loss and error:
self._val_fn = theano.function([input_var, target_var], [eval_loss.sum(), test_err])
# Compile a function computing the predicted probability
self._predict_prob_fn = theano.function([input_var], eval_prediction)
# Construct a trainer
self.trainer = trainer.Trainer()
# Provide with training function
self.trainer.train_with(train_batch_fn=self._train_fn,
train_epoch_results_check_fn=self._check_train_epoch_results)
# Evaluate with evaluation function, the second output value - error rate - is used for scoring
self.trainer.evaluate_with(eval_batch_fn=self._val_fn, validation_score_fn=1)
# Set the epoch logging function
self.trainer.report(epoch_log_fn=self._epoch_log)
# Tell the trainer to store parameters when the validation score (error rate) is best
# self.trainer.retain_best_scoring_state_of_updates(updates)
self.trainer.retain_best_scoring_state_of_network(network)
def _check_train_epoch_results(self, epoch, train_epoch_results):
if np.isnan(train_epoch_results).any():
return 'Training loss of NaN'
else:
return None
def _epoch_log(self, epoch_number, delta_time, train_results, val_results, test_results):
"""
Epoch logging callback, passed to the `self.trainer.report()`
"""
items = []
items.append('Epoch {}/{} took {:.2f}s'.format(epoch_number + 1, self.trainer.num_epochs, delta_time))
items.append('train loss={:.6f}'.format(train_results[0]))
if val_results is not None:
items.append('val loss={:.6f}, val err={:.2%}'.format(val_results[0], val_results[1]))
if test_results is not None:
items.append('test err={:.2%}'.format(test_results[1]))
return ', '.join(items)
@property
def n_upper_layers(self):
return len(self.upper_layers)
def predict_prob(self, X, batchsize=500, temperature=None, batch_xform_fn=None):
"""
Predict probabilities for input samples
:param X: input samples
:param batchsize: [optional] mini-batch size default=500
:param temperature: [optional] softmax temperature
:return:
"""
y = []
if temperature is not None:
self.softmax.temperature = temperature
for batch in self.trainer.batch_iterator(X, batchsize=batchsize, shuffle=False):
if batch_xform_fn is not None:
batch = batch_xform_fn(batch)
y_batch = self._predict_prob_fn(batch[0])
y.append(y_batch)
y = np.concatenate(y, axis=0)
if temperature is not None:
self.softmax.temperature = 1.0
return y
def predict_cls(self, X, batchsize=500, batch_xform_fn=None):
prob = self.predict_prob(X, batchsize=batchsize, batch_xform_fn=batch_xform_fn)
return np.argmax(prob, axis=1)
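# --- Illustrative usage sketch (not part of the original module) ---
# `build_network` below is an invented builder; any function mapping an input variable to a
# Lasagne layer that outputs logits works, since the classifier adds its own softmax on top.
# The training step is only indicated, as it is driven by the separate `trainer` module.
#
# def build_network(input_var=None, **kwargs):
#     net = lasagne.layers.InputLayer((None, 1, 28, 28), input_var=input_var)
#     net = lasagne.layers.DenseLayer(net, num_units=256)
#     return lasagne.layers.DenseLayer(net, num_units=10,
#                                      nonlinearity=lasagne.nonlinearities.identity)
#
# clf = ImageClassifier.for_model(build_network)
# probs = clf.predict_prob(X_test)  # class probabilities (rows sum to 1)
# preds = clf.predict_cls(X_test)   # hard class predictions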
| 2.75 | 3 |
mugimugi_client_api/search_item/parody.py | JeanMarc-Moly/mugimugi_client_api | 0 | 12797786 |
from dataclasses import dataclass
from typing import ClassVar
from mugimugi_client_api_entity import Parody
from mugimugi_client_api_entity import SearchParody as Root
from mugimugi_client_api_entity.enum import ElementPrefix, ItemType
from .abstract import SearchItem
@dataclass
class SearchParody(SearchItem):
ROOT: ClassVar[type] = Root
TYPE: ClassVar[ItemType] = ItemType.PARODY
PREFIX: ClassVar[ElementPrefix] = Parody.PREFIX
| 2.125 | 2 |
src/adasigpy/adasigpy.py | borley1211/aspy | 0 | 12797787 | <reponame>borley1211/aspy<filename>src/adasigpy/adasigpy.py
import numpy as np
from .domain.method import Method, init_w
from .interface.filter import AdaptiveSignalProcesserABC
class AdaptiveSignalProcesser(AdaptiveSignalProcesserABC):
def __init__(self, model, shape, mu, w_init, lambda_):
self.method = Method.methods[model]
self.mu = mu
self.w = init_w(w_init, shape)
self.lambda_ = lambda_
def adopt(self, d, x):
len_d, len_x = len(d), len(x)
if len_d != len_x:
raise ValueError(
f"2 arrays should have same length. But now, 'd' has {len_d} and 'x' has {len_x}."
)
self.method(d, x, self.w, self.mu, self.lambda_)
def update(self, d, x):
len_d, len_x = len(d), len(x)
if len_d != len_x:
raise ValueError(
f"2 arrays should have same length. But now, 'd' has {len_d} and 'x' has {len_x}."
)
w_delta = self.method(d, x, self.w, self.mu, self.lambda_)
self.w = self.w + w_delta
return self.w
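# --- Illustrative usage sketch (not part of the original module) ---
# The available model names depend on what `Method.methods` registers (an LMS-style update is
# assumed here), and the shape/parameter values are placeholders for illustration only.
#
# import numpy as np
# asp = AdaptiveSignalProcesser(model='lms', shape=(64,), mu=0.01,
#                               w_init='random', lambda_=0.999)
# for d_block, x_block in blocks:       # desired signal / reference input, equal lengths
#     w = asp.update(d_block, x_block)  # returns the updated filter weights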
| 2.515625 | 3 |
checkcel/checkerator.py | mboudet/validator | 0 | 12797788 | from openpyxl import Workbook
from checkcel.validators import OntologyValidator, SetValidator, LinkedSetValidator
from openpyxl.utils import get_column_letter
from checkcel.checkplate import Checkplate
class Checkerator(Checkplate):
def __init__(
self,
output,
**kwargs
):
super(Checkerator, self).__init__(**kwargs)
self.output = output
def generate(self):
wb = Workbook()
current_data_column = 1
current_ontology_column = 1
current_set_column = 1
current_readme_row = 1
readme_sheet = wb.active
readme_sheet.title = "README"
data_sheet = wb.create_sheet(title="Data")
ontology_sheet = None
set_sheet = None
set_columns = {}
for column_name, validator in self.validators.items():
readme_sheet.cell(column=1, row=current_readme_row, value=validator.describe(column_name))
current_readme_row += 1
data_sheet.cell(column=current_data_column, row=1, value=column_name)
if isinstance(validator, OntologyValidator):
if not ontology_sheet:
ontology_sheet = wb.create_sheet(title="Ontologies")
data_validation = validator.generate(get_column_letter(current_data_column), get_column_letter(current_ontology_column), ontology_sheet)
current_ontology_column += 1
elif isinstance(validator, SetValidator):
# Total size, including separators must be < 256
if sum(len(i) for i in validator.valid_values) + len(validator.valid_values) - 1 > 256:
if not set_sheet:
set_sheet = wb.create_sheet(title="Sets")
data_validation = validator.generate(get_column_letter(current_data_column), column_name, get_column_letter(current_set_column), set_sheet)
current_set_column += 1
else:
data_validation = validator.generate(get_column_letter(current_data_column))
set_columns[column_name] = get_column_letter(current_data_column)
elif isinstance(validator, LinkedSetValidator):
if not set_sheet:
set_sheet = wb.create_sheet(title="Sets")
data_validation = validator.generate(get_column_letter(current_data_column), set_columns, column_name, get_column_letter(current_set_column), set_sheet, wb)
current_set_column += 1
set_columns[column_name] = get_column_letter(current_data_column)
else:
data_validation = validator.generate(get_column_letter(current_data_column))
if data_validation:
data_sheet.add_data_validation(data_validation)
current_data_column += 1
for sheet in wb.worksheets:
for column_cells in sheet.columns:
length = (max(len(self.as_text(cell.value)) for cell in column_cells) + 2) * 1.2
sheet.column_dimensions[get_column_letter(column_cells[0].column)].width = length
wb.save(filename=self.output)
def as_text(self, value):
return str(value) if value is not None else ""
| 2.65625 | 3 |
utils.py | spchung/whenbots | 0 | 12797789 | import datetime
# some wrapper classes for binance websockets and api responses
class WsKline:
# takes in a websocket payload on init -> https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams
def __init__(self, ws_kline):
self.eventTime=datetime.datetime.fromtimestamp( float(ws_kline['E'])/1000 ).strftime("%m-%d-%Y %H:%M:%S")
self.symbol=ws_kline['s']
self.klineStartTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['t'])/1000).strftime("%m-%d-%Y %H:%M:%S")
        self.klineCloseTime=datetime.datetime.fromtimestamp( float(ws_kline['k']['T'])/1000).strftime("%m-%d-%Y %H:%M:%S")  # 'T' is the kline close time ('c' is the close price)
self.interval=ws_kline['k']['i']
self.openPrice=float(ws_kline['k']['o'])
self.closePrice=float(ws_kline['k']['c'])
self.highPrice=float(ws_kline['k']['h'])
self.lowPrice=float(ws_kline['k']['l'])
self.baseAssetVolume=float(ws_kline['k']['v'])
self.numberOfTrades=int(ws_kline['k']['n'])
self.klineClosed=ws_kline['k']['x']
self.quoteAssetVolume=float(ws_kline['k']['q'])
self.takerBaseAssetVolume=float(ws_kline['k']['V'])
self.takerQuoteAssetVolume=float(ws_kline['k']['Q'])
def toDict(self):
d = dict()
d['time'] = self.klineCloseTime
d['open'] = self.openPrice
d['high'] = self.highPrice
d['low'] = self.lowPrice
d['close'] = self.closePrice
return d
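# --- Illustrative sketch (not part of the original module) ---
# Minimal fabricated payload containing just the fields read by WsKline.__init__;
# real payloads arrive on the kline websocket stream documented in the link above.
if __name__ == '__main__':
    sample = {
        'E': 1609459200000, 's': 'BTCUSDT',
        'k': {
            't': 1609459140000, 'T': 1609459199999, 'i': '1m',
            'o': '29000.0', 'c': '29050.0', 'h': '29100.0', 'l': '28990.0',
            'v': '12.5', 'n': 150, 'x': True,
            'q': '362000.0', 'V': '6.0', 'Q': '174000.0',
        },
    }
    print(WsKline(sample).toDict())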
| 2.671875 | 3 |
continuity/tasks.py | scast/continuity-docker | 0 | 12797790 |
from dockermap.map.config import ClientConfiguration
from dockermap.api import MappingDockerClient
from fabric.api import (cd, env, execute, hide, local, prefix, prompt, puts,
roles, run, sudo, task)
from fabric.utils import abort
from fabric.colors import cyan, green, red, yellow
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
@task
def build_image(image='production'):
if env.docker.build(**env.images[image]['build']) is None:
abort(red('Failed to build image {image}'.format(image=image)))
else:
print green('Successfully built image {image}'.format(image=image))
@task
def build(action='check'):
'''Step 1. Build artifacts (docker images) for all environments.'''
force = action == 'force'
check_changed = action == 'check'
with cd(env.project_path):
remote, dest_branch = env.remote_ref.split('/', 1)
changed_files = []
if check_changed:
with hide('running', 'stdout'):
changed_files = env.run('git diff-index --cached --name-only '
'{working_ref}'.format(**env),
capture=True).splitlines()
with open('images_rebuilt', 'w') as f:
for environment in env.manager.get_rebuild_steps('common',
changed_files,
force=force):
build_image(environment)
f.write('{}\n'.format(environment))
@task
def run_tests():
'''Step 2. Run tests and keep it real.'''
# Start the testing runner
container_map = env.container_map
map_client = env.map_client
info = map_client.startup('runner')
container_id = info[0][1]['Id']
# Wait on it to return
exit_status = env.docker.wait(container=container_id,
timeout=600)
# Clean up
logs = env.docker.logs(container=container_id)
for container in container_map.containers:
map_client.shutdown(container)
# Abort or succeed.
print logs
if exit_status == 0:
print green('All tests passed.')
else:
abort(red('Some tests failed!'))
@task
def push():
'''Step 3. Merge and push artifacts.'''
# TODO: push to registry
# Perform clean up
env.run('rm -rf images_rebuilt')
@task
def deploy():
'''Step 4. Deploy artifacts.'''
for cname in env.containers:
env.map_client.shutdown(cname)
env.map_client.startup(cname)
| 2.09375 | 2 |
control_produccion/migrations/0008_auto_20160808_1812.py | asapper/lito-produccion | 0 | 12797791 | <filename>control_produccion/migrations/0008_auto_20160808_1812.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-09 00:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('control_produccion', '0007_auto_20160623_1052'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='order_date_created',
),
migrations.RemoveField(
model_name='order',
name='order_machine',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_datetime_finished',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_datetime_pause_start',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_datetime_started',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_is_paused',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_seconds_paused',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_user_finished',
),
migrations.RemoveField(
model_name='order_process',
name='order_process_user_started',
),
migrations.AddField(
model_name='order',
name='order_sh_id',
field=models.PositiveSmallIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='process',
name='process_group_sh_id',
field=models.PositiveSmallIntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='process',
name='process_sh_id',
field=models.PositiveSmallIntegerField(default=0),
preserve_default=False,
),
]
| 1.429688 | 1 |
api/get_numpy_array_from_dbase.py | fmidev/ml-feature-db | 0 | 12797792 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
from configparser import ConfigParser
#from lib import mlfb
#from lib import mlfb_test4
from lib import mlfb
def main():
# Example script to use mlfb class
#a = mlfb_test4.mlfb_test4(1)
#a = mlfb.mlfb_test4(1)
a = mlfb.mlfb(1)
#input1=999
#input1=99.88
#input1=99,66
#input1=99.55
#input1='atest9988'
#input1=97
#input1='atest1188'
# (type_in, 'null', '20180226T165000',666,'testpara1',665))
#type_in,time_in,location_id_in,parameter_in,value_in
input_type='4test2288'
input_source='null'
input_time='20180226T165000'
input_location_id=455
input_parameter='test2para'
input_value=441
#a.insert_row_trains_1('test99')
#a.insert_row_trains_1(input_type,input_source,input_time,input_location_id,input_parameter,input_value)
#input_location_id=5
input_location_id=1
#input_parameter='temperature'
input_parameter='temperature'
input_value=-9
# get rows
a.get_rows_from_postgre_to_numpy(input_parameter,input_value)
if __name__=='__main__':
main() | 2.109375 | 2 |
setup.py | RLMarvin/RLBotTraining | 0 | 12797793 | <gh_stars>0
import setuptools
import importlib
# Avoid native import statements as we don't want to depend on the package being created yet.
def load_module(module_name, full_path):
spec = importlib.util.spec_from_file_location(module_name, full_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
version = load_module("rlbottraining.version", "rlbottraining/version.py")
paths = load_module("rlbottraining.paths", "rlbottraining/paths.py")
with open("README.md", "r") as readme_file:
long_description = readme_file.read()
setuptools.setup(
name='rlbottraining',
packages=setuptools.find_packages(),
install_requires=[
'rlbot>=1.25.0',
'docopt',
'watchdog',
'numpy',
],
python_requires='>=3.7.0',
version=version.__version__,
description='A framework for writing training for Rocket League bots.',
long_description=long_description,
long_description_content_type="text/markdown",
author='RLBot Community',
author_email='<EMAIL>',
url='https://github.com/RLBot/RLBotTraining',
keywords=['rocket-league', 'training', 'train'],
license='MIT License',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
],
entry_points={
# Allow people to run `rlbottraining` instead of `python -m rlbottraining`
'console_scripts': ['rlbottraining = rlbottraining.__main__:main']
},
package_data={
'rlbottraining': [
f'{paths._match_config_dir}/*.cfg',
f'{paths._example_bot_dir}/*/*.cfg',
str(paths._website_static_source),
str(paths._example_rl_custom_training_json),
]
},
)
| 1.671875 | 2 |
check_normals.py | panmari/seeing3d | 56 | 12797794 | <reponame>panmari/seeing3d<filename>check_normals.py
#!/usr/bin/env python
"""
see if normals are plausible by looking at their norm.
"""
import sys
import Image
import numpy as np
import matplotlib.pyplot as pl
img_fname = sys.argv[1]
img = Image.open(img_fname)
imgarr = np.array(img).astype(np.float)/255.
print imgarr.shape
imgarr2 = 2.*imgarr - 1.
imgarr3 = (imgarr2*imgarr2).sum(2)
mask = ~(imgarr3==3.0)
imgarr3 *= mask
imgarr3 = np.sqrt(imgarr3)
print imgarr3.min(), imgarr3.max()
pl.matshow(imgarr3)
pl.colorbar()
pl.show()
#print imgarr2.min(), imgarr2.max()
| 2.46875 | 2 |
Instances/Final-Task/processFile.py | namespace-Pt/Algorithm | 0 | 12797795 | path_save = r'D:\Data\Class_data\Alg_data\FinalTask\l_1.txt'
path = r'D:\Data\Class_data\Alg_data\FinalTask\F.txt'
with open(path,'r',encoding='utf-8',errors='ignore') as f:
string = ''
# for line in f:
# string += line.strip()
# except:
# continue
string = f.read()
print(len(string))
g = open(path_save,'w')
g.write(string)
g.close() | 2.8125 | 3 |
convertmask/utils/auglib/optional/resize.py | wwdok/mask2json | 27 | 12797796 | <filename>convertmask/utils/auglib/optional/resize.py
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-10-26 08:31:13
LastEditors: xiaoshuyui
LastEditTime: 2020-11-20 14:12:40
'''
import os
import xml.etree.ElementTree as ET
import numpy as np
import skimage
from convertmask.utils.methods.logger import logger
from convertmask.utils.xml2yolo.xml2yolo import convert as x2yVert
from convertmask.utils.yolo2xml.yolo2xml import convert as y2xVert
def resize_img(img: np.ndarray, heightFactor=1, widthFactor=1):
if isinstance(img,str):
img = skimage.io.imread(img)
imgShape = img.shape
resizedImg = skimage.transform.resize(
img, (int(heightFactor * imgShape[0]), int(widthFactor * imgShape[1])))
return np.array(resizedImg * 255).astype(np.uint8)
# def resize_xml(xmlpath:str, heightFactor=1, widthFactor=1):
# pass
def resizeScript(img, xmlpath: str, heightFactor=1, widthFactor=1,flag=True):
if isinstance(img, str) and os.path.exists(img):
oriImg = skimage.io.imread(img)
elif isinstance(img, np.ndarray):
oriImg = img
else:
logger.error('Input error!')
return
in_file = open(xmlpath)
tree = ET.parse(in_file)
root = tree.getroot()
parentPath, xmlFilename = os.path.split(xmlpath)
xf, _ = os.path.splitext(xmlFilename)
savePath = parentPath + os.sep + xf + '_reshape.xml'
root.find('filename').text = xf + '_reshape.jpg'
root.find('path').text = parentPath + os.sep + xf + '_reshape.jpg'
resizeImg = resize_img(oriImg, heightFactor, widthFactor)
resizeImgShape = resizeImg.shape
width = int(resizeImgShape[1])
height = int(resizeImgShape[0])
size = root.find('size')
size.find('width').text = str(width)
size.find('height').text = str(height)
for obj in root.iter('object'):
xmlbox = obj.find('bndbox')
b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
# print('===========================')
# print(b)
bb = x2yVert((oriImg.shape[1], oriImg.shape[0]), b)
x, y, w, h = bb[0], bb[1], bb[2], bb[3]
# print(x, y, w, h)
# w = w
# h = h
bbox = y2xVert((resizeImgShape[1],resizeImgShape[0]), x, y, w, h)
# print(bbox)
# print('===========================')
xmlbox.find('xmin').text = str(int(bbox[0]))
xmlbox.find('ymin').text = str(int(bbox[2]))
xmlbox.find('xmax').text = str(int(bbox[1]))
xmlbox.find('ymax').text = str(int(bbox[3]))
if flag: # save file
tree.write(savePath)
in_file.close()
return resizeImg, savePath
else:
return tree
| 2.5625 | 3 |
docker_loader/image.py | msiedlarek/docker-loader | 4 | 12797797 | <filename>docker_loader/image.py<gh_stars>1-10
import gzip
import shutil
import logging
logger = logging.getLogger(__name__)
class Image:
def __init__(self, client, id):
self.client = client
self.id = id
self.repository_tag = None
def __str__(self):
return self.repository_tag or self.id[:12]
def tag(self, repository, tag=None, force=None, **kwargs):
if tag is None:
tag = 'latest'
if force is None and tag == 'latest':
force = True
logger.info("Tagging image {image} as {repository}:{tag}...".format(
image=self,
repository=repository,
tag=tag
))
self.client.tag(
self.id,
repository=repository,
tag=tag,
force=force,
**kwargs
)
self.repository_tag = ':'.join((repository, tag))
return self
def remove(self, **kwargs):
logger.info("Removing image: {}".format(self))
self.client.remove_image(self.id, **kwargs)
def get(self):
if self.repository_tag:
return self.client.get_image(self.repository_tag)
else:
return self.client.get_image(self.id)
def save(self, path, compress=False):
logger.info("Saving image {image} to: {file}".format(
image=self,
file=path
))
if compress:
open_func = gzip.open
else:
open_func = open
with open_func(path, 'wb') as output:
shutil.copyfileobj(self.get(), output)
return self
| 2.515625 | 3 |
authors/apps/comments/models.py | MuhweziDeo/Ah-backend-xmen | 4 | 12797798 | from django.dispatch import receiver
from django.db.models.signals import pre_save
from django.db import models
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile
from simple_history.models import HistoricalRecords
class Comment(models.Model):
"""
Handles CRUD on a comment that has been made on article
"""
body=models.TextField(max_length=500)
createdAt=models.DateTimeField(auto_now_add=True)
updatedAt=models.DateTimeField(auto_now=True)
highlight_start = models.PositiveIntegerField(null=True, blank=True)
highlight_end = models.PositiveIntegerField(null=True, blank=True)
highlight_text = models.TextField(max_length=500, null=True)
author=models.ForeignKey(Profile,on_delete=models.CASCADE, related_name='authored_by')
article=models.ForeignKey(Article,on_delete=models.CASCADE, related_name='article')
comment_history = HistoricalRecords()
class Meta:
ordering=['-createdAt']
def __str__(self):
return self.body
class CommentReply(models.Model):
"""
    Handles replying to a specific comment made on an article
"""
comment=models.ForeignKey(Comment,on_delete=models.CASCADE,related_name='replies')
reply_body=models.TextField()
repliedOn=models.DateTimeField(auto_now_add=True)
updatedOn=models.DateTimeField(auto_now=True)
author=models.ForeignKey(Profile,on_delete=models.CASCADE)
reply_history = HistoricalRecords()
class Meta:
ordering=['repliedOn']
def __str__(self):
return self.reply_body
class CommentLike(models.Model):
"""
    Handles liking of a specific comment by an authenticated user
"""
comment=models.ForeignKey(Comment,on_delete=models.CASCADE)
like_status=models.BooleanField()
liked_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
def __str__(self):
return "like by {}".format(self.liked_by)
class CommentReplyLike(models.Model):
"""
    Holds data for liking a reply made on a comment
"""
liked=models.BooleanField()
reply_like_by=models.ForeignKey(Profile,on_delete=models.CASCADE)
comment_reply=models.ForeignKey(CommentReply,on_delete=models.CASCADE)
def __str__(self):
return "reply liked by {}".format(self.reply_like_by)
| 2.21875 | 2 |
example_tf_loader_def.py | chunish/c- | 0 | 12797799 | # _*_ coding: utf-8 _*_
import os
import time
import numpy as np
import tensorflow as tf
class loader(object):
def __init__(self, batch_size=1, img_width=0, img_height=0):
self.batch_size = batch_size
self.img_w = img_width
self.img_h = img_height
self.batch_total = 0
def load_csv(self, csv_path):
paths = []
labels = []
only_path = True
with open(csv_path, 'rb') as ff:
for i,d in enumerate(ff):
if only_path:
path = d[:-1]
label = i
else:
path, label = d.split(',')
path = path[:-1]
paths.append(path)
labels.append(label)
self.batch_total = len(labels) // self.batch_size + 1
return paths, labels
def run(self, csv_path):
paths_list, labels_list = self.load_csv(csv_path)
img_queue, label_queue = tf.train.slice_input_producer([paths_list, labels_list], shuffle=True)
img_content = tf.read_file(img_queue)
img_data = tf.image.decode_jpeg(img_content, channels=3)
img_resize = tf.image.resize_images(img_data, [self.img_w, self.img_h])
img_standard = tf.image.per_image_standardization(img_resize)
img_batch, label_batch = tf.train.batch([img_standard, label_queue], batch_size=self.batch_size)
return img_batch, label_batch
if __name__ == '__main__':
IMG_WIDTH = 1200
IMG_HEIGHT = 1600
MAX_EPOCH = 1000
img_path = '/home/kcadmin/datasets/img_list.csv'
data = loader(batch_size=90, img_width=IMG_WIDTH, img_height=IMG_HEIGHT)
ibatch, lbatch = data.run(csv_path=img_path)
with tf.Session() as sess:
coord = tf.train.Coordinator()
thread = tf.train.start_queue_runners(sess=sess, coord=coord)
for epoch in range(MAX_EPOCH):
for batch in range(data.batch_total):
i, l = sess.run([ibatch, lbatch])
print '{}/{}, {}/{}: {},{}'.format(batch, data.batch_total, epoch, MAX_EPOCH, len(l), i.shape)
coord.request_stop()
coord.join(thread)
| 2.59375 | 3 |
deepnlpf/core/execute.py | deepnlpf/deepnlpf | 3 | 12797800 | <filename>deepnlpf/core/execute.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import deepnlpf.log as log
class Execute (object):
""" Execute Scripts External in Outher Language Programation. """
def __init__(self):
pass
def run_r(self, script, *args):
import rpy2.robjects as ro
r = ro.r
r.source(script)
return r.main(*args)
def run_java(self, jar_file, *args):
try:
import subprocess
return subprocess.check_output(['java', '-jar', jar_file, *args], shell=False)
except Exception as err:
log.logger.error(err) | 2.546875 | 3 |
website/basics/forms.py | apoorv-x12/Django-Arunachala | 0 | 12797801 | from django import forms
class InterestForm(forms.Form):
amount=forms.FloatField(label='Amount')
rate=forms.FloatField(label="Interest rate" ,min_value=5 ,max_value=50)
| 2.625 | 3 |
streamz_ml/tests/test_stream_estimator.py | remiadon/streamz-ml | 7 | 12797802 | <filename>streamz_ml/tests/test_stream_estimator.py
import pytest
from itertools import product
from streamz_ml import StreamEstimator
import numpy as np
from streamz import Stream
import pandas as pd
from streamz.dataframe import DataFrame, Series
from streamz.utils_test import wait_for, await_for
def test_no_fit_method():
class MyEstimator():
def predict(self, X):
return X
class MyStreamingEstimator(MyEstimator, StreamEstimator): pass
with pytest.raises(TypeError):
my_est = MyStreamingEstimator()
def test_no_predict_method():
class MyEstimator():
def fit(self, X):
return X
class MyStreamingEstimator(MyEstimator, StreamEstimator): pass
with pytest.raises(TypeError):
my_est = MyStreamingEstimator()
def test_check_method():
class MyEstimator(StreamEstimator):
def partial_fit(self, X, y): pass
def predict(self, X): pass
my_est = MyEstimator()
with pytest.raises(AttributeError):
my_est._check_method('score')
my_est._check_method('partial_fit')
my_est._check_method('predict')
def test_stream_inputs():
wrong_Xs = (pd.DataFrame(), None, pd.np.array([]), list())
wrong_ys = (pd.DataFrame(), pd.Series(), None, pd.np.array([]), list())
wrong_entries = product(wrong_Xs, wrong_ys)
for X, y in wrong_entries:
with pytest.raises(AssertionError):
StreamEstimator._check_stream_inputs(X, y)
class MyEstimator():
def __init__(self):
self.fit_ctr = 0
self.predict_ctr = 0
def partial_fit(self, X, y):
self.fit_ctr += 1
return self
def predict(self, X):
self.predict_ctr += 1
return np.ones(X.shape[0])
class MyStreamingEstimator(MyEstimator, StreamEstimator): pass
def test_stream_partial_fit():
X_example, y_example = pd.DataFrame({'name': [], 'amount': []}), pd.Series([])
X_stream, y_stream = Stream(), Stream()
X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example)
model = MyStreamingEstimator()
fit_results = model.stream_partial_fit(X, y)
fit_ctr_list = []
fit_results.map(lambda model: model.fit_ctr).sink(fit_ctr_list.append)
n_fits = 10
for i in range(n_fits):
X_stream.emit(X_example)
y_stream.emit(y_example)
predicate = lambda: (list(range(1, n_fits + 1)) == fit_ctr_list)
wait_for(predicate, .2)
def test_stream_predict():
n_rows = 100
X_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows})
X_stream = Stream()
X = DataFrame(X_stream, example=X_example)
model = MyStreamingEstimator()
example_data = pd.Series(pd.np.ones(X_example.shape[0]))
pred_series = model.stream_predict(X, y_example=pd.Series(example_data))
pred_df = model.stream_predict(X, y_example=pd.DataFrame(data=example_data))
pred_series_list, pred_df_list = [], []
pred_series.stream.sink(pred_series_list.append)
pred_df.stream.sink(pred_df_list.append)
n_fits = 10
for i in range(n_fits):
X_stream.emit(X_example)
ctr_predicate = lambda: (model.predict_ctr == n_fits)
target_predictions = np.ones((X_example.shape[0], n_fits))
pred_series_predicate = \
lambda: pd.np.array_equal(pd.np.concatenate(pred_series_list).reshape(-1), target_predictions.reshape(-1))
pred_df_predicate = \
lambda: pd.np.array_equal(pd.np.concatenate(pred_df_list).reshape(-1), target_predictions.reshape(-1))
await_for(ctr_predicate, .1)
await_for(pred_series_predicate, .1)
await_for(pred_df_predicate, .1)
def test_score_stream():
class MyEstimator(StreamEstimator):
def partial_fit(self, X, y): pass
def predict(self, X): pass
def score(self, X, y):
return 1
n_rows = 20
X_example, y_example = pd.DataFrame({'name': [None] * n_rows, 'amount': [None] * n_rows}), pd.Series([])
X_stream, y_stream = Stream(), Stream()
X, y = DataFrame(X_stream, example=X_example), Series(y_stream, example=y_example)
model = MyEstimator()
score_stream = model.stream_score(X, y)
score_list = list()
score_stream.stream.sink(score_list.append)
score_predicate = lambda: score_list == [1] * n_rows
await_for(score_predicate, .1) | 2.421875 | 2 |
tests/test_geckodriver.py | ramonmedeiros/geckodriver-binary | 0 | 12797803 | from selenium import webdriver
import geckodriver_binary # Adds geckodriver binary to path
def test_driver():
driver = webdriver.Firefox()
driver.get("http://www.python.org")
assert "Python" in driver.titl
driver.quit()
| 2.53125 | 3 |
Connector/info/__init__.py | bridgedragon/NodeChain | 0 | 12797804 | <reponame>bridgedragon/NodeChain
#!/usr/bin/python
from . import endpoints
| 0.832031 | 1 |
bapsflib/utils/exceptions.py | BaPSF/bapsflib | 11 | 12797805 | # This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""Exceptions specific to `bapsflib`."""
__all__ = [
"HDFMappingError",
"HDFReadControlError",
"HDFReadDigiError",
"HDFReadMSIError",
"HDFReadError",
]
class HDFMappingError(Exception):
"""Exception for failed HDF5 mappings"""
def __init__(self, device_name: str, why=""):
super().__init__(f"'{device_name}' mapping failed: {why}")
class HDFReadError(Exception):
"""Exception for failed HDF5 reading"""
pass
class HDFReadDigiError(HDFReadError):
"""Exception for failed HDF5 reading of digitizer."""
pass
class HDFReadControlError(HDFReadError):
"""Exception for failed HDF5 reading of digitizer."""
pass
class HDFReadMSIError(HDFReadError):
"""Exception for failed HDF5 reading of digitizer."""
pass
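# Added illustration (not part of the original module): a minimal, self-contained
# example of raising and catching one of the exceptions defined above; the device
# name used here is only a placeholder.
if __name__ == "__main__":
    try:
        raise HDFMappingError("Example Device", why="expected dataset is missing")
    except HDFMappingError as err:
        print(err)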
| 2.078125 | 2 |
src/create_db.py | keshik6/deep-image-retrieval | 31 | 12797806 | <gh_stars>10-100
from tqdm import tqdm
import torch
import gc
import os
import numpy as np
from sklearn.metrics import cohen_kappa_score
from model import TripletNet, create_embedding_net
from dataset import QueryExtractor, EmbeddingDataset
from torchvision import transforms
import torchvision.models as models
from torch.utils.data import DataLoader
from utils import perform_pca_on_single_vector
def create_embeddings_db_pca(model_weights_path, img_dir, fts_dir):
"""
Given a model weights path, this function creates a triplet network, loads the parameters and generates the dimension
reduced (using pca) vectors and save it in the provided feature directory.
Args:
model_weights_path : path of trained weights
img_dir : directory that holds the images
fts_dir : directory to store the embeddings
Returns:
None
Eg run:
create_embeddings_db_pca("./weights/oxbuild-exp-3.pth", img_dir="./data/oxbuild/images/", fts_dir="./fts_pca/oxbuild/")
"""
# Create cuda parameters
use_cuda = torch.cuda.is_available()
np.random.seed(2019)
torch.manual_seed(2019)
device = torch.device("cuda" if use_cuda else "cpu")
print("Available device = ", device)
# Create transforms
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transforms_test = transforms.Compose([transforms.Resize(460),
transforms.FiveCrop(448),
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
transforms.Lambda(lambda crops: torch.stack([transforms.Normalize(mean=mean, std=std)(crop) for crop in crops])),
])
    # Create image database
if "paris" in img_dir:
print("> Blacklisted images must be removed")
blacklist = ["paris_louvre_000136.jpg",
"paris_louvre_000146.jpg",
"paris_moulinrouge_000422.jpg",
"paris_museedorsay_001059.jpg",
"paris_notredame_000188.jpg",
"paris_pantheon_000284.jpg",
"paris_pantheon_000960.jpg",
"paris_pantheon_000974.jpg",
"paris_pompidou_000195.jpg",
"paris_pompidou_000196.jpg",
"paris_pompidou_000201.jpg",
"paris_pompidou_000467.jpg",
"paris_pompidou_000640.jpg",
"paris_sacrecoeur_000299.jpg",
"paris_sacrecoeur_000330.jpg",
"paris_sacrecoeur_000353.jpg",
"paris_triomphe_000662.jpg",
"paris_triomphe_000833.jpg",
"paris_triomphe_000863.jpg",
"paris_triomphe_000867.jpg",]
files = os.listdir(img_dir)
for blacklisted_file in blacklist:
files.remove(blacklisted_file)
QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(files)]
else:
QUERY_IMAGES = [os.path.join(img_dir, file) for file in sorted(os.listdir(img_dir))]
# Create dataset
eval_dataset = EmbeddingDataset(img_dir, QUERY_IMAGES, transforms=transforms_test)
eval_loader = DataLoader(eval_dataset, batch_size=1, num_workers=0, shuffle=False)
# Create embedding network
resnet_model = create_embedding_net()
model = TripletNet(resnet_model)
model.load_state_dict(torch.load(model_weights_path))
model.to(device)
model.eval()
# Create features
with torch.no_grad():
for idx, image in enumerate(tqdm(eval_loader)):
# Move image to device and get crops
image = image.to(device)
bs, ncrops, c, h, w = image.size()
# Get output
output = model.get_embedding(image.view(-1, c, h, w))
output = output.view(bs, ncrops, -1).mean(1).cpu().numpy()
# Perform pca
output = perform_pca_on_single_vector(output)
# Save fts
img_name = (QUERY_IMAGES[idx].split("/")[-1]).replace(".jpg", "")
save_path = os.path.join(fts_dir, img_name)
np.save(save_path, output.flatten())
del output, image
gc.collect()
# if __name__ == '__main__':
# create_embeddings_db_pca("./weights/oxbuild-exp-3.pth", img_dir="./data/oxbuild/images/", fts_dir="./fts_pca/oxbuild/") | 2.203125 | 2 |
ansible_nwd/utilities/utilities.py | VasseurLaurent/ansible-nwd | 8 | 12797807 | <reponame>VasseurLaurent/ansible-nwd
import glob
import os
def get_list_files(path,list_files):
for filename in glob.iglob(path + '**/*', recursive=True):
if os.path.isfile(filename):
relative_paths = os.path.relpath(filename, path)
list_files.append(relative_paths)
return list_files
| 2.5625 | 3 |
random surface growth/generate_anim_with_one_amoeba.py | ricsirke/simulations | 0 | 12797808 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
N = 100
world = np.zeros((N, N))
def get_neighs(i,j):
neighs = []
if i+1 < N:
neighs.append([i+1,j])
if j+1 < N:
neighs.append([i,j+1])
if 0 <= i-1:
neighs.append([i-1,j])
if 0 <= j-1:
neighs.append([i,j-1])
return neighs
starting_point_coord = int(np.floor(N/2))
starting_point = [starting_point_coord, starting_point_coord]
amoebe = [starting_point]
amoebe_mark = 1
world[starting_point_coord, starting_point_coord] = amoebe_mark  # mark the seed cell at the centre of the grid
perimeters = get_neighs(*starting_point)
####################################################################
fig = plt.figure()
im = plt.imshow(world)
pos_x = 0
pos_y = -8
player_score_text_handle = plt.text(pos_x, pos_y, "blocks: 0")
perimeter_score_text_handle = plt.text(0, -3, "perimeter blocks: 0")
def animate(i):
global perimeters, world, im
random_index = np.random.randint(0, len(perimeters))
random_perimeter = perimeters.pop(random_index)
print(random_perimeter)
neighs = get_neighs(*random_perimeter)
# filter inner points
new_perimeters = []
for neigh in neighs:
if world[neigh[0], neigh[1]] != amoebe_mark and neigh not in perimeters:
new_perimeters.append(neigh)
#######
perimeters = perimeters + new_perimeters
world[random_perimeter[0], random_perimeter[1]] = amoebe_mark
im.set_array(world)
player_score_text_handle.set_text("player: " + str(i))
perimeter_score_text_handle.set_text("perimeter:" + str(len(perimeters)))
interval_ms = 50
anim = FuncAnimation(fig, animate, frames=2000, interval = interval_ms, repeat = False)
plt.axis('off')
anim.save("anim.mp4")
####################################################################
#plt.show()
| 3.203125 | 3 |
system/models/collect.py | u-n-i-c-o-rn/jimi | 2 | 12797809 | <gh_stars>1-10
import time
from core.models import action, conduct, webui
from core import helpers, logging, cache, settings
class _collect(action._action):
limit = int()
# class _properties(webui._properties):
# def generate(self,classObject):
# formData = []
# formData.append({"type" : "input", "schemaitem" : "_id", "textbox" : classObject._id})
# formData.append({"type" : "input", "schemaitem" : "name", "textbox" : classObject.name})
# formData.append({"type" : "input", "schemaitem" : "limit", "textbox" : classObject.limit, "tooltip" : "Defines the number of events to collect before resuming"})
# formData.append({"type" : "checkbox", "schemaitem" : "enabled", "checked" : classObject.enabled})
# formData.append({"type" : "checkbox", "schemaitem" : "log", "checked" : classObject.log})
# formData.append({"type" : "input", "schemaitem" : "comment", "textbox" : classObject.comment})
# return formData
def __init__(self):
self.events = []
def doAction(self,data):
try:
if "skip" in data["flowData"]:
del data["flowData"]["skip"]
return { "result" : True, "rc" : 0 }
except KeyError:
pass
self.events.append(data["flowData"]["event"])
self.data = data
if self.limit != 0 and self.limit < len(self.events):
self.continueFlow()
# Returning false to stop flow continue
return { "result" : False, "rc" : 9 }
def continueFlow(self):
if self.events:
tempDataCopy = conduct.copyData(self.data)
tempDataCopy["flowData"]["event"] = self.events
tempDataCopy["flowData"]["skip"] = 1
self.events = []
tempDataCopy["flowData"]["eventStats"] = { "first" : True, "current" : 0, "total" : 1, "last" : True }
self.data["persistentData"]["system"]["conduct"].triggerHandler(self.data["flowData"]["flow_id"],tempDataCopy,flowIDType=True)
def postRun(self):
self.continueFlow()
| 2.03125 | 2 |
face_applications_dlib/face_headpose_dlib.py | AlbertaBeef/vitis_ai_python_examples | 1 | 12797810 | <reponame>AlbertaBeef/vitis_ai_python_examples<gh_stars>1-10
'''
Copyright 2021 Avnet Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# USAGE
# python face_headpose_dlib.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35]
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import pathlib
import xir
import os
import math
import threading
import time
import sys
import argparse
from imutils.video import FPS
sys.path.append(os.path.abspath('../'))
sys.path.append(os.path.abspath('./'))
from vitis_ai_vart.facedetect import FaceDetect
from vitis_ai_vart.facelandmark import FaceLandmark
from vitis_ai_vart.utils import get_child_subgraph_dpu
import dlib
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=False,
help = "input camera identifier (default = 0)")
ap.add_argument("-d", "--detthreshold", required=False,
help = "face detector softmax threshold (default = 0.55)")
ap.add_argument("-n", "--nmsthreshold", required=False,
help = "face detector NMS threshold (default = 0.35)")
args = vars(ap.parse_args())
if not args.get("input",False):
inputId = 0
else:
inputId = int(args["input"])
print('[INFO] input camera identifier = ',inputId)
if not args.get("detthreshold",False):
detThreshold = 0.55
else:
detThreshold = float(args["detthreshold"])
print('[INFO] face detector - softmax threshold = ',detThreshold)
if not args.get("nmsthreshold",False):
nmsThreshold = 0.35
else:
nmsThreshold = float(args["nmsthreshold"])
print('[INFO] face detector - NMS threshold = ',nmsThreshold)
# Initialize Vitis-AI/DPU based face detector
densebox_xmodel = "/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.xmodel"
densebox_graph = xir.Graph.deserialize(densebox_xmodel)
densebox_subgraphs = get_child_subgraph_dpu(densebox_graph)
assert len(densebox_subgraphs) == 1 # only one DPU kernel
densebox_dpu = vart.Runner.create_runner(densebox_subgraphs[0],"run")
dpu_face_detector = FaceDetect(densebox_dpu,detThreshold,nmsThreshold)
dpu_face_detector.start()
# Initialize Vitis-AI/DPU based face landmark
landmark_xmodel = "/usr/share/vitis_ai_library/models/face_landmark/face_landmark.xmodel"
landmark_graph = xir.Graph.deserialize(landmark_xmodel)
landmark_subgraphs = get_child_subgraph_dpu(landmark_graph)
assert len(landmark_subgraphs) == 1 # only one DPU kernel
landmark_dpu = vart.Runner.create_runner(landmark_subgraphs[0],"run")
dpu_face_landmark = FaceLandmark(landmark_dpu)
dpu_face_landmark.start()
# Initialize DLIB based face detector
dlib_face_detector = dlib.get_frontal_face_detector()
# Initialize DLIB based face landmark
dlib_landmark_model = "./models/shape_predictor_68_face_landmarks.dat"
dlib_face_landmark = dlib.shape_predictor(dlib_landmark_model)
# algorithm selection
use_dlib_detection = False
use_dlib_landmarks = True
print("[INFO] face detection = VART")
print("[INFO] face landmarks = DLIB")
# Initialize the camera input
print("[INFO] starting camera input ...")
cam = cv2.VideoCapture(inputId)
cam.set(cv2.CAP_PROP_FRAME_WIDTH,640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
if not (cam.isOpened()):
print("[ERROR] Failed to open camera ", inputId )
exit()
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
                            (225.0, 170.0, -135.0),      # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
ret,frame = cam.read()
size=frame.shape
focal_length = size[1]
center = (size[1]/2, size[0]/2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype = "double"
)
print("[INFO] Camera Matrix :\n {0}".format(camera_matrix));
# start the FPS counter
fps = FPS().start()
# init the real-time FPS display
rt_fps_count = 0;
rt_fps_time = cv2.getTickCount()
rt_fps_valid = False
rt_fps = 0.0
rt_fps_message = "FPS: {0:.2f}".format(rt_fps)
rt_fps_x = 10
rt_fps_y = size[0]-10
# loop over the frames from the video stream
while True:
# Update the real-time FPS counter
if rt_fps_count == 0:
rt_fps_time = cv2.getTickCount()
# Capture image from camera
ret,frame = cam.read()
dlib_image = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
faces = []
if use_dlib_detection == False:
# Vitis-AI/DPU based face detector
faces = dpu_face_detector.process(frame)
#print(faces)
if use_dlib_detection == True:
# DLIB based face detector
dlib_faces = dlib_face_detector(dlib_image, 0)
for face in dlib_faces:
faces.append( (face.left(),face.top(),face.right(),face.bottom()) )
#print(faces)
# loop over the faces
for i,(left,top,right,bottom) in enumerate(faces):
# draw a bounding box surrounding the object so we can
# visualize it
cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2)
# extract the face ROI
startX = int(left)
startY = int(top)
endX = int(right)
endY = int(bottom)
#print( startX, endX, startY, endY )
widthX = endX-startX
heightY = endY-startY
face = frame[startY:endY, startX:endX]
if use_dlib_landmarks == False:
# extract face landmarks
landmarks = dpu_face_landmark.process(face)
# calculate coordinates for full frame
for i in range(5):
landmarks[i,0] = startX + landmarks[i,0]*widthX
landmarks[i,1] = startY + landmarks[i,1]*heightY
# draw landmarks
#for i in range(5):
# x = int(landmarks[i,0])
# y = int(landmarks[i,1])
# cv2.circle( frame, (x,y), 3, (255,255,255), 2)
# prepare 2D points
image_points = np.array([
(landmarks[2,0], landmarks[2,1]), # Nose tip
(landmarks[2,0], landmarks[2,1]), # Chin (place-holder for now)
(landmarks[0,0], landmarks[0,1]), # Left eye left corner
                            (landmarks[1,0], landmarks[1,1]),     # Right eye right corner
(landmarks[3,0], landmarks[3,1]), # Left Mouth corner
(landmarks[4,0], landmarks[4,1]) # Right mouth corner
], dtype="double")
# estimate approximate location of chin
# let's assume that the chin location will behave similar as the nose location
eye_center_x = (image_points[2][0] + image_points[3][0])/2;
eye_center_y = (image_points[2][1] + image_points[3][1])/2;
nose_offset_x = (image_points[0][0] - eye_center_x);
nose_offset_y = (image_points[0][1] - eye_center_y);
mouth_center_x = (image_points[4][0] + image_points[5][0])/2;
mouth_center_y = (image_points[4][1] + image_points[5][1])/2;
image_points[1] = (mouth_center_x + nose_offset_x, mouth_center_y + nose_offset_y);
#print(image_points)
if use_dlib_landmarks == True:
# extract face landmarks with DLIB
dlib_rect = dlib.rectangle( startX,startY,endX,endY )
dlib_landmarks = dlib_face_landmark(dlib_image,dlib_rect)
# draw landmarks
#for i in range(dlib_landmarks.num_parts):
# x = int(dlib_landmarks.part(i).x)
# y = int(dlib_landmarks.part(i).y)
# cv2.circle( frame, (x,y), 3, (255,255,255), 2)
# prepare 2D points
image_points = np.array([
(dlib_landmarks.part(30).x, dlib_landmarks.part(30).y), # Nose tip
(dlib_landmarks.part( 8).x, dlib_landmarks.part( 8).y), # Chin
(dlib_landmarks.part(36).x, dlib_landmarks.part(36).y), # Left eye left corner
                            (dlib_landmarks.part(45).x, dlib_landmarks.part(45).y),     # Right eye right corner
(dlib_landmarks.part(48).x, dlib_landmarks.part(48).y), # Left Mouth corner
(dlib_landmarks.part(54).x, dlib_landmarks.part(54).y) # Right mouth corner
], dtype="double")
#print(image_points)
# calculate head pose
dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
#print "Rotation Vector:\n {0}".format(rotation_vector)
#print "Translation Vector:\n {0}".format(translation_vector)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
for p in image_points:
#cv2.circle(face, (int(p[0]), int(p[1])), 3, (0,0,255), -1)
#cv2.circle(face, (int(p[0]), int(p[1])), 3, (255,255,255), 2)
cv2.circle(frame, (int(p[0]), int(p[1])), 3, (255,255,255), 2)
# draw head pose vector
p1 = ( int(image_points[0][0]), int(image_points[0][1]))
p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
cv2.line(frame, p1, p2, (255,0,0), 2)
# Display Status
status = "Status :"
status = status + " FaceDetect="
if use_dlib_detection == True:
status = status + "DLIB"
else:
status = status + "VART"
status = status + " Landmark="
if use_dlib_landmarks == True:
status = status + "DLIB"
else:
status = status + "VART"
if rt_fps_valid == True:
status = status + " " + rt_fps_message
cv2.putText(frame, status, (rt_fps_x,rt_fps_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1, cv2.LINE_AA)
# Display the processed image
cv2.imshow("Head Pose Estimation", frame)
key = cv2.waitKey(1) & 0xFF
# Update the FPS counter
fps.update()
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# if the `d` key was pressed, toggle between detection algorithms
if key == ord("d"):
use_dlib_detection = not use_dlib_detection
if use_dlib_detection == True:
print("[INFO] face detection = DLIB")
else:
print("[INFO] face detection = VART")
# if the `l` key was pressed, toggle between landmark algorithms
if key == ord("l"):
use_dlib_landmarks = not use_dlib_landmarks
if use_dlib_landmarks == True:
print("[INFO] face landmarks = DLIB")
else:
print("[INFO] face landmarks = VART")
# Update the real-time FPS counter
rt_fps_count = rt_fps_count + 1
if rt_fps_count >= 10:
t = (cv2.getTickCount() - rt_fps_time)/cv2.getTickFrequency()
rt_fps_valid = True
rt_fps = 10.0/t
rt_fps_message = "FPS: {0:.2f}".format(rt_fps)
#print("[INFO] ",rt_fps_message)
rt_fps_count = 0
# Stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] elapsed FPS: {:.2f}".format(fps.fps()))
# Stop the face detector
dpu_face_detector.stop()
del densebox_dpu
dpu_face_landmark.stop()
del landmark_dpu
# Cleanup
cv2.destroyAllWindows()
| 2.1875 | 2 |
PyTeacher/bgui/progress_bar.py | Banshee1221/PyRob | 40 | 12797811 | from .gl_utils import *
from .widget import Widget, BGUI_DEFAULT
class ProgressBar(Widget):
"""A solid progress bar.
	Controlled via the 'percent' property, which takes a floating point number between 0 and 1."""
theme_section = 'ProgressBar'
theme_options = {
'FillColor1': (0.0, 0.42, 0.02, 1.0),
'FillColor2': (0.0, 0.42, 0.02, 1.0),
'FillColor3': (0.0, 0.42, 0.02, 1.0),
'FillColor4': (0.0, 0.42, 0.02, 1.0),
'BGColor1': (0, 0, 0, 1),
'BGColor2': (0, 0, 0, 1),
'BGColor3': (0, 0, 0, 1),
'BGColor4': (0, 0, 0, 1),
'BorderSize': 1,
'BorderColor': (0, 0, 0, 1),
}
def __init__(self, parent, name=None, percent=1.0, sub_theme='', aspect=None, size=[1, 1], pos=[0, 0], options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param percent: the initial percent
:param sub_theme: sub type of theme to use
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
theme = self.theme
self.fill_colors = [
theme['FillColor1'],
theme['FillColor2'],
theme['FillColor3'],
theme['FillColor4'],
]
self.bg_colors = [
theme['BGColor1'],
theme['BGColor2'],
theme['BGColor3'],
theme['BGColor4'],
]
self.border_color = theme['BorderColor']
self.border = theme['BorderSize']
self._percent = percent
@property
def percent(self):
return self._percent
@percent.setter
def percent(self, value):
self._percent = max(0.0, min(1.0, value))
def _draw(self):
"""Draw the progress bar"""
# Enable alpha blending
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Enable polygon offset
glEnable(GL_POLYGON_OFFSET_FILL)
glPolygonOffset(1.0, 1.0)
mid_x = self.gl_position[0][0] + (self.gl_position[1][0] - self.gl_position[0][0]) * self._percent
# Draw fill
glBegin(GL_QUADS)
glColor4f(self.fill_colors[0][0], self.fill_colors[0][1], self.fill_colors[0][2], self.fill_colors[0][3])
glVertex2f(self.gl_position[0][0], self.gl_position[0][1])
glColor4f(self.fill_colors[1][0], self.fill_colors[1][1], self.fill_colors[1][2], self.fill_colors[1][3])
glVertex2f(mid_x, self.gl_position[1][1])
glColor4f(self.fill_colors[2][0], self.fill_colors[2][1], self.fill_colors[2][2], self.fill_colors[2][3])
glVertex2f(mid_x, self.gl_position[2][1])
glColor4f(self.fill_colors[3][0], self.fill_colors[3][1], self.fill_colors[3][2], self.fill_colors[3][3])
glVertex2f(self.gl_position[3][0], self.gl_position[3][1])
glEnd()
# Draw bg
glBegin(GL_QUADS)
glColor4f(self.bg_colors[0][0], self.bg_colors[0][1], self.bg_colors[0][2], self.bg_colors[0][3])
glVertex2f(mid_x, self.gl_position[0][1])
glColor4f(self.bg_colors[1][0], self.bg_colors[1][1], self.bg_colors[1][2], self.bg_colors[1][3])
glVertex2f(self.gl_position[1][0], self.gl_position[1][1])
glColor4f(self.bg_colors[2][0], self.bg_colors[2][1], self.bg_colors[2][2], self.bg_colors[2][3])
glVertex2f(self.gl_position[2][0], self.gl_position[2][1])
glColor4f(self.bg_colors[3][0], self.bg_colors[3][1], self.bg_colors[3][2], self.bg_colors[3][3])
glVertex2f(mid_x, self.gl_position[3][1])
glEnd()
# Draw outline
glDisable(GL_POLYGON_OFFSET_FILL)
r, g, b, a = self.border_color
glColor4f(r, g, b, a)
glPolygonMode(GL_FRONT, GL_LINE)
glLineWidth(self.border)
glBegin(GL_QUADS)
for i in range(4):
glVertex2f(self.gl_position[i][0], self.gl_position[i][1])
glEnd()
glPolygonMode(GL_FRONT, GL_FILL)
Widget._draw(self)
| 2.859375 | 3 |
study_sample/enchance_oop/use_type/enhance_oop_type.py | dantefung/Python-Codebase | 0 | 12797812 | ############
# use_type
############
# type()
'''
The biggest difference between dynamic and static languages lies in how functions and classes are defined: they are not defined at compile time, but created dynamically at run time.
'''
# Say we want to define a Hello class; we simply write a hello.py module:
'''
class Hello(object):
def hello(self, name='world'):
print('Hello, %s.' % name)
'''
# When the Python interpreter loads the hello module, it executes all of the module's statements in order;
# the result is that a Hello class object is created dynamically. Test it as follows:
from hello import Hello
h = Hello()
h.hello()
print(type(Hello))
print(type(h))
'''
The type() function can inspect the type of a type or of a variable: Hello is a class, so its type is type, while h is an instance, so its type is class Hello.
We said that class definitions are created dynamically at run time, and the way to create a class is to call the type() function.
type() can both return an object's type and create a brand-new type. For example, we can create the Hello class
through type() without ever writing a class Hello(object)... definition:
'''
def fn(self, name='world'): # define the function first
print('Hello, %s.' % name)
Hello = type('Hello', (object,), dict(hello=fn)) # create the Hello class
'''
To create a class object, type() takes three arguments in one call:
1. the name of the class;
2. the tuple of parent classes to inherit from; note that Python supports multiple inheritance, and with a single parent don't forget the single-element tuple syntax;
3. a dict binding method names to functions; here we bind the function fn to the method name hello.
A class created through type() is exactly the same as one written with the class statement, because when the Python
interpreter meets a class definition it merely scans the syntax and then calls type() to create the class.
Normally we define classes with class Xxx..., but type() also lets us create classes dynamically. In other words,
a dynamic language natively supports creating classes at run time, which is very different from a static language:
to create a class at run time in a static language you must construct a source-code string and invoke the compiler, or generate bytecode with some tool, which is essentially dynamic compilation and very complex.
'''
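# A quick check that the class created via type() behaves exactly like one
# defined with the class statement:
h2 = Hello()
h2.hello('Python')
print(type(Hello))
print(type(h2))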
#################
# metaclass
#################
'''
Besides creating classes dynamically with type(), you can also control how classes are created by using a metaclass.
A metaclass is, literally, a class of classes. The simple explanation is:
once a class is defined, instances can be created from it, so: define the class first, then create instances.
But what if we want to create the class itself? Then it has to be created from a metaclass, so: define the metaclass first, then create the class.
Putting it together: define the metaclass, use it to create classes, and finally create instances.
So a metaclass lets you create or modify classes. In other words, you can think of a class as an "instance" created by its metaclass.
Metaclasses are the hardest magic in Python's object model to understand and to use. Normally you will never run into a situation that requires one,
so it is fine if what follows is hard to grasp, because you will basically never need it.
Let's start with a simple example: this metaclass adds an add method to our custom MyList.
Define ListMetaclass; by convention, metaclass names end with Metaclass so that the class is clearly marked as a metaclass:
'''
# a metaclass is a template for classes, so it must derive from 'type':
class ListMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['add'] = lambda self, value: self.append(value)
return type.__new__(cls, name, bases, attrs)
'''
With ListMetaclass in place, we still have to tell Python to use ListMetaclass to customize the class when defining it, by passing the keyword argument metaclass:
'''
class MyList(list, metaclass=ListMetaclass):
pass
'''
When we pass the keyword argument metaclass, the magic takes effect:
it tells the Python interpreter that MyList must be created
through ListMetaclass.__new__().
There we can modify the class definition, for example add new methods, and then return the modified definition.
The arguments received by __new__() are, in order:
1. the class object about to be created
2. the name of the class
3. the tuple of parent classes it inherits from
4. the dict of the class's methods and attributes
Test whether MyList can call the add() method:
'''
L = MyList()
L.add(1)
print(L)
'''
whereas a plain list has no add() method:
'''
# L2 = list()
# L2.add(1)
'''
What is the point of modifying a class dynamically? Wouldn't it be simpler to write add() directly in the MyList definition?
Normally, yes, you should write it directly; modifying a class through a metaclass is rather perverse.
But there are always cases where the class definition has to be modified through a metaclass. An ORM is a typical example.
ORM stands for "Object Relational Mapping": each row of a relational database is mapped to an object, i.e. one class corresponds to one table, which makes the code simpler because you never write SQL statements directly.
To build an ORM framework, all classes can only be defined dynamically, because only the user can define the classes that match the structure of their own tables.
Let's try to write an ORM framework.
The first step when writing a low-level module is to write out the calling interface. For example, a user of this ORM framework who wants a User class to operate on the corresponding database table User should be able to write code like this:
class User(Model):
    # map class attributes to table columns:
    id = IntegerField('id')
    name = StringField('username')
    email = StringField('email')
    password = StringField('password')
# create an instance:
u = User(id=12345, name='Michael', email='<EMAIL>', password='<PASSWORD>')
# save it to the database:
u.save()
Here the parent class Model and the attribute types StringField and IntegerField are provided by the ORM framework; the remaining magic, such as save(), is completed automatically by the metaclass. Writing the metaclass is fairly complex, but for the user of the ORM it is remarkably simple.
Now let's implement the ORM according to the interface above.
First define the Field class, which stores the column name and column type of a database table field:
'''
class Field(object):
def __init__(self, name, column_type):
self.name = name
self.column_type = column_type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
'''
On top of Field, define the various concrete field types, such as StringField, IntegerField and so on:
'''
class StringField(Field):
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
'''
The next step is to write the most complex piece, the ModelMetaclass:
'''
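# --- Added sketch, not part of the original file ---------------------------
# The file stops right before the ModelMetaclass it announces. Below is one
# possible minimal implementation that follows the interface described above.
# For illustration, save() only prints the SQL it would run; no real database
# driver is assumed.
class ModelMetaclass(type):
    def __new__(cls, name, bases, attrs):
        if name == 'Model':
            # do not transform the base Model class itself
            return type.__new__(cls, name, bases, attrs)
        mappings = dict()
        for k, v in attrs.items():
            if isinstance(v, Field):
                mappings[k] = v                   # collect attribute-to-column mappings
        for k in mappings.keys():
            attrs.pop(k)                          # remove class attributes so they do not shadow instance values
        attrs['__mappings__'] = mappings          # save the attribute/column mapping
        attrs['__table__'] = name                 # assume the table name equals the class name
        return type.__new__(cls, name, bases, attrs)

class Model(dict, metaclass=ModelMetaclass):
    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        self[key] = value

    def save(self):
        fields, params, args = [], [], []
        for k, v in self.__mappings__.items():
            fields.append(v.name)
            params.append('?')
            args.append(getattr(self, k, None))
        sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
        print('SQL: %s' % sql)
        print('ARGS: %s' % str(args))

# Hypothetical usage, matching the interface sketched in the docstring above:
# class User(Model):
#     id = IntegerField('id')
#     name = StringField('username')
# u = User(id=12345, name='Michael')
# u.save()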
| 4.40625 | 4 |
tests/snc/agents/general_heuristics/test_priority_agents.py | dmcnamee/snc | 5 | 12797813 | import numpy as np
import pytest
import snc.environments.job_generators.discrete_review_job_generator \
as drjg
import snc.environments.controlled_random_walk as crw
import snc.environments.state_initialiser as si
import snc.agents.general_heuristics.random_nonidling_agent \
as random_nonidling_agent
import snc.agents.general_heuristics.longest_buffer_priority_agent \
as longest_priority_agent
import snc.agents.general_heuristics.custom_activity_priority_agent \
as custom_priority_agent
def get_null_env_params(state, num_resources=None, buffer_processing_matrix=None,
constituency_matrix=None):
num_buffers = state.shape[0]
arrival_rate = np.ones_like(state)
if num_resources is None:
num_resources = num_buffers
if buffer_processing_matrix is None:
buffer_processing_matrix = -np.triu(np.ones((num_buffers, num_resources)))
if constituency_matrix is None:
constituency_matrix = np.zeros((num_resources, num_resources))
time_interval = 1
return {
"cost_per_buffer": np.zeros_like(state),
"capacity": np.zeros_like(state),
"constituency_matrix": constituency_matrix,
"job_generator": drjg.DeterministicDiscreteReviewJobGenerator(
arrival_rate, buffer_processing_matrix, sim_time_interval=time_interval
),
"state_initialiser": si.DeterministicCRWStateInitialiser(state),
"job_conservation_flag": True,
"list_boundary_constraint_matrices": None,
}
def test_random_heuristic_agent_starving():
# Single server queue
safety_stock = 10.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.zeros((1, 1))
def test_random_heuristic_agent():
# Single server queue
safety_stock = 1.0
state = 1.1 * np.ones((1, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.ones((1, 1))
def test_random_heuristic_agent_multiple_buffers_eye_condition_starving():
# Station scheduling three buffers, each of them having to be above safety stock
safety_stock = 10.0
state = 5 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.eye(3)]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((3, 1)))
def test_random_heuristic_agent_multiple_buffers_eye_condition():
# Station scheduling three buffers, each of them having to be above safety stock
safety_stock = 1.0
state = 1.1 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.eye(3)]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_random_heuristic_agent_multiple_buffers_sum_condition_starving():
# Station scheduling three buffers, the sum of their size having to be above safety stock
safety_stock = 10.0
state = 3 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((3, 1)))
def test_random_heuristic_agent_multiple_buffers_sum_condition():
# Station scheduling three buffers, the sum of their size having to be above safety stock
safety_stock = 10.0
state = 5 * np.ones((3, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.ones((1, 3))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 3))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond_starving():
# Two stations, each one scheduling two buffers, each of them having to be above safety stock.
safety_stock = 10.0
state = 5 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))),
np.hstack((np.zeros((2, 2)), np.eye(2)))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((4, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_eye_cond():
# Two stations, each one scheduling two buffers, each of them having to be above safety stock.
safety_stock = 9.9
state = 10 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.hstack((np.eye(2), np.zeros((2, 2)))),
np.hstack((np.zeros((2, 2)), np.eye(2)))]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 2
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_starving():
# Two stations, each one scheduling two buffers, the sum of their size having to be above
# safety stock.
safety_stock = 10
state = 4 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((4, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = 5 * np.ones((4, 1))
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 2
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_1_starve():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = np.array([4, 5, 5, 5])[:, None]
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[2:4]) == 1 and np.all(action[0:2] == np.zeros((2, 1)))
def test_random_heuristic_agent_multiple_buffers_multiple_resources_sum_cond_2_starve():
# Two stations, each one scheduling two buffers, the sum of their size having to be above safety
# stock.
safety_stock = 9.9
state = np.array([5, 5, 5, 4])[:, None]
env_params = get_null_env_params(state)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = random_nonidling_agent.RandomNonIdlingAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.zeros((2, 1)))
def test_priority_nonidling_heuristic_agent_starving():
# Single server queue
buffer_processing_matrix = - np.ones((1, 1))
safety_stock = 10.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert action == np.zeros((1, 1))
def test_priority_nonidling_heuristic_agent():
# Single server queue
buffer_processing_matrix = - np.ones((1, 1))
safety_stock = 4.0
state = 5 * np.ones((1, 1))
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 1))
env_params["list_boundary_constraint_matrices"] = [np.ones((1, 1))]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock, name="LPAAgent")
action = agent.map_state_to_actions(state)
assert action == np.ones((1, 1))
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_starving():
# One station scheduling two buffers, one larger than the other, but both below safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.zeros((2, 1)))
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_small_one_starve():
# One station scheduling two buffers, one larger than the other. Only the large one is above
# safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([9, 11])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multi_buffers_eye_cond_small_one_starve_reverse_ord():
# One station scheduling two buffers, one larger than the other. Only the large one is above
# safety stock, swap order with respect to previous test.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([11, 10])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition():
# One station scheduling two buffers, one larger than the other, both above safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([30, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_eye_condition_reverse_order():
# One station scheduling two buffers, one larger than the other, both above safety stock (swap
# order with previous test).
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([20, 30])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_eye_condition():
# One station scheduling two buffers, both equal and above safety stock.
buffer_processing_matrix = - np.eye(2)
safety_stock = 10.0
state = np.array([11, 11])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.ones((1, 2))
env_params["list_boundary_constraint_matrices"] = [np.eye(2)]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action) == 1
def test_priority_nonidling_heuristic_agent_multiple_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 20, 30])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0, 0, 1])[:, None])
def test_priority_nonidling_heuristic_agent_multi_buffers_and_resources_sum_cond_reverse_order():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([20, 30, 30, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([0, 1, 1, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_buffers_and_resources_sum_cond_2_starve():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.all(action == np.array([1, 0, 0, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_largest_buffers_multiple_resources_sum_cond():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, 0, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 30, 9, 5])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 0, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
assert np.sum(action[0:2]) == 1 and np.all(action[2:4] == np.array([0, 0])[:, None])
def test_priority_nonidling_heuristic_agent_multiple_activities_buffers_and_resources():
# Two stations, each one scheduling two buffers. The stations are connected in serial, such that
# buffer 1 is connected with buffer 3, and 2 with 4.
# Kind of condition doesn't matter since the largest buffer has to be above safety stock in this
# agent.
buffer_processing_matrix = np.array([[-1, 0, -1, 0],
[0, -1, 0, 0],
[1, 0, -1, 0],
[0, 1, 0, -1]])
safety_stock = 10
state = np.array([30, 20, 5, 20])[:, None]
env_params = get_null_env_params(
state, buffer_processing_matrix=buffer_processing_matrix)
env_params["constituency_matrix"] = np.array([[1, 1, 1, 0], [0, 0, 1, 1]])
env_params["list_boundary_constraint_matrices"] = [np.array([[1, 1, 0, 0], [0, 0, 1, 0]]),
np.array([[0, 0, 1, 1]])]
env = crw.ControlledRandomWalk(**env_params)
agent = longest_priority_agent.LongestBufferPriorityAgent(env, safety_stock)
action = agent.map_state_to_actions(state)
    assert (action[0] + action[2] == 1) and (action[1] == 0) and (action[3] == 1)
def test_priority_heuristic_agent_init_all_resources_given():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
assert agent.priorities == priorities
def test_priority_heuristic_agent_init_not_all_resources_given():
priorities = {0: 0, 2: 5}
expected_priorities = {0: 0, 1: None, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
assert agent.priorities == expected_priorities
def test_priority_heuristic_agent_init_wrong_activity_given():
priorities = {0: 0, 2: 1}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
with pytest.raises(AssertionError):
_ = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
def test_priority_heuristic_agent_sample_random_action_empty_possible_actions():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [0.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [1], [0], [0], [0], [0]])
activities = np.array([3, 4, 5, 6])
updated_action = agent.sample_random_actions(state=state, action=action, activities=activities)
assert np.all(action == updated_action)
def test_priority_heuristic_agent_sample_random_action_one_possible_action():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [0.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., -1., 0., 0., 0., 0.],
[0., -1., 0., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [0], [0], [1], [0], [0]])
expected_action = np.array([[1], [0], [1], [0], [1], [0], [0]])
activities = np.array([1, 2])
updated_action = agent.sample_random_actions(state=state, action=action, activities=activities)
assert np.all(expected_action == updated_action)
def test_priority_heuristic_agent_sample_random_action_multiple_possible_actions():
np.random.seed(42)
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
action = np.array([[1], [0], [1], [0], [0], [0], [0]])
expected_action = np.array([[1], [0], [1], [0.25], [0.25], [0.25], [0.25]])
activities = np.array([3, 4, 5, 6])
num_sim = int(1e4)
updated_action = np.zeros((buffer_processing_matrix.shape[1], num_sim))
for i in np.arange(num_sim):
updated_action[:, [i]] = agent.sample_random_actions(state=state, action=action,
activities=activities)
average_updated_action = np.sum(updated_action, axis=1) / float(num_sim)
np.testing.assert_array_almost_equal(average_updated_action.reshape(-1, 1), expected_action,
decimal=2)
def test_priority_heuristic_agent_map_state_to_actions_no_priorities():
np.random.seed(42)
priorities = {}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_action = np.array([[1], [0.5], [0.5], [0.25], [0.25], [0.25], [0.25]])
num_sim = int(1e4)
actions = np.zeros((buffer_processing_matrix.shape[1], num_sim))
for i in np.arange(num_sim):
actions[:, [i]] = agent.map_state_to_actions(state=state)
average_action = np.sum(actions, axis=1) / float(num_sim)
np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_action,
decimal=2)
def test_priority_heuristic_agent_map_state_to_actions_full_priorities_empty_buffer():
np.random.seed(41)
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [0.]])
buffer_processing_matrix = np.array([[-1., 0., 0., -1., -1., 0., -1.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., -1., 0.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
constituency_matrix_original = constituency_matrix.copy()
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_average_action = np.array([[1], [0.], [1.], [0.33], [0.33], [0.], [0.33]])
num_sim = 5e4
actions = np.zeros((buffer_processing_matrix.shape[1], int(num_sim)))
for i in np.arange(int(num_sim)):
actions[:, [i]] = agent.map_state_to_actions(state=state)
average_action = np.sum(actions, axis=1) / num_sim
np.testing.assert_array_almost_equal(average_action.reshape(-1, 1), expected_average_action,
decimal=2)
assert np.all(constituency_matrix_original == constituency_matrix)
assert np.all(constituency_matrix_original == env.constituency_matrix)
def test_priority_heuristic_agent_map_state_to_actions_full_priorities_full_buffer():
priorities = {0: 0, 1: 2, 2: 5}
state = np.array([[10.], [10.], [10.]])
buffer_processing_matrix = np.array([[-1., 0., 0., 0., 0., 0., 0.],
[0., -1., -1., 0., 0., 0., 0.],
[0., 0., 0., -1., -1., -1., -1.]])
constituency_matrix = np.array([[1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1.]])
env_params = get_null_env_params(state=state, buffer_processing_matrix=buffer_processing_matrix,
constituency_matrix=constituency_matrix)
env = crw.ControlledRandomWalk(**env_params)
agent = custom_priority_agent.CustomActivityPriorityAgent(env, priorities)
expected_action = np.array([[1], [0], [1], [0], [0], [1], [0]])
action = agent.map_state_to_actions(state=state)
assert np.all(action == expected_action)
| 1.960938 | 2 |
src/dataloader/transforms.py | Rahul-fix/stereo-depth | 0 | 12797814 | import torch
from torchvision.transforms import Compose,Normalize,RandomCrop,RandomResizedCrop,Resize,RandomHorizontalFlip, ToTensor
from torchvision import transforms
def get_transforms():
normalize = Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
transform = Compose([normalize])
return transform
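# Minimal usage sketch (illustrative only, not part of the original module):
# apply the normalization transform to a dummy image tensor.
if __name__ == "__main__":
    dummy_image = torch.rand(3, 224, 224)  # stand-in for a real RGB image tensor
    transformed = get_transforms()(dummy_image)
    print(transformed.shape)  # torch.Size([3, 224, 224])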
| 2.140625 | 2 |
src/linear_model.py | vignesh-pagadala/linear-regression | 0 | 12797815 | import numpy as np
import matplotlib.pyplot as plt
import pprint
def missingIsNan(s):
return np.nan if s == b'?' else float(s)
def makeStandardize(X):
means = X.mean(axis = 0)
stds = X.std(axis = 0)
def standardize(origX):
return (origX - means) / stds
def unstandardize(stdX):
return stds * stdX + means
return (standardize, unstandardize)
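# Illustrative example of the returned closures (hypothetical values, not part of
# the original script):
#   standardize, unstandardize = makeStandardize(np.array([[1., 2.], [3., 4.]]))
#   Xs = standardize(np.array([[1., 2.]]))   # zero-mean / unit-std copy
#   unstandardize(Xs)                        # recovers the original values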
if __name__ == '__main__':
# 1. Load the data.
data = np.loadtxt("Data\\auto-mpg.data", usecols = range(8), converters = {3: missingIsNan})
# 2. 'Clean' the data.
Cdata = data[~np.isnan(data).any(axis = 1)]
# 3. Split it into input (X) and target (T)
# Target = mpg (first column)
# Input = remaining - columns 2 to 7
T = Cdata[:, 0:1]
X = Cdata[:, 1:]
# 4. Append column of 1s to X
# X1 = np.insert(X, 0, 1, 1)
# 4. Split the data into training (80 %) and testing data (20 %)
nRows = X.shape[0]
nTrain = int(round(0.8*nRows))
nTest = nRows - nTrain
# Shuffle row numbers
rows = np.arange(nRows)
np.random.shuffle(rows)
trainIndices = rows[:nTrain]
testIndices = rows[nTrain:]
# Check that training and testing sets are disjoint
# print(np.intersect1d(trainIndices, testIndices))
Xtrain = X[trainIndices, :]
Ttrain = T[trainIndices, :]
Xtest = X[testIndices, :]
Ttest = T[testIndices, :]
# 5. Standardize
(standardize, unstandardize) = makeStandardize(Xtrain)
XtrainS = standardize(Xtrain)
XtestS = standardize(Xtest)
# 6. Tack column of 1s
XtrainS1 = np.insert(XtrainS, 0, 1, 1)
XtestS1 = np.insert(XtestS, 0, 1, 1)
# 7. Find weights (solve for w)
w = np.linalg.lstsq(XtrainS1.T @ XtrainS1, XtrainS1.T @ Ttrain, rcond = None)[0]
# 8. Predict
predict = XtestS1 @ w
    # 9. Compute RMSE (root-mean-square error)
    rmse = np.sqrt(np.mean((predict - Ttest)**2))
    print(rmse)
| 3 | 3 |
practice1/5.1.py | StanislavDanilov/python3_course | 0 | 12797816 | <gh_stars>0
day = int(input(": "))
m = int(input(": "))
year = int(input(": "))
if m == 2:
m = 12
elif m == 1:
m = 11
else:
m -= 2
c = year%100
print((day + ((13*m -1) //5) + year + year + (year //4 + c//4 - 2*c + 777)) % 7 )
#does not work yet !!!!
| 3.578125 | 4 |
dev/debug/sqllite_client.py | anderslaunerbaek/abc-core | 0 | 12797817 | import logging
from abc_core.database.sqllite_client import SQLLite
from abc_core.utils.logger_client import get_basis_logger_config
def main():
logging.basicConfig(**get_basis_logger_config())
db = SQLLite(filename="../../data/application.db")
res = db.select("SELECT * FROM blogs1")
print(res)
# for i in range(1):
# db.insert(
# query="INSERT INTO blogs VALUES (?,?,?,?,?);",
# data=(f'private-blog{i+10}', '2021-03-07', 'Secret blog' ,'This is a secret',3)
# )
# res = db.select("SELECT * FROM blogs WHERE public >= 3")
# print(res)
# (id text not null primary key, date text, title text, content text, public integer
db.close_connection()
if __name__ == "__main__":
main()
| 3.03125 | 3 |
prvsnlib/context.py | acoomans/prvsn | 0 | 12797818 | <gh_stars>0
class ProvisioningContext:
def __init__(self):
self.runbook = None
self.role = None
context = ProvisioningContext() | 1.53125 | 2 |
Image Recognition/utils/BayesianModels/BayesianSqueezeNet.py | EvenStrangest/PyTorch-BayesianCNN | 1 | 12797819 | import torch.nn as nn
from utils.BBBlayers import BBBConv2d, FlattenLayer, BBBLinearFactorial
class BBBSqueezeNet(nn.Module):
"""
SqueezeNet with slightly modified Fire modules and Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBBSqueezeNet, self).__init__()
self.conv1 = BBBConv2d(inputs, 64, kernel_size=3, stride=2)
self.soft1 = nn.Softplus()
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
# Fire module 1
self.squeeze1 = BBBConv2d(64, 16, kernel_size=1)
self.squeeze_activation1 = nn.Softplus()
self.expand3x3_1 = BBBConv2d(16, 128, kernel_size=3, padding=1)
self.expand3x3_activation1 = nn.Softplus()
# Fire module 2
self.squeeze2 = BBBConv2d(128, 16, kernel_size=1)
self.squeeze_activation2 = nn.Softplus()
self.expand3x3_2 = BBBConv2d(16, 128, kernel_size=3, padding=1)
self.expand3x3_activation2 = nn.Softplus()
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
# Fire module 3
self.squeeze3 = BBBConv2d(128, 32, kernel_size=1)
self.squeeze_activation3 = nn.Softplus()
self.expand3x3_3 = BBBConv2d(32, 256, kernel_size=3, padding=1)
self.expand3x3_activation3 = nn.Softplus()
# Fire module 4
self.squeeze4 = BBBConv2d(256, 32, kernel_size=1)
self.squeeze_activation4 = nn.Softplus()
self.expand3x3_4 = BBBConv2d(32, 256, kernel_size=3, padding=1)
self.expand3x3_activation4 = nn.Softplus()
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
# Fire module 5
self.squeeze5 = BBBConv2d(256, 48, kernel_size=1)
self.squeeze_activation5 = nn.Softplus()
self.expand3x3_5 = BBBConv2d(48, 384, kernel_size=3, padding=1)
self.expand3x3_activation5 = nn.Softplus()
# Fire module 6
self.squeeze6 = BBBConv2d(384, 48, kernel_size=1)
self.squeeze_activation6 = nn.Softplus()
self.expand3x3_6 = BBBConv2d(48, 384, kernel_size=3, padding=1)
self.expand3x3_activation6 = nn.Softplus()
# Fire module 7
self.squeeze7 = BBBConv2d(384, 64, kernel_size=1)
self.squeeze_activation7 = nn.Softplus()
self.expand3x3_7 = BBBConv2d(64, 512, kernel_size=3, padding=1)
self.expand3x3_activation7 = nn.Softplus()
# Fire module 8
self.squeeze8 = BBBConv2d(512, 64, kernel_size=1)
self.squeeze_activation8 = nn.Softplus()
self.expand3x3_8 = BBBConv2d(64, 512, kernel_size=3, padding=1)
self.expand3x3_activation8 = nn.Softplus()
self.drop1 = nn.Dropout(p=0.5)
self.conv2 = BBBConv2d(512, outputs, kernel_size=1)
self.soft2 = nn.Softplus()
self.flatten = FlattenLayer(13 * 13 * 100)
self.fc1 = BBBLinearFactorial(13 * 13 * 100, outputs)
layers = [self.conv1, self.soft1, self.pool1,
self.squeeze1, self.squeeze_activation1, self.expand3x3_1, self.expand3x3_activation1,
self.squeeze2, self.squeeze_activation2, self.expand3x3_2, self.expand3x3_activation2,
self.pool2,
self.squeeze3, self.squeeze_activation3, self.expand3x3_3, self.expand3x3_activation3,
self.squeeze4, self.squeeze_activation4, self.expand3x3_4, self.expand3x3_activation4,
self.pool3,
self.squeeze5, self.squeeze_activation5, self.expand3x3_5, self.expand3x3_activation5,
self.squeeze6, self.squeeze_activation6, self.expand3x3_6, self.expand3x3_activation6,
self.squeeze7, self.squeeze_activation7, self.expand3x3_7, self.expand3x3_activation7,
self.squeeze8, self.squeeze_activation8, self.expand3x3_8, self.expand3x3_activation8,
self.drop1, self.conv2, self.soft2, self.flatten, self.fc1]
self.layers = nn.ModuleList(layers)
def probforward(self, x):
'Forward pass with Bayesian weights'
kl = 0
for layer in self.layers:
if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
x, _kl, = layer.convprobforward(x)
kl += _kl
elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
x, _kl, = layer.fcprobforward(x)
kl += _kl
else:
x = layer(x)
logits = x
print('logits', logits)
return logits, kl
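# Usage sketch (illustrative; assumes input spatial dimensions that make the final
# feature map match FlattenLayer(13 * 13 * 100), and outputs=100 classes):
#   net = BBBSqueezeNet(outputs=100, inputs=3)
#   logits, kl = net.probforward(images)
#   loss = criterion(logits, targets) + beta * kl   # `criterion` and `beta` are assumed here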
| 2.25 | 2 |
src/python/validators/test_email_validator.py | phoffmeister/polyglot-registration | 3 | 12797820 | <filename>src/python/validators/test_email_validator.py
import unittest
from email_validator import EmailValidator
class TestEmailValidator(unittest.TestCase):
def test_validates_document(self):
email_validator = EmailValidator()
test_data = [
("<EMAIL>", True),
("<EMAIL>", True),
("<EMAIL>", True),
("<EMAIL>", True),
("<EMAIL>", False),
("<EMAIL>", False),
("<EMAIL>", False),
(".<EMAIL>", False),
("<EMAIL>.", False),
("person@", False)
]
for item in test_data:
self.assertEqual(item[1], email_validator.is_valid(item[0]))
| 3.390625 | 3 |
apps/account/admin.py | JoseTorquato/projeto_enif | 0 | 12797821 | from django.contrib import admin
from .models import Balance
class BalanceAdmin(admin.ModelAdmin):
list_display = ('balance',)
admin.site.register(Balance) | 1.484375 | 1 |
convlab/modules/nlu/multiwoz/error.py | ngduyanhece/ConvLab | 405 | 12797822 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
"""
class ErrorNLU:
"""Base model for generating NLU error."""
def __init__(self, act_type_rate=0.0, slot_rate=0.0):
"""
Args:
act_type_rate (float): The error rate applied on dialog act type.
slot_rate (float): Error rate applied on slots.
"""
self.set_error_rate(act_type_rate, slot_rate)
def set_error_rate(self, act_type_rate, slot_rate):
"""
Set error rate parameter for error model.
Args:
act_type_rate (float): The error rate applied on dialog act type.
slot_rate (float): Error rate applied on slots.
"""
self.act_type_rate = act_type_rate
self.slot_rate = slot_rate
def apply(self, dialog_act):
"""
Apply the error model on dialog act.
Args:
dialog_act (tuple): Dialog act.
Returns:
dialog_act (tuple): Dialog act with noise.
"""
#TODO
return
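# Intended usage (sketch only; `apply` is still a TODO in this module):
#   err = ErrorNLU(act_type_rate=0.1, slot_rate=0.05)
#   noisy_act = err.apply(dialog_act)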
| 2.484375 | 2 |
cogs/weather.py | mmmalk/saberbot | 0 | 12797823 | import discord
from discord.ext import commands
import json, requests, io, re
class Weather:
"""Weather class handles weather using openweather api
params:
attributes:
apikey: api key for openweather
config_location: configuration location for saberbot
locations: json file containing the openweathermap location data
"""
def __init__(self, bot):
self.bot = bot
self.conf = self.bot.config["weather"]
self.apikey = self.conf["apikey"]
with open(self.conf["citylist"]) as jsonfile:
self.locations_json = json.loads(jsonfile.read())
def parsequery(self, *args):
"""parses list of argument to string"""
querystring = ""
keywords = {}
print(args)
for arg in args:
if "=" in arg:
larg = arg.split("=")
keywords[larg[0]] = larg[1]
continue
querystring += f" {str(arg)}"
querystring = querystring.lstrip()
return querystring, keywords
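    # Illustrative example (hypothetical arguments, not from the original file):
    #   self.parsequery("London", "country=UK") -> ("London", {"country": "UK"})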
def get_location_id(self, location, country):
print(location)
for item in self.locations_json:
if item["name"] == location:
if not country or item["country"]== country.upper():
return str(item["id"])
return None
def get_data(self, id, url_string):
"""params: id - location id
returns: data - dictionary object containing json response"""
response = requests.get(url_string)
data = json.loads(response.text)
return data
def CtoF(self, c):
return (9/5)*c+32
@commands.command(pass_context=True)
@commands.cooldown(1, 5.0, commands.BucketType.server)
async def weather(self, ctx, *args):
"""Search for weather by city and optionally a country
usage: !weather <city>, optionally specify country=<ID>, for example !weather London country=UK"""
relevant = {}
location, keywords = self.parsequery(*args)
        country = keywords.get("country", "")
regex = re.compile("([^\w\s{1}]|\d|_|\s+)") #\W_ didn't work in testing for some reason?
location = re.sub(regex, "", location) #transform location into string with spaces
l = []
l.append(country)
l.append(location)
print(l)
location_id = self.get_location_id(location, country)
if location_id != None:
weather_url=f"https://api.openweathermap.org/data/2.5/weather?id={location_id}&units=metric&APPID={self.apikey}"
forecast_url=f"https://api.openweathermap.org/data/2.5/forecast/?id={location_id}&cnt=1&units=metric&APPID={self.apikey}"
weatherdata = self.get_data(location_id, weather_url)
forecastdata = self.get_data(location_id, forecast_url)
country = weatherdata["sys"]["country"]
print(weatherdata)
relevant["today"] = {"desc" : weatherdata["weather"][0]["description"], "temp" : weatherdata["main"]["temp"]}
relevant["tomorrow"] = {"desc" : forecastdata["list"][0]["weather"][0]["description"], "temp" : forecastdata["list"][0]["main"]["temp"]}
await self.bot.send_message(ctx.message.channel, f"weather for {location}, {country}: today {relevant['today']['desc']} {int(relevant['today']['temp'])} °C / {int(self.CtoF(relevant['today']['temp']))} °F")
await self.bot.send_message(ctx.message.channel, f"tomorrow: {relevant['tomorrow']['desc']}, {int(relevant['tomorrow']['temp'])} °C / {int(self.CtoF(relevant['tomorrow']['temp']))} °F")
else:
await self.bot.send_message(ctx.message.channel, f"Sorry, I don't know where {location} is")
def setup(bot):
bot.add_cog(Weather(bot))
| 3.234375 | 3 |
pyspark_db_utils/ch/__init__.py | osahp/pyspark_db_utils | 7 | 12797824 | <gh_stars>1-10
from .write_to_ch import write_to_ch
from .read_from_ch import read_from_ch
| 1.179688 | 1 |
winearb/articles/admin.py | REBradley/WineArb | 1 | 12797825 | from django.contrib import admin
from .models import Article
class ArticleAdmin(admin.ModelAdmin):
model = Article
admin.site.register(Article)
| 1.539063 | 2 |
startup/33-CBFhandler.py | NSLS-II-LIX/profile_collection | 0 | 12797826 | import os
from databroker.assets.handlers_base import HandlerBase
from databroker.assets.base_registry import DuplicateHandler
import fabio
# for backward compatibility, fpp was always 1 before Jan 2018
#global pilatus_fpp
#pilatus_fpp = 1
# this is used by the CBF file handler
from enum import Enum
class triggerMode(Enum):
software_trigger_single_frame = 1
software_trigger_multi_frame = 2
external_trigger = 3
fly_scan = 4
#external_trigger_multi_frame = 5 # this is unnecessary, difference is fpp
#global pilatus_trigger_mode
#global default_data_path_root
#global substitute_data_path_root
#global CBF_replace_data_path
#pilatus_trigger_mode = triggerMode.software_trigger_single_frame
# if the cbf files have been moved already
#CBF_replace_data_path = False
class PilatusCBFHandler(HandlerBase):
specs = {'AD_CBF'} | HandlerBase.specs
froot = data_file_path.gpfs
subdir = None
trigger_mode = triggerMode.software_trigger_single_frame
# assuming that the data files always have names with these extensions
std_image_size = {
'SAXS': (1043, 981),
'WAXS1': (619, 487),
        'WAXS2': (1043, 981) # original WAXS2 was (619, 487)
}
def __init__(self, rpath, template, filename, frame_per_point=1, initial_number=1):
print(f'Initializing CBF handler for {self.trigger_mode} ...')
self._template = template
self._fpp = frame_per_point
self._filename = filename
self._initial_number = initial_number
self._image_size = None
self._default_path = os.path.join(rpath, '')
self._path = ""
for k in self.std_image_size:
if template.find(k)>=0:
self._image_size = self.std_image_size[k]
if self._image_size is None:
raise Exception(f'Unrecognized data file extension in filename template: {template}')
for fr in data_file_path:
if self._default_path.find(fr.value)==0:
self._dir = self._default_path[len(fr.value):]
return
raise Exception(f"invalid file path: {self._default_path}")
def update_path(self):
# this is a workaround for data that are save in /exp_path then moved to /nsls2/xf16id1/exp_path
if not self.froot in data_file_path:
raise Exception(f"invalid froot: {self.froot}")
self._path = self.froot.value+self._dir
print(f"updating path, will read data from {self._path} ...")
def get_data(self, fn):
""" the file may not exist
"""
try:
img = fabio.open(fn)
data = img.data
if data.shape!=self._image_size:
print(f'got incorrect image size from {fn}: {data.shape}') #, return an empty frame instead.')
except:
print(f'could not read {fn}, return an empty frame instead.')
data = np.zeros(self._image_size)
#print(data.shape)
return data
def __call__(self, point_number):
start = self._initial_number #+ point_number
stop = start + 1
ret = []
tplt = self._template.replace("6.6d", "06d") # some early templates are not correctly formatted
tl = tplt.replace(".", "_").split("_")
# e.g. ['%s%s', '%06d', 'SAXS', 'cbf'], ['%s%s', '%06d', 'SAXS', '%05d', 'cbf']
# resulting in file names like test_000125_SAXS.cbf vs test_000125_SAXS_00001.cbf
if self.trigger_mode != triggerMode.software_trigger_single_frame and self._fpp>1:
# the template needs to have two number fileds
if len(tl)==4:
tl = tl[:-1]+["%05d"]+tl[-1:]
elif len(tl)==5:
tl = tl[:-2]+tl[-1:]
self._template = "_".join(tl[:-1])+"."+tl[-1]
print("CBF handler called: start=%d, stop=%d" % (start, stop))
print(" ", self._initial_number, point_number, self._fpp)
print(" ", self._template, self._path, self._initial_number)
self.update_path()
if self.subdir is not None:
self._path += f"{self.subdir}/"
if self.trigger_mode == triggerMode.software_trigger_single_frame or self._fpp == 1:
fn = self._template % (self._path, self._filename, self._initial_number+point_number)
ret.append(self.get_data(fn))
elif self.trigger_mode in [triggerMode.software_trigger_multi_frame,
triggerMode.fly_scan]:
for i in range(self._fpp):
fn = self._template % (self._path, self._filename, start, point_number+i)
ret.append(self.get_data(fn))
elif self.trigger_mode==triggerMode.external_trigger:
fn = self._template % (self._path, self._filename, start, point_number)
ret.append(self.get_data(fn))
return np.array(ret).squeeze()
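# Illustrative example (hypothetical values): in single-frame mode, with a template
# such as '%s%s_%6.6d_SAXS.cbf', filename 'test' and initial_number 125, point_number 0
# resolves to '<path>test_000125_SAXS.cbf' and a (1043, 981) array is returned.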
db.reg.register_handler('AD_CBF', PilatusCBFHandler, overwrite=True)
| 2.015625 | 2 |
api/serializers.py | V-Holodov/pets_accounting | 0 | 12797827 | <reponame>V-Holodov/pets_accounting
from rest_framework import serializers
from api.models import Pet, PetPhoto
class PetPhotoSerializer(serializers.ModelSerializer):
"""Serialization of pet photos."""
url = serializers.ImageField(source="photo")
class Meta:
model = PetPhoto
fields = ("id", "url")
def to_internal_value(self, data):
resource_data = data["file"]
return super().to_internal_value(resource_data)
class PetSerializer(serializers.ModelSerializer):
"""Pet sterilization."""
photos = PetPhotoSerializer(many=True, read_only=True)
class Meta:
model = Pet
fields = ("id", "name", "age", "type", "photos", "created_at")
class PhotoLoadSerializer(serializers.Serializer):
"""Deserialization of the uploaded pet photo."""
file = serializers.ImageField()
class IdsSerializer(serializers.Serializer):
"""Deserialization of pet IDs."""
ids = serializers.ListField(
child=serializers.IntegerField(min_value=1), allow_empty=False
)
| 2.171875 | 2 |
utils.py | marka17/vqvae | 11 | 12797828 | from typing import Tuple, Dict
import random
import numpy as np
import torch
from torchvision import datasets, transforms
from sklearn.metrics.pairwise import cosine_distances
from matplotlib import pyplot as plt
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
CIFAR10_ANNOTATION = {
0: 'airplane',
1: 'automobile',
2: 'bird',
3: 'cat',
4: 'deer',
5: 'dog',
6: 'frog',
7: 'horse',
8: 'ship',
9: 'truck'
}
def plot_cifar_image(image, label=""):
plt.title(label)
plt.imshow(image.permute(1, 2, 0).numpy())
plt.show()
class AccumulateStats:
def __enter__(self):
pass
    def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self):
pass
class AverageMeter(object):
"""
Computes and stores the average and current value
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MeterLogger:
def __init__(self, meters: Tuple[str], writer: SummaryWriter):
self.average_meters: Dict[str, AverageMeter] = {k: AverageMeter() for k in meters}
self._writer = writer
def update(self, name: str, val, n=1):
self.average_meters[name].update(val, n)
def reset(self):
for meter in self.average_meters.values():
meter.reset()
def write(self, step, prefix):
for name, meter in self.average_meters.items():
tag = prefix + '/' + name
self._writer.add_scalar(tag, meter.avg, step)
class ImageLogger:
def __init__(self, writer: SummaryWriter, mean=None, std=None):
self._writer = writer
self.mean = mean
self.std = std
if self.mean is not None:
self.mean = torch.tensor(self.mean).reshape(1, 3, 1, 1)
if self.std is not None:
self.std = torch.tensor(self.std).reshape(1, 3, 1, 1)
def write(self, images, reconstruction, step, prefix):
images = images.cpu()
reconstruction = reconstruction.cpu()
if self.mean is not None and self.std is not None:
images = images * self.std + self.mean
reconstruction = reconstruction * self.std + self.mean
image_tag = prefix + '/' + 'image'
self._writer.add_images(image_tag, images, step)
reconstruction_tag = prefix + '/' + 'reconstruction'
self._writer.add_images(reconstruction_tag, reconstruction, step)
class VQEmbeddingLogger:
def __init__(self, writer: SummaryWriter):
self._writer = writer
def write(self, embeddings, step):
embeddings = embeddings.detach().cpu().numpy()
sim = cosine_distances(embeddings)
self._writer.add_image('cos_sim_vq_embeddings', sim, step, dataformats='HW')
def double_soft_orthogonality(weights: torch.Tensor):
a = torch.norm(weights @ weights.t() - torch.eye(weights.shape[0]).to(weights.device)) ** 2
b = torch.norm(weights.t() @ weights - torch.eye(weights.shape[1]).to(weights.device)) ** 2
return a + b
def set_random_seed(seed: int, cuda: bool = False):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.random.manual_seed(seed)
torch.backends.cudnn.deterministic = True
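# Minimal usage sketch (illustrative only, not part of the original module):
# exercises AverageMeter and the double soft-orthogonality penalty.
if __name__ == "__main__":
    meter = AverageMeter()
    for value in [1.0, 2.0, 3.0]:
        meter.update(value)
    print(meter.avg)  # 2.0
    weights = torch.randn(8, 4)
    print(double_soft_orthogonality(weights))  # non-negative scalar penalty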
| 2.640625 | 3 |
tests/fson_files/src/event_A_B.py | NVinity/fson | 0 | 12797829 | #&&&#
source_file_name = 'data/A/event.txt'
sink_file_name = 'data/B/event.txt'
#
with open(source_file_name, "r") as source_file:
data = source_file.read()
key = 'b'
#
context = {'b': 2}
value = context[key]
#
print(value)
#&&&#
print(data)
#
with open(sink_file_name, "w") as sink_file:
sink_file.write(data)
| 2.6875 | 3 |
councilmatic_core/migrations/0013_auto_20160707_1957.py | tor-councilmatic/django-councilmatic | 1 | 12797830 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-07 23:57
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0012_auto_20160707_1859'),
]
operations = [
migrations.AlterField(
model_name='bill',
name='extras',
field=jsonfield.fields.JSONCharField(default='{}', max_length=255),
),
migrations.AlterField(
model_name='event',
name='extras',
field=jsonfield.fields.JSONCharField(default='{}', max_length=255),
),
migrations.AlterField(
model_name='organization',
name='extras',
field=jsonfield.fields.JSONCharField(default='{}', max_length=255),
),
migrations.AlterField(
model_name='person',
name='extras',
field=jsonfield.fields.JSONCharField(default='{}', max_length=255),
),
]
| 1.703125 | 2 |
pip/commands/show.py | mindw/pip | 0 | 12797831 | from __future__ import absolute_import
from email.parser import FeedParser
import logging
import os
from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""Show information about one or more installed packages."""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.cmd_opts.add_option(
'--index',
dest='index',
metavar='URL',
default='https://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
self.cmd_opts.add_option(
'-p', '--pypi',
dest='pypi',
action='store_true',
default=False,
help='Show PyPi version')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
if options.pypi:
with self._build_session(options) as session:
results = search_packages_info(query, options.index, session)
else:
results = search_packages_info(query, options.index)
if not print_results(results, options.files):
return ERROR
return SUCCESS
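# Example invocations of this command (illustrative):
#   pip show requests
#   pip show --files --pypi requests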
def _format_package(requirement):
r = requirement
installed_ver = '-'
try:
d = pkg_resources.get_distribution(r.project_name)
installed_ver = str(d.version)
except pkg_resources.DistributionNotFound:
pass
return "%s [%s]" % (r, installed_ver)
def search_packages_info(query, index_url=None, session=None):
"""
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires
    a pip-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
"""
installed = dict(
[(p.key, p) for p in pkg_resources.working_set])
query_names = [name.lower() for name in query]
distributions = [installed[pkg] for pkg in query_names if pkg in installed]
for dist in distributions:
required_by = []
for _, p in installed.items():
r = next((r for r in p.requires() if r.key == dist.key), None)
if r:
required_by.append("%s %s" % (p.project_name, r.specifier))
else:
for e in p.extras:
r = next(
(r for r in p.requires([e]) if r.key == dist.key), None
)
if r:
required_by.append(
"%s[%s] %s" % (p.project_name, e, r.specifier))
extras = {}
for e in dist.extras:
reqs = set(dist.requires([e])) - set(dist.requires())
extras[e] = map(_format_package, reqs)
if session:
from pip.download import PipXmlrpcTransport
from pip._vendor.six.moves import xmlrpc_client
transport = PipXmlrpcTransport(index_url, session)
pypi = xmlrpc_client.ServerProxy(index_url, transport)
pypi_releases = pypi.package_releases(dist.project_name)
pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN'
else:
pypi_version = None
requires = [_format_package(r_) for r_ in dist.requires()]
package = {
'name': dist.project_name,
'version': dist.version,
'pypi_version': pypi_version,
'location': dist.location,
'requires': requires,
'required_by': required_by,
'extras': extras
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
installer = None
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
installer = line.strip()
break
package['installer'] = installer
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser can not deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if not line:
break
# Classifier: License :: OSI Approved :: MIT License
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
def print_results(distributions, list_all_files):
"""
    Print the information from the installed distributions found.
"""
results_printed = False
for dist in distributions:
results_printed = True
logger.info("---")
logger.info("Metadata-Version: %s", dist.get('metadata-version'))
logger.info("Name: %s", dist['name'])
logger.info("Version: %s", dist['version'])
if dist['pypi_version']:
logger.info("PyPi Version: %s", dist['pypi_version'])
logger.info("Summary: %s", dist.get('summary'))
logger.info("Home-page: %s", dist.get('home-page'))
logger.info("Author: %s", dist.get('author'))
logger.info("Author-email: %s", dist.get('author-email'))
if dist['installer'] is not None:
logger.info("Installer: %s", dist['installer'])
logger.info("License: %s", dist.get('license'))
logger.info("Location: %s", dist['location'])
logger.info("Classifiers:")
for classifier in dist['classifiers']:
logger.info(" %s", classifier)
logger.info("Requires:")
for line in sorted(dist['requires']):
logger.info(" %s", line)
for extra_name, deps in dist['extras'].items():
logger.info("Extra Require [%s]:", extra_name)
for line in sorted(deps):
logger.info(" %s", line.strip())
logger.info("Required by(%d):", len(dist['required_by']))
for line in sorted(dist['required_by']):
logger.info(" %s", line.strip())
if list_all_files:
logger.info("Files:")
if 'files' in dist:
for line in dist['files']:
logger.info(" %s", line.strip())
else:
logger.info("Cannot locate installed-files.txt")
if 'entry_points' in dist:
logger.info("Entry-points:")
for line in dist['entry_points']:
logger.info(" %s", line.strip())
return results_printed
| 2.328125 | 2 |
swiftst/node/__init__.py | btorch/swift-setuptools | 0 | 12797832 | <reponame>btorch/swift-setuptools
""" Location for deploying swift nodes """
| 0.644531 | 1 |
course_link/__init__.py | arindampradhan/course_link | 0 | 12797833 | __version__ = "0.2.0"
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__maintainer__ = '<NAME>'
__maintainer_email__ = '<EMAIL>'
__url__ = 'https://github.com/arindampradhan/course_link'
AutomationKit_InfrastructureServices@SVU/helpers/BuildConfig/Pcre/PcreConfig.py | dipAch/Infrastructure-Automation-Kit | 0 | 12797834 | <filename>AutomationKit_InfrastructureServices@SVU/helpers/BuildConfig/Pcre/PcreConfig.py<gh_stars>0
##############################################################
# Module Import Section.
# Make all the necessary imports here.
##############################################################
import helpers.BuildConfig.Logger.LoggerConfig
##############################################################
# Get the configuration options set for the PCRE build
# and install on the target host.
# Set the PCRE Download URL.
DOWNLOAD_URL = 'ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.40.tar.gz'
# Get this value from the above Downloads URI.
# It would be present before the `EXTENSION` type of the Archived Source Package in the URI.
# Use it to enable the configure time `PREFIX` and `DOC_DIR` options.
PCRE_VERSION = '8.40'
# `PCRE` build environment details.
ENVIRONMENT = {
'BUILD_TARGET': '__PCRE__',
'DEPENDENCY' : None,
'BUILD_TYPE' : 'Generic [Automated]',
'DESCRIPTION' : "Build Automation for setting up `HTTPD` Dependency components.",
'OS_SUPPORT' : {
'Unix': ['Solaris'],
'Linux': ['Ubuntu', 'RHEL']
}
}
# `THREAD` name that executes the download logic for `PCRE`.
PCRE_DOWNLOADER_THREAD_NAME = 'PCRE::DOWNLOADER::THREAD'
# `THREAD` name that executes the untar logic for `PCRE`.
PCRE_UNTAR_THREAD_NAME = 'PCRE::UNTAR::THREAD'
# Package type name convention to
# be followed within the `TAR` final
# extract directory.
# Below is the `TAR` extraction directory for `PCRE`.
PCRE_TAR_EXTRACT_PACKAGE_TYPE_LOCATION = 'Pcre/'
# Location to keep the `PCRE` binary.
# Change this location parameter to
# suit your environment standards.
PCRE_BINARY_LOCATION = '/usr/local/pcre-'
# `DOCDIR` location for the `PCRE` install.
PCRE_DOCDIR_LOCATION = '/usr/share/doc/pcre-'
# `PCRE` component name to be used
# program wide.
# Used to reference or label `PCRE`
# related nuances.
PCRE_COMPONENT_NAME = 'Pcre'
# `PCRE_TAR` component name to be used
# program wide.
# This is the identifier returned by
# the DownloadManager Module.
# It references the `PCRE_TAR` package.
PCRE_TAR_COMPONENT_NAME = 'Pcre_Tar'
# The `PCRE_TAR` extract name to be referenced after the
# untarring operation.
PCRE_TAR_EXTRACT_COMPONENT_NAME = 'Pcre_Tar_ExtractName'
# Set the `DOCDIR` flag as an entry in the `DOCDIR` dictionary.
PCRE_DOCDIR = {'docdir_options': '--docdir=' + PCRE_DOCDIR_LOCATION}
# Define the Configure time Options that needs to be set.
# For more Options, just add the directive to the below list.
ENABLE_OPTIONS_FLAGS = ['--enable-unicode-properties', '--enable-pcre16', '--enable-pcre32',
'--enable-pcregrep-libz', '--disable-static']
# Have the `PREFIX` set here, and we will include the `CONFIGURE` line options at run-time
# from the `BUILD_SUPERVISOR` Script.
INSTALL_TIME_OPTIONS = {'prefix_options': '--prefix='}
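# Illustrative sketch of how the configure line could be assembled at run-time by the
# build supervisor (assumed usage, not executed here):
#   configure_line = " ".join(
#       ["./configure",
#        INSTALL_TIME_OPTIONS['prefix_options'] + PCRE_BINARY_LOCATION + PCRE_VERSION,
#        PCRE_DOCDIR['docdir_options'] + PCRE_VERSION]
#       + ENABLE_OPTIONS_FLAGS)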
# Set the log location options, to store the Build Information
# for the `PCRE` source build (i.e., this is for the `CONFIGURE`, `MAKE` and `MAKE INSTALL` processes).
PCRE_BUILD_PROCESS_LOG_DIRECTORY = 'Pcre_Subprocess_Logs/'
PCRE_SUBPROCESS_LOG_LOCATION = helpers.BuildConfig.Logger.LoggerConfig.LOG_FILE_LOCATION + PCRE_BUILD_PROCESS_LOG_DIRECTORY
PCRE_SUBPROCESS_LOG_FILENAME = PCRE_SUBPROCESS_LOG_LOCATION + 'Pcre_Subprocess.log' | 2.09375 | 2 |
bin/get_pos_data.py | desihub/desiperf | 0 | 12797835 | <reponame>desihub/desiperf
"""
Get Positioner data
"""
import data_mgt.get_pos_data
import argparse
from itertools import repeat
import os
import pandas as pd
import numpy as np
from datetime import datetime
import multiprocessing
#os.environ['DATA_DIR'] = '/global/cscratch1/sd/parkerf/data_local'
#os.environ['DATA_MGT_DIR'] = '/global/homes/p/parkerf/InstPerf/desiperf/py/desiperf/instperfapp/data_mgt'
fiberpos = pd.read_csv(os.path.join(os.environ['DATA_MGT_DIR'],'fiberpos.csv'))
parser = argparse.ArgumentParser(description='Update Positioner data')
parser.add_argument('start', help='start date')
parser.add_argument('end', help='end date')
parser.add_argument('-o','--option', help='option: new, update (default)', default = 'update')
parser.add_argument("-p", "--positioners", help = 'List of positioners')
args = parser.parse_args()
start_date = args.start
end_date = args.end
option = args.option
print(option)
positioners = args.positioners
print(positioners)
if positioners is None:
all_pos = np.unique(fiberpos.CAN_ID)
else:
all_pos = positioners
#finished = pd.read_csv('/n/home/desiobserver/parkerf/desiperf/py/desiperf/data_local/positioners/finished.txt',header=None)
#fin = list(finished[0])[:-1]
#finished_pos = [int(os.path.splitext(os.path.split(f)[1])[0]) for f in fin]
#print(finished_pos)
#all_pos = [x for x in all_pos if x not in finished_pos]
print('Running for {} positioners'.format(len(all_pos)))
start_time = datetime.now()
exp_df_base, telem_df, coord_df, ptl_dbs = data_mgt.get_pos_data.get_dfs(start_date, end_date)
pool = multiprocessing.Pool(processes=64)
pool.starmap(data_mgt.get_pos_data.run, zip(all_pos, repeat(start_date), repeat(end_date), repeat(exp_df_base), repeat(coord_df), repeat(telem_df), repeat(fiberpos), repeat(ptl_dbs), repeat(option)))
pool.terminate()
print("total time: ",(datetime.now()-start_time).total_seconds()/60.)
| 2.5 | 2 |
pruning/pytorch_snip/network.py | zwxu064/RANP | 9 | 12797836 | import torch.nn as nn
import torch.nn.functional as F
import scipy.io as scio
from torchvision.models import vgg19_bn, resnet152, densenet161
class LeNet_300_100(nn.Module):
def __init__(self, enable_bias=True): # original code is true
super().__init__()
self.fc1 = nn.Linear(784, 300, bias=enable_bias)
self.fc2 = nn.Linear(300, 100, bias=enable_bias)
self.fc3 = nn.Linear(100, 10, bias=enable_bias)
def forward(self, x):
x = F.relu(self.fc1(x.view(-1, 784)))
x = F.relu(self.fc2(x))
return F.log_softmax(self.fc3(x), dim=1)
class LeNet_5(nn.Module):
def __init__(self, enable_bias=True):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5, padding=2, bias=enable_bias)
self.conv2 = nn.Conv2d(6, 16, 5, bias=enable_bias)
self.fc3 = nn.Linear(16 * 5 * 5, 120, bias=enable_bias)
self.fc4 = nn.Linear(120, 84, bias=enable_bias)
self.fc5 = nn.Linear(84, 10, bias=enable_bias)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.fc3(x.view(-1, 16 * 5 * 5)))
x = F.relu(self.fc4(x))
x = F.log_softmax(self.fc5(x), dim=1)
return x
class LeNet_5_Caffe(nn.Module):
"""
This is based on Caffe's implementation of Lenet-5 and is slightly different
from the vanilla LeNet-5. Note that the first layer does NOT have padding
and therefore intermediate shapes do not match the official LeNet-5.
"""
def __init__(self, enable_bias=True):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=enable_bias)
self.conv2 = nn.Conv2d(20, 50, 5, bias=enable_bias)
self.fc3 = nn.Linear(50 * 4 * 4, 500, bias=enable_bias)
self.fc4 = nn.Linear(500, 10, bias=enable_bias)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.fc3(x.view(-1, 50 * 4 * 4)))
x = F.log_softmax(self.fc4(x), dim=1)
return x
VGG_CONFIGS = {
'C': [64, 64, 'M', 128, 128, 'M', 256, 256, [256], 'M', 512, 512, [512], 'M', 512, 512, [512], 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'like': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']}
class VGG(nn.Module):
"""
This is a base class to generate three VGG variants used in SNIP paper:
1. VGG-C (16 layers)
2. VGG-D (16 layers)
3. VGG-like
Some of the differences:
* Reduced size of FC layers to 512
* Adjusted flattening to match CIFAR-10 shapes
* Replaced dropout layers with BatchNorm
"""
def __init__(self, config, num_classes=10, enable_bias=True, enable_dump_features=False):
super().__init__()
self.enable_dump_features = enable_dump_features
if enable_dump_features:
self.features_block1 = self.make_layers([64, 64, 'M'], in_channels=3, batch_norm=True, enable_bias=enable_bias)
self.features_block2 = self.make_layers([128, 128, 'M'], in_channels=64, batch_norm=True, enable_bias=enable_bias)
self.features_block3 = self.make_layers([256, 256, [256], 'M'], in_channels=128, batch_norm=True, enable_bias=enable_bias)
self.features_block4 = self.make_layers([512, 512, [512], 'M'], in_channels=256, batch_norm=True, enable_bias=enable_bias)
self.features_block5 = self.make_layers([512, 512, [512], 'M'], in_channels=512, batch_norm=True, enable_bias=enable_bias)
else:
self.features = self.make_layers(VGG_CONFIGS[config], batch_norm=True, enable_bias=enable_bias)
if config in {'C', 'D'}:
self.classifier = nn.Sequential(
nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original VGG
nn.ReLU(True),
nn.BatchNorm1d(512), # instead of dropout
nn.Linear(512, 512, bias=enable_bias),
nn.ReLU(True),
nn.BatchNorm1d(512), # instead of dropout
nn.Linear(512, num_classes, bias=enable_bias))
elif config == 'like':
self.classifier = nn.Sequential(
nn.Linear(512, 512, bias=enable_bias), # 512 * 7 * 7 in the original VGG
nn.ReLU(True),
nn.BatchNorm1d(512), # instead of dropout
nn.Linear(512, num_classes, bias=enable_bias))
else:
assert False
@staticmethod
def make_layers(config, batch_norm=False, enable_bias=True, in_channels=3): # TODO: BN yes or no?
layers = []
for idx, v in enumerate(config):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
if isinstance(v, list):
v, kernel_size, padding = v[0], 1, 0
else:
kernel_size, padding = 3, 1
conv2d = nn.Conv2d(in_channels, v, kernel_size=kernel_size, padding=padding, bias=enable_bias)
if batch_norm:
layers += [conv2d,
nn.BatchNorm2d(v),
nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def forward(self, input, epoch_id=None, batch_id=None, gt=None):
if self.enable_dump_features:
feat_block1 = self.features_block1(input)
feat_block2 = self.features_block2(feat_block1)
feat_block3 = self.features_block3(feat_block2)
feat_block4 = self.features_block4(feat_block3)
x = self.features_block5(feat_block4)
if (epoch_id is not None) and (batch_id is not None):
scio.savemat('../checkpoints/inter_features_epoch{}_batch{}.mat'.format(epoch_id, batch_id),
{'img': input.detach().squeeze().permute(2,3,1,0).cpu().numpy(),
'gt': gt.detach().squeeze().cpu().numpy(),
'b1': feat_block1.detach().squeeze().permute(2,3,1,0).cpu().numpy(),
'b2': feat_block2.detach().squeeze().permute(2,3,1,0).cpu().numpy(),
'b3': feat_block3.detach().squeeze().permute(2,3,1,0).cpu().numpy(),
'b4': feat_block4.detach().squeeze().permute(2,3,1,0).cpu().numpy(),
'b5': x.detach().squeeze().cpu().numpy()})
else:
x = self.features(input)
x = x.view(x.size(0), -1)
x = self.classifier(x)
x = F.log_softmax(x, dim=1)
return x
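# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original source): building the VGG-D
# variant for CIFAR-10 sized inputs. `torch` is assumed to be imported at the
# top of this module; eval() keeps the BatchNorm layers on their running
# statistics so a tiny dummy batch is enough for a shape check.
def _vgg_shape_check():
    model = VGG('D', num_classes=10).eval()
    out = model(torch.randn(2, 3, 32, 32))  # five 2x2 poolings: 32 -> 1
    assert out.shape == (2, 10)
# ---------------------------------------------------------------------------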
class AlexNet(nn.Module):
# copy from https://medium.com/@kushajreal/training-alexnet-with-tips-and-checks-on-how-to-train-cnns-practical-cnns-in-pytorch-1-61daa679c74a
def __init__(self, k=4, num_classes=10, enable_bias=True):
super(AlexNet, self).__init__()
self.conv_base = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=11, stride=2, padding=5, bias=enable_bias),
nn.BatchNorm2d(96),
nn.ReLU(inplace=True),
nn.Conv2d(96, 256, kernel_size=5, stride=2, padding=2, bias=enable_bias),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias),
nn.BatchNorm2d(384),
nn.ReLU(inplace=True),
nn.Conv2d(384, 384, kernel_size=3, stride=2, padding=1, bias=enable_bias),
nn.BatchNorm2d(384),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, stride=2, padding=1, bias=enable_bias),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True))
self.fc_base = nn.Sequential(
nn.Linear(256, 1024 * k),
nn.BatchNorm1d(1024 * k),
nn.ReLU(inplace=True),
nn.Linear(1024 * k, 1024 * k),
nn.BatchNorm1d(1024 * k),
nn.ReLU(inplace=True),
nn.Linear(1024 * k, num_classes))
def forward(self, x):
x = self.conv_base(x)
x = x.view(x.size(0), -1)
x = self.fc_base(x)
x = F.log_softmax(x, dim=1)
return x | 2.5 | 2 |
utest/x3270/test_assertions.py | MichaelSeeburger/Robot-Framework-Mainframe-3270-Library | 3 | 12797837 | <reponame>MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
import re
import pytest
from pytest_mock import MockerFixture
from robot.api import logger
from Mainframe3270.x3270 import x3270
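# ---------------------------------------------------------------------------
# Editor's note (sketch, not part of the original file): every test below
# receives an `under_test` fixture, which pytest resolves from the suite's
# conftest.py. A minimal stand-in could look like the commented example
# below; the constructor arguments are an assumption for illustration only.
#
#   @pytest.fixture
#   def under_test():
#       return x3270(visible=False)
# ---------------------------------------------------------------------------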
def test_page_should_contain_string(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
mocker.patch("robot.api.logger.info")
under_test.page_should_contain_string("abc")
logger.info.assert_called_with('The string "abc" was found')
def test_page_should_contain_string_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="aBc")
mocker.patch("robot.api.logger.info")
under_test.page_should_contain_string("abc", ignore_case=True)
logger.info.assert_called_with('The string "abc" was found')
def test_page_should_contain_string_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match='The string "def" was not found'):
under_test.page_should_contain_string("def")
def test_page_should_contain_string_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_contain_string("def", error_message="my error message")
def test_page_should_not_contain_string(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_contain_string("ABC")
def test_page_should_not_contain_string_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_contain_string("def", ignore_case=True)
def test_page_should_not_contain_string_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match='The string "ABC" was found'):
under_test.page_should_not_contain_string("ABC", ignore_case=True)
def test_page_should_not_contain_string_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_not_contain_string(
"abc", error_message="my error message"
)
def test_page_should_contain_any_string(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_contain_any_string(["abc", "def"])
def test_page_should_contain_any_string_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_contain_any_string(["ABC", "def"], ignore_case=True)
def test_page_should_contain_any_string_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(
Exception, match=re.escape("The strings \"['def', 'ghi']\" were not found")
):
under_test.page_should_contain_any_string(["def", "ghi"])
def test_page_should_contain_any_string_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_contain_any_string(
["def", "ghi"], error_message="my error message"
)
def test_page_should_contain_all_strings(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", side_effect=["abc", "def"])
under_test.page_should_contain_all_strings(["abc", "def"])
def test_page_should_contain_all_strings_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", side_effect=["AbC", "DeF"])
under_test.page_should_contain_all_strings(["abc", "def"], ignore_case=True)
def test_page_should_contain_all_strings_fails(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value=["def"])
with pytest.raises(Exception, match='The string "ghi" was not found'):
under_test.page_should_contain_all_strings(["def", "ghi"])
def test_page_should_contain_all_strings_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_contain_all_strings(
["abc", "def"], error_message="my error message"
)
def test_page_should_not_contain_any_string(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_contain_any_string(["def", "ghi"])
def test_page_should_not_contain_any_string_fails(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match='The string "abc" was found'):
under_test.page_should_not_contain_any_string(["abc", "def"])
def test_page_should_not_contain_any_string_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="ABC")
with pytest.raises(Exception, match='The string "abc" was found'):
under_test.page_should_not_contain_any_string(["abc", "def"], ignore_case=True)
def test_page_should_not_contain_any_string_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_not_contain_any_string(
["abc", "def"], error_message="my error message"
)
def test_page_should_not_contain_all_strings(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_contain_all_strings(["def", "ghi"])
def test_page_should_not_contain_all_strings_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match='The string "abc" was found'):
under_test.page_should_not_contain_all_strings(["ABC", "def"], ignore_case=True)
def test_page_should_not_contain_all_strings_fails(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match='The string "abc" was found'):
under_test.page_should_not_contain_all_strings(["abc", "def"])
def test_page_should_not_contain_all_strings_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_not_contain_all_strings(
["abc", "def"], error_message="my error message"
)
def test_page_should_contain_string_x_times(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="a")
under_test.page_should_contain_string_x_times("a", 24)
def test_page_should_contain_string_x_times_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="a")
under_test.page_should_contain_string_x_times("A", 24, ignore_case=True)
def test_page_should_contain_string_x_times_fails(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="a")
with pytest.raises(
Exception, match='The string "a" was not found "1" times, it appears "24" times'
):
under_test.page_should_contain_string_x_times("a", 1)
with pytest.raises(
Exception, match='The string "b" was not found "1" times, it appears "0" times'
):
under_test.page_should_contain_string_x_times("b", 1)
def test_page_should_contain_string_x_times_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="a")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_contain_string_x_times(
"b", 1, error_message="my error message"
)
def test_page_should_match_regex(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_match_regex(r"\w+")
def test_page_should_match_regex_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(
Exception, match=re.escape(r'No matches found for "\d+" pattern')
):
under_test.page_should_match_regex(r"\d+")
def test_page_should_not_match_regex(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_match_regex(r"\d+")
def test_page_should_not_match_regex_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="a")
with pytest.raises(
Exception, match=re.escape('There are matches found for "[a]+" pattern')
):
under_test.page_should_not_match_regex("[a]+")
def test_page_should_contain_match(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_contain_match("*a?c*")
def test_page_should_contain_match_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(
Exception, match=re.escape('No matches found for "*e?g*" pattern')
):
under_test.page_should_contain_match("*e?g*")
def test_page_should_contain_match_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="ABC")
under_test.page_should_contain_match("*a?c*", ignore_case=True)
def test_page_should_contain_match_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_contain_match("*def*", error_message="my error message")
def test_page_should_not_contain_match(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
under_test.page_should_not_contain_match("*def*")
def test_page_should_not_contain_match_fails(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(
Exception, match=re.escape('There are matches found for "*abc*" pattern')
):
under_test.page_should_not_contain_match("*abc*")
def test_page_should_not_contain_match_ignore_case(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(
Exception, match=re.escape('There are matches found for "*abc*" pattern')
):
under_test.page_should_not_contain_match("*ABC*", ignore_case=True)
def test_page_should_not_contain_match_custom_message(
mocker: MockerFixture, under_test: x3270
):
mocker.patch("Mainframe3270.py3270.Emulator.string_get", return_value="abc")
with pytest.raises(Exception, match="my error message"):
under_test.page_should_not_contain_match(
"*abc*", error_message="my error message"
)
| 2.296875 | 2 |
tests/test_series/__init__.py | smok-serwis/firanka | 0 | 12797838 | <gh_stars>0
import math
import unittest
from firanka.exceptions import NotInDomainError, DomainError
from firanka.intervals import Interval
from firanka.series import DiscreteSeries, FunctionSeries, ModuloSeries, \
LinearInterpolationSeries, Series
from .common import NOOP, HUGE_IDENTITY
class TestBase(unittest.TestCase):
def test_abstract(self):
self.assertRaises(NotImplementedError, lambda: Series('<-1;1>')[0])
class TestDiscreteSeries(unittest.TestCase):
def test_redundancy_skip(self):
a = DiscreteSeries([(0, 0), (1, 0), (2, 0)], '<0;5>')
b = DiscreteSeries([(0, 0), (1, 0)], '<0;5>')
a.join(b, lambda i, x, y: x + y)
def test_uncov(self):
self.assertRaises(DomainError,
lambda: DiscreteSeries([[0, 0], [1, 1], [2, 2]],
'<-5;2>'))
def test_base(self):
s = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
self.assertEqual(s[0], 0)
self.assertEqual(s[0.5], 0)
self.assertEqual(s[1], 1)
self.assertRaises(NotInDomainError, lambda: s[-1])
self.assertRaises(NotInDomainError, lambda: s[2.5])
s = DiscreteSeries([[0, 0], [1, 1], [2, 2]],
domain=Interval(0, 3, True, True))
self.assertEqual(s[0], 0)
self.assertEqual(s[0.5], 0)
self.assertEqual(s[1], 1)
self.assertRaises(NotInDomainError, lambda: s[-1])
self.assertEqual(s[2.5], 2)
def test_translation(self):
s = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(3)
self.assertEqual(s[3], 0)
self.assertEqual(s[3.5], 0)
self.assertEqual(s[4], 1)
def test_slice_outdomain(self):
series = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
self.assertRaises(NotInDomainError, lambda: series[-1:2])
def test_translate(self):
sp = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).translate(1)
self.assertEqual(sp[1.5], 0)
self.assertEqual(sp[2.5], 1)
def test_slice(self):
series = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
sp = series[0.5:1.5]
self.assertEqual(sp[0.5], 0)
self.assertEqual(sp[1.5], 1)
self.assertRaises(NotInDomainError, lambda: sp[0])
self.assertRaises(NotInDomainError, lambda: sp[2])
self.assertEqual(sp.domain.start, 0.5)
self.assertEqual(sp.domain.stop, 1.5)
def test_eval(self):
sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
sb = DiscreteSeries([[0, 1], [1, 2], [2, 3]])
sc = sa.join_discrete(sb, lambda i, a, b: a + b)
self.assertIsInstance(sc, DiscreteSeries)
self.assertEqual(sc.eval_points([0, 1, 2]), [1, 3, 5])
self.assertEqual(sc.data, [(0, 1), (1, 3), (2, 5)])
def test_eval2(self):
sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
sb = FunctionSeries(NOOP, '<0;2>')
sc = sa.join_discrete(sb, lambda i, a, b: a + b)
self.assertEqual(sc.eval_points([0, 1, 2]), [0, 2, 4])
self.assertIsInstance(sc, DiscreteSeries)
self.assertEqual(sc.data, [(0, 0), (1, 2), (2, 4)])
def test_eval2i(self):
sa = DiscreteSeries([[0, 0], [1, 1], [2, 2]])
sc = sa.join_discrete(HUGE_IDENTITY, lambda i, a, b: i)
self.assertEqual(sc.eval_points([0, 1, 2]), [0, 1, 2])
self.assertIsInstance(sc, DiscreteSeries)
self.assertEqual(sc.data, [(0, 0), (1, 1), (2, 2)])
def test_apply(self):
sb = DiscreteSeries([[0, 0], [1, 1], [2, 2]]).apply(
lambda k, v: k)
self.assertEquals(sb.data, [(0, 0), (1, 1), (2, 2)])
def test_eval3(self):
sa = FunctionSeries(lambda x: x ** 2, '<-10;10)')
sb = FunctionSeries(NOOP, '<0;2)')
sc = sa.join(sb, lambda i, a, b: a * b)
PTS = [0, 1, 1.9]
EPTS = [x * x ** 2 for x in PTS]
self.assertEqual(sc.eval_points(PTS), EPTS)
self.assertTrue(Interval('<0;2)') in sc.domain)
def test_discretize(self):
# note the invalid data for covering this domain
self.assertRaises(DomainError, lambda: FunctionSeries(lambda x: x ** 2,
'<-10;10)').discretize(
[0, 1, 2, 3, 4, 5], '(-1;6)'))
self.assertRaises(NotInDomainError, lambda: FunctionSeries(lambda x: x ** 2,
'<-10;10)').discretize(
[-100, 0, 1, 2, 3, 4, 5], '(-1;6)'))
PTS = [-1, 0, 1, 2, 3, 4, 5]
sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS,
'(-1;6)')
self.assertIsInstance(sa, DiscreteSeries)
self.assertEqual(sa.data, [(i, i ** 2) for i in PTS])
sa = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize(PTS)
self.assertIsInstance(sa, DiscreteSeries)
self.assertEqual(sa.data, [(i, i ** 2) for i in PTS])
empty = FunctionSeries(lambda x: x ** 2, '<-10;10)').discretize([])
self.assertTrue(empty.domain.is_empty())
class TestFunctionSeries(unittest.TestCase):
def test_slice(self):
series = FunctionSeries(NOOP, '<0;2>')
sp = series[0.5:1.5]
self.assertEqual(sp[0.5], 0.5)
self.assertEqual(sp[1.5], 1.5)
self.assertRaises(NotInDomainError, lambda: sp[0])
self.assertRaises(NotInDomainError, lambda: sp[2])
self.assertEqual(sp.domain.start, 0.5)
self.assertEqual(sp.domain.stop, 1.5)
def test_apply(self):
PTS = [-1, -2, -3, 1, 2, 3]
series = FunctionSeries(NOOP, '<-5;5>').apply(lambda k, x: k)
self.assertEqual(series.eval_points(PTS), [x for x in PTS])
def test_apply_wild(self):
def dzika(k, x, a=5, *args, **kwargs):
return k
PTS = [-1, -2, -3, 1, 2, 3]
series = FunctionSeries(NOOP, '<-5;5>').apply(dzika)
self.assertEqual(series.eval_points(PTS), [x for x in PTS])
def test_domain_sensitivity(self):
logs = FunctionSeries(math.log, '(0;5>')
dirs = DiscreteSeries([(0, 1), (1, 2), (3, 4)], '<0;5>')
self.assertRaises(ValueError,
lambda: dirs.join_discrete(logs, lambda i, x, y: x + y))
class TestModuloSeries(unittest.TestCase):
def test_exceptions(self):
self.assertRaises(ValueError, lambda: ModuloSeries(
FunctionSeries(NOOP, '(-inf; 0>')))
self.assertRaises(ValueError, lambda: ModuloSeries(
FunctionSeries(NOOP, '(-inf; inf)')))
self.assertRaises(ValueError,
lambda: ModuloSeries(FunctionSeries(NOOP, '<0; 0>')))
def test_base(self):
series = ModuloSeries(
DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)'))
self.assertEquals(series[3], 1)
self.assertEquals(series[4], 2)
self.assertEquals(series[5], 3)
self.assertEquals(series[-1], 3)
def test_advanced(self):
series = ModuloSeries(DiscreteSeries([(-1, 1), (0, 2), (1, 3)], '<-1;2)'))
self.assertEqual(series.period, 3.0)
self.assertEqual(series.eval_points([-1, 0, 1]), [1, 2, 3])
self.assertEqual(series.eval_points([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]),
[3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
def test_comp_discrete(self):
ser1 = ModuloSeries(FunctionSeries(lambda x: x ** 2, '<0;3)'))
ser2 = FunctionSeries(NOOP, '<0;3)')
ser3 = ser1.join(ser2, lambda i, x, y: x * y)
class TestLinearInterpolation(unittest.TestCase):
def test_lin(self):
series = LinearInterpolationSeries(
DiscreteSeries([(0, 1), (1, 2), (2, 3)], '<0;3)'))
self.assertEqual(series[0], 1)
self.assertEqual(series[0.5], 1.5)
self.assertEqual(series[1], 2)
self.assertEqual(series[2.3], 3)
def test_conf(self):
self.assertRaises(TypeError, lambda: LinearInterpolationSeries(
FunctionSeries(NOOP, '<0;3)')))
| 2.421875 | 2 |
src/utils/remote/remote_api/routes.py | devs-7/bible-projector-python | 0 | 12797839 | <filename>src/utils/remote/remote_api/routes.py
from typing import Any, Callable, Dict, Optional, Union
from flask import Blueprint, request
from src.utils.remote import Command
from werkzeug.utils import send_from_directory
api_routes_blueprint = Blueprint('api_routes', __name__)
execute: Callable[[Union[Command, str], Optional[Dict[str, Any]]], None]
@api_routes_blueprint.route('/')
def index():
return 'Bible projector remote API'
@api_routes_blueprint.route('/favicon.ico')
def favicon():
return send_from_directory(
api_routes_blueprint.root_path, 'icon.ico',
)
@api_routes_blueprint.route('/<command>', methods=['POST'])
def command(command: str):
global execute
data = request.get_json()
execute(command, data)
return command
| 2.390625 | 2 |
test/test_workspace.py | fkie/rosrepo | 5 | 12797840 | <reponame>fkie/rosrepo
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: <NAME>
#
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import unittest
import os
import shutil
import yaml
import pickle
from tempfile import mkdtemp
try:
from mock import patch
except ImportError:
from unittest.mock import patch
import sys
sys.stderr = sys.stdout
from rosrepo.config import Config
import test.helper as helper
class WorkspaceTest(unittest.TestCase):
def setUp(self):
self.ros_root_dir = mkdtemp()
self.wsdir = mkdtemp()
self.homedir = mkdtemp()
helper.create_fake_ros_root(self.ros_root_dir)
helper.create_package(self.wsdir, "alpha", ["beta", "gamma", "installed-system"])
helper.create_package(self.wsdir, "beta", ["delta"])
helper.create_package(self.wsdir, "gamma", [])
helper.create_package(self.wsdir, "delta", [])
helper.create_package(self.wsdir, "epsilon", ["broken"])
helper.create_package(self.wsdir, "broken", ["missing"])
helper.create_package(self.wsdir, "incomplete", ["missing-system"])
helper.create_package(self.wsdir, "ancient", [], deprecated=True)
helper.create_package(self.wsdir, "ancient2", [], deprecated="Walking Dead")
for blacklisted_key in ["ROS_WORKSPACE", "ROS_PACKAGE_PATH"]:
if blacklisted_key in os.environ:
del os.environ[blacklisted_key]
os.environ["HOME"] = self.homedir
os.environ["XDG_CONFIG_HOME"] = os.path.join(self.homedir, ".config")
def tearDown(self):
shutil.rmtree(self.wsdir, ignore_errors=True)
shutil.rmtree(self.homedir, ignore_errors=True)
shutil.rmtree(self.ros_root_dir, ignore_errors=True)
self.ros_root_dir = None
self.wsdir = None
def get_config_value(self, key, default=None):
cfg = Config(self.wsdir, read_only=True)
return cfg.get(key, default)
def test_bash(self):
"""Test proper behavior of 'rosrepo bash'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
self.assertEqual(
helper.run_rosrepo("bash", "-w", self.wsdir, "ROS_WORKSPACE", "ROS_PACKAGE_PATH", "PATH", "UNKNOWN"),
(0, "ROS_WORKSPACE=%(wsdir)s\nROS_PACKAGE_PATH=%(wsdir)s/src\nPATH=%(env_path)s\n# variable UNKNOWN is not set\n" % {"wsdir": self.wsdir, "env_path": os.environ["PATH"]})
)
os.environ["ROS_PACKAGE_PATH"] = os.pathsep.join(["/before"] + ["%s/src/%s" % (self.wsdir, d) for d in ["alpha", "beta", "gamma"]] + ["/after"])
self.assertEqual(
helper.run_rosrepo("bash", "-w", self.wsdir),
(0, "ROS_WORKSPACE=%(wsdir)s\nROS_PACKAGE_PATH=/before%(sep)s%(wsdir)s/src%(sep)s/after\n" % {"wsdir": self.wsdir, "sep": os.pathsep})
)
def test_clean(self):
"""Test proper behavior of 'rosrepo clean'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
os.makedirs(os.path.join(self.wsdir, "build"))
exitcode, stdout = helper.run_rosrepo("clean", "-w", self.wsdir, "--dry-run")
self.assertEqual(exitcode, 0)
self.assertTrue(os.path.isdir(os.path.join(self.wsdir, "build")))
exitcode, stdout = helper.run_rosrepo("clean", "-w", self.wsdir)
self.assertEqual(exitcode, 0)
self.assertFalse(os.path.isdir(os.path.join(self.wsdir, "build")))
def test_upgrade_from_version_1(self):
"""Test if workspaces from rosrepo 1.x are migrated properly"""
os.rename(os.path.join(self.wsdir, "src"), os.path.join(self.wsdir, "repos"))
os.makedirs(os.path.join(self.wsdir, "src"))
with open(os.path.join(self.wsdir, "src", "CMakeLists.txt"), "w"):
pass
with open(os.path.join(self.wsdir, "src", "toplevel.cmake"), "w"):
pass
with open(os.path.join(self.wsdir, ".catkin_workspace"), "w"):
pass
os.symlink(os.path.join("..", "repos", "alpha"), os.path.join(self.wsdir, "src", "alpha"))
os.symlink(os.path.join("..", "repos", "beta"), os.path.join(self.wsdir, "src", "beta"))
os.symlink(os.path.join("..", "repos", "gamma"), os.path.join(self.wsdir, "src", "gamma"))
os.symlink(os.path.join("..", "repos", "delta"), os.path.join(self.wsdir, "src", "delta"))
with open(os.path.join(self.wsdir, "repos", ".metainfo"), "w") as f:
f.write(yaml.safe_dump(
{
"alpha": {"auto": False, "pin": False},
"beta": {"auto": False, "pin": True},
"gamma": {"auto": True, "pin": False},
"delta": {"auto": True, "pin": False},
},
default_flow_style=False
))
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "alpha\nbeta\ndelta\ngamma\n")
)
self.assertEqual(self.get_config_value("default_build"), ["alpha"])
self.assertEqual(self.get_config_value("pinned_build"), ["beta"])
def test_upgrade_from_version_2(self):
"""Test if workspaces from rosrepo 2.x are migrated properly"""
with open(os.path.join(self.wsdir, ".catkin_workspace"), "w"):
pass
os.makedirs(os.path.join(self.wsdir, ".catkin_tools", "profiles", "rosrepo"))
os.makedirs(os.path.join(self.wsdir, ".rosrepo"))
from rosrepo.common import PkgInfo
with open(os.path.join(self.wsdir, ".rosrepo", "info"), "wb") as f:
metadata = {}
metadata["alpha"] = PkgInfo()
metadata["beta"] = PkgInfo()
metadata["alpha"].selected = True
metadata["beta"].selected = True
metadata["beta"].pinned = True
f.write(pickle.dumps(metadata))
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "alpha\nbeta\ndelta\ngamma\n")
)
self.assertEqual(self.get_config_value("default_build"), ["alpha"])
self.assertEqual(self.get_config_value("pinned_build"), ["beta"])
def test_upgrade_from_older_version_3(self):
"""Test if workspaces from rosrepo 3.x are upgraded to latest version"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "alpha")
self.assertEqual(exitcode, 0)
cfg = Config(self.wsdir)
cfg["version"] = "3.0.0a0"
cfg.write()
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "alpha\nbeta\ndelta\ngamma\n")
)
from rosrepo import __version__ as rosrepo_version
self.assertEqual(self.get_config_value("version"), rosrepo_version)
def test_incompatible_new_version(self):
"""Test if workspaces from future rosrepo versions are detected"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
cfg = Config(self.wsdir)
cfg["version"] = "999.0"
cfg.write()
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-n")
self.assertEqual(exitcode, 1)
self.assertIn("newer version", stdout)
def test_buildset(self):
"""Test proper behavior of 'rosrepo include' and 'rosrepo exclude'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--dry-run", "alpha")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("default_build", []), [])
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "alpha")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("default_build"), ["alpha"])
self.assertEqual(self.get_config_value("pinned_build"), [])
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "alpha\nbeta\ndelta\ngamma\n")
)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--pinned", "beta")
self.assertEqual(exitcode, 0)
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "alpha\nbeta\ndelta\ngamma\n")
)
self.assertEqual(self.get_config_value("pinned_build"), ["beta"])
exitcode, stdout = helper.run_rosrepo("exclude", "-w", self.wsdir, "-a")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("default_build"), [])
self.assertEqual(
helper.run_rosrepo("list", "-w", self.wsdir, "-n"),
(0, "beta\ndelta\n")
)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--default", "beta")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("exclude", "-w", self.wsdir, "--pinned", "beta")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("default_build"), ["beta"])
self.assertEqual(self.get_config_value("pinned_build"), [])
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--pinned", "epsilon")
self.assertEqual(exitcode, 1)
self.assertIn("cannot resolve dependencies", stdout)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--default", "epsilon")
self.assertEqual(exitcode, 1)
self.assertIn("cannot resolve dependencies", stdout)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--default", "--all")
self.assertEqual(exitcode, 1)
self.assertIn("cannot resolve dependencies", stdout)
self.assertEqual(self.get_config_value("default_build"), ["beta"])
self.assertEqual(self.get_config_value("pinned_build"), [])
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--default", "incomplete")
self.assertEqual(exitcode, 0)
self.assertIn("apt-get install", stdout)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--default", "ancient", "ancient2")
self.assertEqual(exitcode, 0)
self.assertIn("is deprecated", stdout)
self.assertIn("Walking Dead", stdout)
os.makedirs(os.path.join(self.wsdir, "build"))
exitcode, stdout = helper.run_rosrepo("init", "--reset", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
self.assertFalse(os.path.isdir(os.path.join(self.wsdir, "build")))
self.assertEqual(self.get_config_value("default_build", []), [])
self.assertEqual(self.get_config_value("pinned_build", []), [])
def test_build(self):
"""Test proper behavior of 'rosrepo build'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--job-limit", "1")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--dry-run")
self.assertEqual(exitcode, 1)
self.assertIn("no packages to build", stdout)
helper.failing_programs = ["catkin_lint"]
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--dry-run", "alpha")
self.assertEqual(exitcode, 0)
self.assertIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertIn("gamma", stdout)
self.assertIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "alpha")
self.assertEqual(exitcode, 1)
self.assertIn("catkin_lint reported errors", stdout)
helper.failing_programs = []
with patch("rosrepo.cmd_build.find_ros_root", lambda x: None):
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "alpha")
self.assertEqual(exitcode, 1)
self.assertIn("cannot detect ROS distribution", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--all")
self.assertEqual(exitcode, 1)
self.assertIn("cannot resolve dependencies", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--set-default")
self.assertEqual(exitcode, 1)
self.assertIn("no packages given", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--set-default", "alpha")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("default_build", []), ["alpha"])
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--set-pinned")
self.assertEqual(exitcode, 1)
self.assertIn("no packages given", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--set-pinned", "beta")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("pinned_build", []), ["beta"])
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir)
self.assertEqual(exitcode, 0)
self.assertIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertIn("gamma", stdout)
self.assertIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("exclude", "-w", self.wsdir, "--all")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir)
self.assertEqual(exitcode, 0)
self.assertNotIn("alpha", stdout)
self.assertNotIn("gamma", stdout)
self.assertIn("beta", stdout)
self.assertIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "incomplete")
self.assertEqual(exitcode, 1)
self.assertIn("missing system package", stdout)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--clean")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("build", "-w", self.wsdir, "--clean", "--dry-run", "--offline", "--verbose", "--no-status", "--keep-going", "-j2")
self.assertEqual(exitcode, 0)
def test_list(self):
"""Test proper behavior of 'rosrepo list'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "alpha")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("include", "-w", self.wsdir, "--pinned", "beta")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir)
self.assertEqual(exitcode, 0)
self.assertIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertIn("gamma", stdout)
self.assertIn("delta", stdout)
self.assertNotIn("epsilon", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-BC")
self.assertEqual(exitcode, 0)
self.assertIn("search filter", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-S")
self.assertEqual(exitcode, 0)
self.assertIn("alpha", stdout)
self.assertNotIn("beta", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-P")
self.assertEqual(exitcode, 0)
self.assertNotIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertNotIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-Pv")
self.assertEqual(exitcode, 0)
self.assertIn("alpha", stdout)
self.assertNotIn("beta", stdout)
self.assertIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-PD")
self.assertEqual(exitcode, 0)
self.assertNotIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertIn("delta", stdout)
exitcode, stdout = helper.run_rosrepo("list", "-w", self.wsdir, "-W")
self.assertIn("alpha", stdout)
self.assertIn("beta", stdout)
self.assertIn("epsilon", stdout)
def test_config(self):
"""Test proper behavior of 'rosrepo config'"""
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 0)
with patch("rosrepo.cmd_config.find_ros_root", lambda x: None):
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir)
self.assertEqual(exitcode, 1)
self.assertIn("cannot detect ROS distribution", stdout)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--job-limit", "16")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("job_limit"), 16)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--job-limit", "0")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("job_limit"), None)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--job-limit", "8")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("job_limit"), 8)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-job-limit")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("job_limit"), None)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--install")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("install"), True)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-install")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("install"), False)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-compiler", "clang")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("compiler"), "clang")
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-compiler", "does_not_exist")
self.assertEqual(exitcode, 1)
self.assertIn("unknown compiler", stdout)
self.assertEqual(self.get_config_value("compiler"), "clang")
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--unset-compiler")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("compiler"), None)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-crawl-depth", "2")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_crawl_depth"), 2)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-crawl-depth", "1")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_crawl_depth"), 1)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--store-credentials")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "<PASSWORD>token"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--private-token", "<PASSWORD>", "--store-credentials")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("store_credentials"), True)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-store-credentials")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("store_credentials"), False)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "<PASSWORD>"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--private-token", "<PASSWORD>")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "<PASSWORD>"}])
self.assertEqual(
helper.run_rosrepo("config", "-w", self.wsdir, "--get-gitlab-url", "does_not_exist"),
(0, "\n")
)
self.assertEqual(
helper.run_rosrepo("config", "-w", self.wsdir, "--get-gitlab-url", "Test"),
(0, "http://localhost\n")
)
self.assertEqual(
helper.run_rosrepo("config", "-w", self.wsdir, "--show-gitlab-urls", "--autocomplete"),
(0, "Test\n")
)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--show-gitlab-urls")
self.assertEqual(exitcode, 0)
self.assertIn("Test", stdout)
self.assertIn("http://localhost", stdout)
self.assertIn("yes", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-logout", "does_not_exist")
self.assertEqual(exitcode, 1)
self.assertIn("no such Gitlab server", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-logout", "Test")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--private-token", "<PASSWORD>")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--unset-gitlab-url", "Test")
self.assertEqual(exitcode, 0)
self.assertEqual(
helper.run_rosrepo("config", "-w", self.wsdir, "--show-gitlab-urls", "--autocomplete"),
(0, "\n")
)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--private-token", "<PASSWORD>")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-login", "Test", "--private-token", "<PASSWORD>")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-gitlab-url", "Test", "http://localhost", "--private-token", "<PASSWORD>", "--store-credentials")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "t0ps3cr3t"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--remove-credentials")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-login", "Test", "--private-token", "<PASSWORD>")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "<PASSWORD>"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-login", "Test")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("gitlab_servers"), [{"label": "Test", "url": "http://localhost", "private_token": "<PASSWORD>"}])
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--offline", "--set-gitlab-url", "Test", "http://localhost")
self.assertEqual(exitcode, 0)
self.assertIn("cannot verify Gitlab private token in offline mode", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--offline", "--gitlab-login", "Test")
self.assertEqual(exitcode, 0)
self.assertIn("cannot verify Gitlab private token in offline mode", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--remove-credentials")
self.assertEqual(exitcode, 0)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--offline", "--set-gitlab-url", "Test", "http://localhost")
self.assertEqual(exitcode, 1)
self.assertIn("cannot acquire Gitlab private token in offline mode", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--offline", "--gitlab-login", "Test")
self.assertEqual(exitcode, 1)
self.assertIn("cannot acquire Gitlab private token in offline mode", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--unset-gitlab-url", "Test")
self.assertEqual(exitcode, 0)
cfg = Config(self.wsdir)
cfg["gitlab_servers"] = [{"label": "NoURL"}]
cfg.write()
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-login", "NoURL")
self.assertEqual(exitcode, 1)
self.assertIn("cannot acquire token for Gitlab server without URL", stdout)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--gitlab-login", "does_not_exist")
self.assertEqual(exitcode, 1)
self.assertIn("no such Gitlab server", stdout)
#######################
self.assertEqual(self.get_config_value("ros_root"), self.ros_root_dir)
helper.run_rosrepo("config", "-w", self.wsdir, "--unset-ros-root")
self.assertEqual(self.get_config_value("ros_root"), None)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--set-ros-root", self.ros_root_dir)
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("ros_root"), self.ros_root_dir)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-catkin-lint")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_catkin_lint"), False)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--catkin-lint")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_catkin_lint"), True)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-catkin-lint")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_catkin_lint"), False)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-rosclipse")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_rosclipse"), False)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--rosclipse")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_rosclipse"), True)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-rosclipse")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_rosclipse"), False)
#######################
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-env-cache")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_env_cache"), False)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--env-cache")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_env_cache"), True)
exitcode, stdout = helper.run_rosrepo("config", "-w", self.wsdir, "--no-env-cache")
self.assertEqual(exitcode, 0)
self.assertEqual(self.get_config_value("use_env_cache"), False)
#######################
def test_init_failures(self):
"""Test proper behavior of 'rosrepo init'"""
with patch("rosrepo.cmd_init.find_ros_root", lambda x: None):
exitcode, stdout = helper.run_rosrepo("init", self.wsdir)
self.assertEqual(exitcode, 1)
self.assertIn("cannot detect ROS distribution", stdout)
os.environ["HOME"] = self.wsdir
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, self.wsdir)
self.assertEqual(exitcode, 1)
self.assertIn("$HOME", stdout)
exitcode, stdout = helper.run_rosrepo("init", "-r", self.ros_root_dir, os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir)))
self.assertEqual(exitcode, 1)
self.assertIn("rosrepo source folder", stdout)
| 1.765625 | 2 |
gym/common/profiler/profiler.py | intrig-unicamp/gym | 8 | 12797841 | <gh_stars>1-10
import os
import json
import logging
from gym.common.process import Actuator
logger = logging.getLogger(__name__)
class Profiler:
FILES = 'info'
FILES_PREFIX = 'info_'
FILES_SUFFIX = 'py'
def __init__(self):
self.profiles = {}
self._outputs = []
self.actuator = Actuator()
self.cfg_acts()
def cfg_acts(self):
logger.info("Loading Profile Infos")
folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
Profiler.FILES)
cfg = {
"folder": folder,
"prefix": Profiler.FILES_PREFIX,
"sufix": Profiler.FILES_SUFFIX,
"full_path": True,
}
self.actuator.cfg(cfg)
def profile(self):
self._outputs = self.actuator.get_acts()
for value in self._outputs.values():
name = value.get("name", None)
if name:
self.profiles[name] = value
else:
logger.info("Could not load profiler output %s", value)
return self.profiles
if __name__ == "__main__":
level = logging.DEBUG
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(level)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(level)
logger = logging.getLogger(__name__)
prfl = Profiler()
msg = prfl.profile()
print(msg) | 2.3125 | 2 |
tests/scripts/thread-cert/pktverify/null_field.py | AdityaHPatwardhan/openthread | 2,962 | 12797842 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
nullField = None
class NullField(object):
"""
Represents a null field that does not exists.
"""
def __new__(cls, *args, **kwargs):
global nullField
if nullField is None:
nullField = object.__new__(cls, *args, **kwargs)
return nullField
def __init__(self):
assert self is nullField
def __bool__(self):
"""
NullField is always treated as False.
"""
return False
def __getattr__(self, item):
"""
Any sub field of the NullField is NullField itself.
"""
return self
def __setattr__(self, key, value):
pass
def __len__(self) -> 0:
return 0
def __eq__(self, other):
"""
NullField is always not equal to any other value.
"""
return False
def __ne__(self, other):
return True
def __lt__(self, other):
"""
Comparing NullField to any other value gets False.
"""
return False
def __le__(self, other):
"""
Comparing NullField to any other value gets False.
"""
return False
def __gt__(self, other):
"""
Comparing NullField to any other value gets False.
"""
return False
def __ge__(self, other):
"""
Comparing NullField to any other value gets False.
"""
return False
def __str__(self):
return "nullField"
def __repr__(self):
return 'nullField'
NullField()
if __name__ == '__main__':
assert nullField is NullField()
assert not nullField, repr(nullField)
assert nullField != nullField, repr(nullField)
assert nullField != 0
assert not (nullField > 1)
assert not (nullField < 1)
assert not (nullField < nullField)
assert not (nullField > nullField)
assert bool(nullField) is False
assert nullField != ""
assert nullField != None # noqa
assert nullField is not None
| 1.6875 | 2 |
utils/device_util.py | zhangxdnw/pc_remote_android | 0 | 12797843 | <gh_stars>0
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : device_util.py
@Contact : <EMAIL>
@License : MIT
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2021/10/25 10:23 zxd 1.0 None
"""
from adbutils import adb
import uiautomator2 as u2
from uiautomator2 import Device
def list_all_devices() -> []:
"""
列出当前所有连接的设备
Returns:
已连接设备的序列号数组, 没有连接设备返回空数组
"""
device_list = adb.device_list()
if len(device_list) < 1:
return []
else:
return [device.serial for device in device_list]
def connect(addr: str) -> Device:
return u2.connect(addr)
def top_app(device: Device) -> str:
return device.current_app()['package']
def top_activity(device: Device) -> str:
return device.app_current()['package'] + device.app_current()['activity']
def activity_window(device: Device, activity: str) -> list:
wins = device.shell("dumpsys window " + activity + " | grep \"Window #\"")
return wins.output.strip().split('\n')
def stop_app(device: Device, package: str):
device.app_stop(package)
def stop_all(device: Device):
device.app_stop_all()
if __name__ == '__main__':
print(list_all_devices())
| 2.515625 | 3 |
jupyter_rsession_proxy/__init__.py | uc-cdis/jupyter-rsession-proxy | 0 | 12797844 | import os
import tempfile
import subprocess
import getpass
import shutil
from textwrap import dedent
def get_r_env():
env = {}
executable = 'R'
try:
# get notebook app
from notebook.notebookapp import NotebookApp
nbapp = NotebookApp.instance()
kernel_name = nbapp.kernel_manager.default_kernel_name
if kernel_name:
kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name)
env.update(kernel_spec.env)
executable = kernel_spec.argv[0]
# patch LD_LIBRARY_PATH for conda env
conda_lib_dir = os.path.join(env['CONDA_PREFIX'], 'lib')
#r_lib_dir = os.path.join(conda_lib_dir, 'R/lib')
env.update({
# 'LD_LIBRARY_PATH': r_lib_dir + ':' + conda_lib_dir
'LD_LIBRARY_PATH': conda_lib_dir
})
except Exception:
nbapp.log.warning('Error when trying to get R executable from kernel')
# Detect various environment variables rsession requires to run
# Via rstudio's src/cpp/core/r_util/REnvironmentPosix.cpp
cmd = [executable, '--slave', '--vanilla', '-e',
'cat(paste(R.home("home"),R.home("share"),R.home("include"),R.home("doc"),getRversion(),sep=":"))']
r_output = subprocess.check_output(cmd)
R_HOME, R_SHARE_DIR, R_INCLUDE_DIR, R_DOC_DIR, version = \
r_output.decode().split(':')
# TODO:
# maybe set a few more env vars?
# e.g. MAXENT, DISPLAY='' (to avoid issues with java)
# e.g. would be nice if RStudio terminal starts with correct conda env?
# -> either patch ~/Renviron / Renviron.site
# -> user Rprofile.site (if conda env specific?)
# -> use ~/.Rprofile ... if user specific?
# make R kernel used configurable?
# ... or rather use standard system R installation, and let user install stuff in home folder?
env.update({
'R_DOC_DIR': R_DOC_DIR,
'R_HOME': R_HOME,
'R_INCLUDE_DIR': R_INCLUDE_DIR,
'R_SHARE_DIR': R_SHARE_DIR,
'RSTUDIO_DEFAULT_R_VERSION_HOME': R_HOME,
'RSTUDIO_DEFAULT_R_VERSION': version,
})
return env
def setup_shiny():
'''Manage a Shiny instance.'''
def _get_shiny_cmd(port):
# server.r_path ???
conf = dedent("""
run_as {user};
server {{
bookmark_state_dir {site_dir}/shiny-server-boomarks;
listen {port};
location / {{
site_dir {site_dir};
log_dir {site_dir}/logs;
directory_index on;
}}
}}
""").format(
user=getpass.getuser(),
port=str(port),
site_dir=os.getcwd()
)
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
f.write(conf)
f.close()
return ['shiny-server', f.name]
def _get_shiny_env(port):
env = get_r_env()
return env
return {
'command': _get_shiny_cmd,
'environment': _get_shiny_env,
'launcher_entry': {
'title': 'Shiny',
'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'shiny.svg')
}
}
def setup_rstudio():
def _get_rsession_env(port):
env = get_r_env()
# rserver needs USER to be set to something sensible,
# otherwise it'll throw up an authentication page
if not os.environ.get('USER', ''):
env['USER'] = getpass.getuser()
return env
def _get_r_executable():
try:
# get notebook app
from notebook.notebookapp import NotebookApp
nbapp = NotebookApp.instance()
# get R executable:
kernel_name = nbapp.kernel_manager.default_kernel_name
if kernel_name:
kernel_spec = nbapp.kernel_spec_manager.get_kernel_spec(kernel_name)
return kernel_spec.argv[0]
        except Exception:
            # 'nbapp' may be unbound if the NotebookApp import failed, so fall
            # back to the standard logging module for the warning.
            import logging
            logging.getLogger(__name__).warning(
                'Error when trying to get R executable from kernel')
return 'R'
def _get_rsession_cmd(port):
# Other paths rsession maybe in
other_paths = [
# When rstudio-server deb is installed
'/usr/lib/rstudio-server/bin/rserver',
]
if shutil.which('rserver'):
executable = 'rserver'
else:
for op in other_paths:
if os.path.exists(op):
executable = op
break
else:
                raise FileNotFoundError('Cannot find rserver on PATH or in known install locations')
cmd = [
executable,
'--www-port=' + str(port),
'--rsession-which-r=' + _get_r_executable(),
]
env = get_r_env()
if env.get('LD_LIBRARY_PATH'):
cmd.append('--rsession-ld-library-path=' + env['LD_LIBRARY_PATH'])
return cmd
return {
'command': _get_rsession_cmd,
'environment': _get_rsession_env,
'launcher_entry': {
'title': 'RStudio',
'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'icons', 'rstudio.svg')
}
}
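# How these hooks are consumed (a sketch based on the usual jupyter-server-proxy
# convention; the actual setup.py of this repository is not shown here):
# setup_rstudio and setup_shiny are typically registered as entry points so
# that jupyter-server-proxy can discover and launch them, roughly like
#
#   entry_points={
#       'jupyter_serverproxy_servers': [
#           'rstudio = jupyter_rsession_proxy:setup_rstudio',
#           'shiny = jupyter_rsession_proxy:setup_shiny',
#       ]
#   }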
| 2.21875 | 2 |
chapter_1/08.py | takumi34/nlp_100 | 0 | 12797845 | def cipher(s):
res = []
for i in s:
if (i.islower()):
res.append(chr(219 - ord(i)))
else:
res.append(i)
return ''.join(res)
text = "chika chika"
a = cipher(text)
print(a)
b = cipher(a)
print(b)
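# Quick sanity check (added for illustration): the mapping is its own inverse
# on lowercase letters, so encoding twice returns the original text.
assert cipher("abc") == "zyx"
assert cipher(cipher("chika chika")) == "chika chika"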
| 3.734375 | 4 |
analysis_data.py | leeh43/MULAN_universal_lesion_analysis | 6 | 12797846 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 29 02:29:53 2019
@author: leeh43
"""
import os
import numpy as np
import json
import matplotlib.pyplot as plt
plt.rcdefaults()
import random
def get_files_endswith(src_dir, ext):
if not os.path.isdir(src_dir):
raise ValueError('Folder does not exist:' + src_dir)
file_list = []
for root, dirs, files in os.walk(src_dir):
for file in files:
if file.lower().endswith(ext.lower()):
file_list.append(root + '/' + file)
return file_list
data_dir = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results')
file_list= get_files_endswith(data_dir, '.txt')
data_list = []
for i in file_list:
with open(i) as f:
content = f.readlines()
content = [x.strip() for x in content]
data_list.append(content)
json_file = os.path.join('/nfs/masi/leeh43/MULAN_universal_lesion_analysis/program_data/'
+ 'tags_cache.json')
with open(json_file) as f:
json_data = json.load(f)
tag_class = json_data['tag_dict_list']
tag_list = []
for item in tag_class:
tag_list.append(item['tag'])
################## Get Tag List Completed ######################
intermediate_list = []
num_list = []
for tag in tag_list:
for item in data_list:
for row in item:
if tag in row:
intermediate_list.append(row)
num_list.append(len(intermediate_list))
intermediate_list = []
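# Equivalent counting with collections.Counter (a sketch added for clarity,
# not part of the original script): num_list above already holds the same
# per-tag totals in tag_list order.
#
#   from collections import Counter
#   tag_counts = Counter()
#   for item in data_list:
#       for row in item:
#           for tag in tag_list:
#               if tag in row:
#                   tag_counts[tag] += 1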
################ Plot Graph ###################
#width = 0.5
#fig = plt.figure(figsize=(10,40))
#y_pos = np.arange(len(tag_list))
#plt.barh(y_pos,num_list, width, align='center')
#plt.yticks(y_pos, tag_list)
#plt.xlabel('Numbers in Zhoubing datasets')
#plt.title('Evaluation of DeepLesion on Zhoubing100')
#plt.show()
#fig.set_size_inches(25, 40)
#fig.savefig('evaluation.png', dpi=300)
################# Total number of lesion ################
num_lesion = []
for item in data_list:
for row in item:
if 'lesion' in row:
num_lesion.append(row)
################# Total number of liver lesion ################
liver_list = []
for item in num_lesion:
if 'liver' in item:
liver_list.append(item)
| 2.3125 | 2 |
pydash/Dash/Measurement/Metric/__init__.py | ensomniac/dash | 0 | 12797847 | #!/usr/bin/python
#
# 2022 <NAME>, <EMAIL>
# <NAME>, <EMAIL>
class _Metric:
def __init__(self):
pass
@property
def DisplayName(self):
return "Metric"
@property
def AssetPath(self):
return "metric"
@property
def UnitModules(self):
return [
self.Millimeter,
self.Centimeter,
self.Decimeter,
self.Meter
]
@property
def UnitCombos(self):
return self.get_all_unit_combos()
@property
def BaseUnit(self):
return self.Meter.ToDict()
@property
def Millimeter(self):
from .millimeter import Millimeter
return Millimeter
@property
def Centimeter(self):
from .centimeter import Centimeter
return Centimeter
@property
def Decimeter(self):
from .decimeter import Decimeter
return Decimeter
@property
def Meter(self):
from .meter import Meter
return Meter
def ToDict(self):
from .. import SystemToDict
return SystemToDict(self)
def get_all_unit_combos(self):
units = []
for mod in self.UnitModules:
unit_combo = {
"display_name": mod.DisplayName,
"asset_path": mod.AssetPath, # May need to be "id" for combos
"abbreviation": mod.Abbreviation,
"base_conversion_multiplier": mod.BaseConversionMultiplier,
}
units.append(unit_combo)
return units
Metric = _Metric()
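# Minimal usage sketch (hypothetical, not part of the module); the import path
# follows the package layout pydash/Dash/Measurement/Metric and assumes the
# sibling unit modules (millimeter, centimeter, decimeter, meter) are present.
#
#   from Dash.Measurement.Metric import Metric
#   for combo in Metric.UnitCombos:
#       print(combo["abbreviation"], combo["base_conversion_multiplier"])
#   print(Metric.BaseUnit)   # the Meter unit serialized to a dict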
| 2.34375 | 2 |
suministrospr/utils/models.py | amberlowh/suministrospr | 42 | 12797848 | from django.db import models
from .fields import DateTimeCreatedField, DateTimeModifiedField
class BaseModel(models.Model):
created_at = DateTimeCreatedField()
modified_at = DateTimeModifiedField()
class Meta:
get_latest_by = "modified_at"
ordering = ("-modified_at", "-created_at")
abstract = True
| 2.40625 | 2 |
bank_api.py | blizzarac/BankTransferTools | 0 | 12797849 | __author__ = 'alexanderstolz'
import hib_sql_connection
# Thin wrappers around the Hibiscus banking database: "umsatz" is the
# transactions table and "betrag" the signed amount column, so negative
# amounts are outgoing and positive amounts are incoming transfers.
def getAllTransfers():
connection = hib_sql_connection.connectToHibiscus()
return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz;")
def getOutgoingTransfers():
connection = hib_sql_connection.connectToHibiscus()
return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz where betrag < 0;")
def getIncomingTransfers():
connection = hib_sql_connection.connectToHibiscus()
return hib_sql_connection.queryToHibiscus(connection, "select * from umsatz where betrag > 0;") | 2.328125 | 2 |
libs/indicators/ema.py | meetri/cryptolib | 0 | 12797850 | import os,sys,talib,numpy,math,logging,time,datetime,numbers
from collections import OrderedDict
from baseindicator import BaseIndicator
class EMA(BaseIndicator):
def __init__(self,csdata, config = {}):
config["period"] = config.get("period",30)
config["metric"] = config.get("metric","closed")
config["label"] = config.get("label","ema")
config["label"] = "{}{}".format(config["label"],config["period"])
BaseIndicator.__init__(self,csdata,config)
self.chartcolors = ["mediumslateblue"]
self.data = None
self.analysis = None
self.get_analysis()
def get_settings(self):
return "{}".format(self.config["period"])
def get_charts(self):
data = []
for i in range(0,len(self.csdata[ self.config["metric"] ])):
if isinstance(self.data[i],numbers.Number) and self.data[i] > 0:
ts = time.mktime(datetime.datetime.strptime(self.csdata["time"][i], "%Y-%m-%dT%H:%M:%SZ").timetuple())
data.append({
"x": ts,
"y": self.data[i],
})
return [{
"key": "{}:{}".format(self.label,self.config["period"]),
"type": "line",
"color": "#FFF5EE",
"yAxis": 1,
"values": data
}]
def get_ema(self):
if self.csdata is not None:
try:
smetric = self.scaleup( self.csdata[self.config["metric"]])
data = talib.EMA( numpy.array(smetric), self.config["period"])
self.data = self.scaledown(data)
# scaledown
except Exception as ex:
self.data = None
raise ex
return self.data
def get_analysis(self ):
if self.data is None:
self.get_ema()
ema = self.data[-1]
ema1 = self.data[-2]
        # Measure the recent EMA trend as a chained ratio of consecutive values
        # over the last ten candles (logic unchanged from the original; only the
        # None comparison is made idiomatic).
        slope = None
        for k in range(-1, -10, -1):
            if slope is None:
                slope = self.data[k-1] / self.data[k]
            else:
                slope = slope / (self.data[k-1] / self.data[k])
last_price = self.csdata["closed"][-1]
closing_time = self.csdata["time"][-1]
action = None
if last_price < ema:
action = "oversold"
res = {
"weight": 2,
"time": closing_time,
"indicator-data": {
"ema": ema
},
"analysis": OrderedDict()
}
res["analysis"]["name"] = "{}:{}".format(self.get_name(),self.get_settings())
res["analysis"]["signal"] = action
res["analysis"]["ema"] = ema
res["analysis"]["slope"] = slope
res["analysis"]["order"] = ["ema"]
self.analysis = res
return res
def format_view(self):
newres = dict(self.analysis["analysis"])
newres["slope"] = "{:.4f}".format(newres["slope"])
newres["ema"] = "{:.8f}".format(newres["ema"])
return newres
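# Hypothetical usage sketch (not in the original file): csdata is expected to
# be a dict of parallel lists with at least "closed" prices and ISO-8601 "time"
# strings, as produced elsewhere in this repository; BaseIndicator and TA-Lib
# must be importable for the class above to work.
#
#   ema = EMA(csdata, {"period": 20, "metric": "closed"})
#   print(ema.format_view())   # e.g. {'name': ..., 'signal': ..., 'ema': ..., 'slope': ..., 'order': [...]}
#   charts = ema.get_charts()  # line-series dicts ready for charting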
| 2.328125 | 2 |